HV: treewide: fix violations of coding guideline C-EP-05

The coding guideline rule C-EP-05 requires that 'parentheses shall be used
to set the operator precedence explicitly'. This patch adds the missing
parentheses detected by the static analyzer.

Tracked-On: #6776
Signed-off-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Junjie Mao 2021-10-29 10:10:56 +08:00 committed by wenlingz
parent 4cf6c288cd
commit 3f3f4be642
6 changed files with 12 additions and 12 deletions

View File

@ -292,7 +292,7 @@ void create_prelaunched_vm_e820(struct acrn_vm *vm)
hpa1_hi_size = vm_config->memory.size - lowmem_max_length;
gpa_start = add_ram_entry((vm->e820_entries + entry_idx), gpa_start, hpa1_hi_size);
entry_idx++;
} else if (vm_config->memory.size <= MEM_1M + hpa1_part1_max_length + MEM_1M) {
} else if (vm_config->memory.size <= (MEM_1M + hpa1_part1_max_length + MEM_1M)) {
/*
* In this case, hpa1 is only enough for the first
* 1M + part1 + last 1M (ACPI NVS/DATA), so part2 will be empty.
@ -310,7 +310,7 @@ void create_prelaunched_vm_e820(struct acrn_vm *vm)
hpa2_lo_size = (lowmem_max_length - vm_config->memory.size);
gpa_start = vm->e820_entries[ENTRY_HPA1_LOW_PART2].baseaddr + vm->e820_entries[ENTRY_HPA1_LOW_PART2].length;
if (hpa2_lo_size > 0 && remaining_hpa2_size > 0) {
if ((hpa2_lo_size > 0) && (remaining_hpa2_size > 0)) {
/* In this case, hpa2 may have some parts to be mapped to lowmem, so we add an entry for hpa2_lo */
if (remaining_hpa2_size > hpa2_lo_size) {
remaining_hpa2_size -= hpa2_lo_size;

View File

@ -108,7 +108,7 @@ static void free_irq_vector(uint32_t irq)
vr = irqd->vector;
irqd->vector = VECTOR_INVALID;
if (vr <= NR_MAX_VECTOR && vector_to_irq[vr] == irq) {
if ((vr <= NR_MAX_VECTOR) && (vector_to_irq[vr] == irq)) {
vector_to_irq[vr] = IRQ_INVALID;
}
}

View File

@ -16,7 +16,7 @@ struct page *alloc_page(struct page_pool *pool)
spinlock_obtain(&pool->lock);
for (loop_idx = pool->last_hint_id;
loop_idx < pool->last_hint_id + pool->bitmap_size; loop_idx++) {
loop_idx < (pool->last_hint_id + pool->bitmap_size); loop_idx++) {
idx = loop_idx % pool->bitmap_size;
if (*(pool->bitmap + idx) != ~0UL) {
bit = ffz64(*(pool->bitmap + idx));

View File

@ -99,14 +99,14 @@ vioapic_set_pinstate(struct acrn_single_vioapic *vioapic, uint32_t pin, uint32_t
/* clear pin_state and deliver interrupt according to polarity */
bitmap_clear_nolock((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
if ((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_ALO)
&& old_lvl != level) {
&& (old_lvl != level)) {
vioapic_generate_intr(vioapic, pin);
}
} else {
/* set pin_state and deliver interrupt according to polarity */
bitmap_set_nolock((uint16_t)(pin & 0x3FU), &vioapic->pin_state[pin >> 6U]);
if ((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_AHI)
&& old_lvl != level) {
&& (old_lvl != level)) {
vioapic_generate_intr(vioapic, pin);
}
}
@ -277,8 +277,8 @@ static inline bool vioapic_need_intr(const struct acrn_single_vioapic *vioapic,
if (pin < vioapic->chipinfo.nr_pins) {
rte = vioapic->rtbl[pin];
lvl = (uint32_t)bitmap_test(pin & 0x3FU, &vioapic->pin_state[pin >> 6U]);
ret = !!(((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_ALO) && lvl == 0U) ||
((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_AHI) && lvl != 0U));
ret = !!(((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_ALO) && (lvl == 0U)) ||
((rte.bits.intr_polarity == IOAPIC_RTE_INTPOL_AHI) && (lvl != 0U)));
}
return ret;

View File

@ -462,7 +462,7 @@ static int32_t vpic_ocw2(const struct acrn_vpic *vpic, struct i8259_reg_state *i
/* if level ack PTDEV */
if ((i8259->elc & (1U << (isr_bit & 0x7U))) != 0U) {
vgsi = vpin_to_vgsi(vm, (primary_pic(vpic, i8259) ? isr_bit : isr_bit + 8U));
vgsi = vpin_to_vgsi(vm, (primary_pic(vpic, i8259) ? isr_bit : (isr_bit + 8U)));
ptirq_intx_ack(vm, vgsi, INTX_CTLR_PIC);
}
} else if (((val & OCW2_SL) != 0U) && i8259->rotate) {

View File

@ -461,7 +461,7 @@ static void scan_pci_hierarchy(uint8_t bus, uint64_t buses_visited[BUSES_BITMAP_
&buses_visited[current_bus_index >> 6U]);
pbdf.bits.b = current_bus_index;
if (pbdf.bits.b < phys_pci_mmcfg.start_bus || pbdf.bits.b > phys_pci_mmcfg.end_bus) {
if ((pbdf.bits.b < phys_pci_mmcfg.start_bus) || (pbdf.bits.b > phys_pci_mmcfg.end_bus)) {
continue;
}
@ -739,8 +739,8 @@ static void pci_enumerate_ext_cap(struct pci_pdev *pdev)
pcie_dev_type = (((uint8_t)pci_pdev_read_cfg(pdev->bdf,
pdev->pcie_capoff + PCIER_FLAGS, 1)) & PCIEM_FLAGS_TYPE) >> 4;
if (pcie_dev_type == PCIEM_TYPE_ENDPOINT ||
pcie_dev_type == PCIEM_TYPE_ROOT_INT_EP) {
if ((pcie_dev_type == PCIEM_TYPE_ENDPOINT) ||
(pcie_dev_type == PCIEM_TYPE_ROOT_INT_EP)) {
/* No need to enable ptm on ep device. If a PTM-capable ep is passed
 * through to a guest, the guest OS will enable it
*/