HV: x86: fix "expression is not Boolean"

MISRA C explicitly requires that the controlling expression of a branch
statement (if, while, ...) be Boolean, so make each such expression
explicitly Boolean instead of relying on implicit conversion.
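For illustration (an editor's sketch, not part of the original patch; all
names below are hypothetical), the recurring rewrites in this patch follow
these patterns:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define FLAG_BIT (1U << 3)  /* hypothetical flag bit */

    /* Non-compliant: controlling expressions rely on implicit
     * pointer/int-to-Boolean conversion. */
    static bool check_old(const void *ptr, int ret, uint32_t flags)
    {
        if (!ptr || ret || (flags & FLAG_BIT))
            return true;
        return false;
    }

    /* Compliant: every controlling expression is explicitly Boolean. */
    static bool check_new(const void *ptr, int ret, uint32_t flags)
    {
        if ((ptr == NULL) || (ret != 0) || ((flags & FLAG_BIT) != 0U))
            return true;
        return false;
    }

Integer literals passed where a Boolean is expected change the same way,
e.g. ASSERT(0, ...) becomes ASSERT(false, ...).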

Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Huihuang Shi 2018-06-20 13:32:59 +08:00 committed by lijinxia
parent cb56086239
commit 3ee1f8dfbf
10 changed files with 120 additions and 118 deletions


@@ -188,7 +188,7 @@ ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
static bool ptdev_hv_owned_intx(struct vm *vm, struct ptdev_intx_info *info)
{
/* vm0 pin 4 (uart) is owned by hypervisor under debug version */
if (is_vm0(vm) && vm->vuart && info->virt_pin == 4)
if (is_vm0(vm) && (vm->vuart != NULL) && info->virt_pin == 4)
return true;
else
return false;
@@ -307,8 +307,8 @@ add_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_id(
entry_id_from_msix(phys_bdf, msix_entry_index));
if (!entry) {
if (_lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index)) {
if (entry == NULL) {
if (_lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index) != NULL) {
pr_err("MSIX re-add vbdf%x", virt_bdf);
spinlock_release(&ptdev_lock);
@@ -327,7 +327,7 @@ add_msix_remapping(struct vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
entry->ptdev_intr_info.msi.msix_entry_index,
entry->vm->attr.id,
entry->virt_bdf, vm->attr.id, virt_bdf);
ASSERT(0, "msix entry pbdf%x idx%d already in vm%d",
ASSERT(false, "msix entry pbdf%x idx%d already in vm%d",
phys_bdf, msix_entry_index, entry->vm->attr.id);
spinlock_release(&ptdev_lock);
@@ -350,7 +350,7 @@ remove_msix_remapping(struct vm *vm, uint16_t virt_bdf, int msix_entry_index)
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_vmsi(vm, virt_bdf, msix_entry_index);
if (!entry)
if (entry == NULL)
goto END;
if (is_entry_active(entry))
@@ -384,8 +384,8 @@ add_intx_remapping(struct vm *vm, uint8_t virt_pin,
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_id(entry_id_from_intx(phys_pin));
if (!entry) {
if (_lookup_entry_by_vintx(vm, virt_pin, vpin_src)) {
if (entry == NULL) {
if (_lookup_entry_by_vintx(vm, virt_pin, vpin_src) != NULL) {
pr_err("INTX re-add vpin %d", virt_pin);
spinlock_release(&ptdev_lock);
return &invalid_entry;
@@ -405,7 +405,7 @@ add_intx_remapping(struct vm *vm, uint8_t virt_pin,
entry->vm->attr.id,
entry->ptdev_intr_info.intx.virt_pin,
vm->attr.id, virt_pin);
ASSERT(0, "intx entry pin%d already vm%d",
ASSERT(false, "intx entry pin%d already vm%d",
phys_pin, entry->vm->attr.id);
spinlock_release(&ptdev_lock);
@@ -431,7 +431,7 @@ static void remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin)
spinlock_obtain(&ptdev_lock);
entry = _lookup_entry_by_vintx(vm, virt_pin, vpin_src);
if (!entry)
if (entry == NULL)
goto END;
if (is_entry_active(entry)) {
@@ -512,7 +512,7 @@ void ptdev_softirq(__unused int cpu)
struct ptdev_remapping_info *entry = ptdev_dequeue_softirq();
struct vm *vm;
if (!entry)
if (entry == NULL)
break;
/* skip any inactive entry */
@@ -553,7 +553,7 @@ void ptdev_intx_ack(struct vm *vm, int virt_pin,
int phys_pin;
entry = lookup_entry_by_vintx(vm, virt_pin, vpin_src);
if (!entry)
if (entry == NULL)
return;
phys_pin = entry->ptdev_intr_info.intx.phys_pin;
@@ -602,7 +602,7 @@ int ptdev_msix_remap(struct vm *vm, uint16_t virt_bdf,
*/
entry = lookup_entry_by_vmsi(vm, virt_bdf, info->msix_entry_index);
if (!entry) {
if (entry == NULL) {
/* VM0 we add mapping dynamically */
if (is_vm0(vm)) {
entry = add_msix_remapping(vm, virt_bdf, virt_bdf,
@@ -721,7 +721,7 @@ int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info)
/* query if we have virt to phys mapping */
entry = lookup_entry_by_vintx(vm, info->virt_pin, info->vpin_src);
if (!entry) {
if (entry == NULL) {
if (is_vm0(vm)) {
bool pic_pin = (info->vpin_src == PTDEV_VPIN_PIC);
@@ -737,12 +737,12 @@ int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info)
pic_ioapic_pin_map[info->virt_pin],
pic_pin ? PTDEV_VPIN_IOAPIC
: PTDEV_VPIN_PIC);
if (entry)
if (entry != NULL)
need_switch_vpin_src = true;
}
/* entry could be updated by above switch check */
if (!entry) {
if (entry == NULL) {
/* allocate entry during first unmask */
if (vpin_masked(vm, info->virt_pin,
info->vpin_src))
@@ -792,9 +792,9 @@ int ptdev_intx_pin_remap(struct vm *vm, struct ptdev_intx_info *info)
"IOAPIC pin=%d pirq=%d vpin=%d switch from %s to %s "
"vpin=%d for vm%d", phys_pin, phys_irq,
entry->ptdev_intr_info.intx.virt_pin,
entry->ptdev_intr_info.intx.vpin_src ?
(entry->ptdev_intr_info.intx.vpin_src != 0)?
"vPIC" : "vIOAPIC",
entry->ptdev_intr_info.intx.vpin_src ?
(entry->ptdev_intr_info.intx.vpin_src != 0)?
"vIOPIC" : "vPIC",
info->virt_pin,
entry->vm->attr.id);
@@ -922,8 +922,8 @@ static void get_entry_info(struct ptdev_remapping_info *entry, char *type,
strcpy_s(type, 16, "MSI");
*dest = (entry->ptdev_intr_info.msi.pmsi_addr & 0xFF000)
>> 12;
if (entry->ptdev_intr_info.msi.pmsi_data &
APIC_TRIGMOD_LEVEL)
if ((entry->ptdev_intr_info.msi.pmsi_data &
APIC_TRIGMOD_LEVEL) != 0U)
*lvl_tm = true;
else
*lvl_tm = false;
@@ -943,7 +943,7 @@ static void get_entry_info(struct ptdev_remapping_info *entry, char *type,
strcpy_s(type, 16, "PIC");
ioapic_get_rte(phys_irq, &rte);
*dest = ((rte >> 32) & IOAPIC_RTE_DEST) >> 24;
if (rte & IOAPIC_RTE_TRGRLVL)
if ((rte & IOAPIC_RTE_TRGRLVL) != 0U)
*lvl_tm = true;
else
*lvl_tm = false;


@@ -217,7 +217,7 @@ static int hardware_detect_support(void)
}
ret = check_vmx_mmu_cap();
if (ret)
if (ret != 0)
return ret;
pr_acrnlog("hardware support HV");
@@ -465,7 +465,7 @@ void bsp_boot_init(void)
init_logmsg(CONFIG_LOG_BUF_SIZE,
CONFIG_LOG_DESTINATION);
if (HV_RC_VERSION)
if (HV_RC_VERSION != 0)
pr_acrnlog("HV version %d.%d-rc%d-%s-%s %s build by %s, start time %lluus",
HV_MAJOR_VERSION, HV_MINOR_VERSION, HV_RC_VERSION,
HV_BUILD_TIME, HV_BUILD_VERSION, HV_BUILD_TYPE,
@@ -782,7 +782,7 @@ void cpu_dead(uint32_t logical_id)
/* Halt the CPU */
do {
asm volatile ("hlt");
} while (halt);
} while (halt != 0);
}
static void cpu_set_logical_id(uint32_t logical_id)
@@ -908,7 +908,7 @@ static void cpu_xsave_init(void)
cpuid(CPUID_FEATURES, &unused, &unused, &ecx, &unused);
/* if set, update it */
if (ecx & CPUID_ECX_OSXSAVE)
if ((ecx & CPUID_ECX_OSXSAVE) != 0U)
boot_cpu_data.cpuid_leaves[FEAT_1_ECX] |=
CPUID_ECX_OSXSAVE;
}


@@ -94,13 +94,13 @@ static int get_state_tbl_idx(char *cpuname)
int i;
int count = ARRAY_SIZE(cpu_state_tbl);
if (!cpuname) {
if (cpuname == NULL) {
return -1;
}
for (i = 0; i < count; i++) {
if (!strcmp((cpu_state_tbl[i].model_name),
cpuname)) {
if (strcmp((cpu_state_tbl[i].model_name),
cpuname) == 0) {
return i;
}
}
@@ -124,7 +124,7 @@ void load_cpu_state_data(void)
state_info = &(cpu_state_tbl + tbl_idx)->state_info;
if (state_info->px_cnt && state_info->px_data) {
if ((state_info->px_cnt != 0U) && (state_info->px_data != NULL)) {
if (state_info->px_cnt > MAX_PSTATE) {
boot_cpu_data.state_info.px_cnt = MAX_PSTATE;
} else {
@@ -134,7 +134,7 @@ void load_cpu_state_data(void)
boot_cpu_data.state_info.px_data = state_info->px_data;
}
if (state_info->cx_cnt && state_info->cx_data) {
if ((state_info->cx_cnt != 0U) && (state_info->cx_data != NULL)) {
if (state_info->cx_cnt > MAX_CX_ENTRY) {
boot_cpu_data.state_info.cx_cnt = MAX_CX_ENTRY;
} else {


@@ -24,7 +24,7 @@ static inline struct vcpuid_entry *find_vcpuid_entry(struct vcpu *vcpu,
if (tmp->leaf < leaf)
continue;
if (tmp->leaf == leaf) {
if ((tmp->flags & CPUID_CHECK_SUBLEAF) &&
if ((tmp->flags & CPUID_CHECK_SUBLEAF) != 0U &&
(tmp->subleaf != subleaf))
continue;
entry = tmp;
@@ -36,7 +36,7 @@ static inline struct vcpuid_entry *find_vcpuid_entry(struct vcpu *vcpu,
if (entry == NULL) {
uint32_t limit;
if (leaf & 0x80000000)
if ((leaf & 0x80000000) != 0U)
limit = vm->vcpuid_xlevel;
else
limit = vm->vcpuid_level;
@@ -86,7 +86,7 @@ static void init_vcpuid_entry(__unused struct vm *vm,
switch (leaf) {
case 0x07:
if (!subleaf) {
if (subleaf == 0U) {
cpuid(leaf,
&entry->eax, &entry->ebx,
&entry->ecx, &entry->edx);
@@ -162,7 +162,7 @@ int set_vcpuid_entries(struct vm *vm)
init_vcpuid_entry(vm, 0, 0, 0, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
vm->vcpuid_level = limit = entry.eax;
@@ -179,7 +179,7 @@ int set_vcpuid_entries(struct vm *vm)
init_vcpuid_entry(vm, i, 0,
CPUID_CHECK_SUBLEAF, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
times = entry.eax & 0xff;
@@ -187,7 +187,7 @@ int set_vcpuid_entries(struct vm *vm)
init_vcpuid_entry(vm, i, j,
CPUID_CHECK_SUBLEAF, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
}
break;
@@ -206,7 +206,7 @@ int set_vcpuid_entries(struct vm *vm)
if (i == 0x0d && entry.eax == 0)
continue;
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
}
break;
@@ -214,7 +214,7 @@ int set_vcpuid_entries(struct vm *vm)
default:
init_vcpuid_entry(vm, i, 0, 0, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
break;
}
@@ -222,24 +222,24 @@ int set_vcpuid_entries(struct vm *vm)
init_vcpuid_entry(vm, 0x40000000, 0, 0, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
init_vcpuid_entry(vm, 0x40000010, 0, 0, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
init_vcpuid_entry(vm, 0x80000000, 0, 0, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
vm->vcpuid_xlevel = limit = entry.eax;
for (i = 0x80000001; i <= limit; i++) {
init_vcpuid_entry(vm, i, 0, 0, &entry);
result = set_vcpuid_entry(vm, &entry);
if (result)
if (result != 0)
return result;
}
@@ -258,7 +258,7 @@ void guest_cpuid(struct vcpu *vcpu,
struct vcpuid_entry *entry =
find_vcpuid_entry(vcpu, leaf, subleaf);
if (entry) {
if (entry != NULL) {
*eax = entry->eax;
*ebx = entry->ebx;
*ecx = entry->ecx;
@@ -301,15 +301,15 @@ void guest_cpuid(struct vcpu *vcpu,
*ecx &= ~CPUID_ECX_VMX;
/*no xsave support for guest if it is not enabled on host*/
if (!(*ecx & CPUID_ECX_OSXSAVE))
if ((*ecx & CPUID_ECX_OSXSAVE) == 0U)
*ecx &= ~CPUID_ECX_XSAVE;
*ecx &= ~CPUID_ECX_OSXSAVE;
if (*ecx & CPUID_ECX_XSAVE) {
if ((*ecx & CPUID_ECX_XSAVE) != 0U) {
uint64_t cr4;
/*read guest CR4*/
cr4 = exec_vmread(VMX_GUEST_CR4);
if (cr4 & CR4_OSXSAVE)
if ((cr4 & CR4_OSXSAVE) != 0U)
*ecx |= CPUID_ECX_OSXSAVE;
}
break;


@@ -23,8 +23,8 @@ static uint64_t find_next_table(uint32_t table_offset, void *table_base)
+ (table_offset * IA32E_COMM_ENTRY_SIZE));
/* If bit 7 is set, entry is not a subtable. */
if ((table_entry & IA32E_PDPTE_PS_BIT)
|| (table_entry & IA32E_PDE_PS_BIT))
if ((table_entry & IA32E_PDPTE_PS_BIT) != 0U
|| (table_entry & IA32E_PDE_PS_BIT) != 0U)
return sub_table_addr;
/* Set table present bits to any of the read/write/execute bits */
@@ -79,11 +79,11 @@ void free_ept_mem(void *pml4_addr)
pde_addr));
/* Free page table entry table */
if (pte_addr)
if (pte_addr != NULL)
free_paging_struct(pte_addr);
}
/* Free page directory entry table */
if (pde_addr)
if (pde_addr != NULL)
free_paging_struct(pde_addr);
}
free_paging_struct(pdpt_addr);
@@ -103,7 +103,7 @@ void destroy_ept(struct vm *vm)
* - trusty is enabled. But not initialized yet.
* Check vm->arch_vm.sworld_eptp.
*/
if (vm->sworld_control.sworld_enabled && vm->arch_vm.sworld_eptp) {
if (vm->sworld_control.sworld_enabled && (vm->arch_vm.sworld_eptp != 0U)) {
free_ept_mem(HPA2HVA(vm->arch_vm.sworld_eptp));
vm->arch_vm.sworld_eptp = 0;
}
@@ -130,7 +130,7 @@ uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size)
vm->attr.boot_idx, gpa);
}
if (size)
if (size != NULL)
*size = pg_size;
return hpa;
@@ -172,12 +172,12 @@ int is_ept_supported(void)
tmp64 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
/* Check if secondary processor based VM control is available. */
if (tmp64 & MMU_MEM_ATTR_BIT_EXECUTE_DISABLE) {
if ((tmp64 & MMU_MEM_ATTR_BIT_EXECUTE_DISABLE) != 0U) {
/* Read primary processor based VM control. */
tmp64 = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
/* Check if EPT is supported. */
if (tmp64 & (((uint64_t)VMX_PROCBASED_CTLS2_EPT) << 32)) {
if ((tmp64 & (((uint64_t)VMX_PROCBASED_CTLS2_EPT) << 32)) != 0U) {
/* EPT is present. */
status = 1;
} else {
@@ -213,7 +213,7 @@ int register_mmio_emulation_handler(struct vm *vm,
struct mem_io_node *mmio_node;
if (vm->hw.created_vcpus > 0 && vm->hw.vcpu_array[0]->launched) {
ASSERT(0, "register mmio handler after vm launched");
ASSERT(false, "register mmio handler after vm launched");
return status;
}
@@ -224,7 +224,7 @@ int register_mmio_emulation_handler(struct vm *vm,
(struct mem_io_node *)calloc(1, sizeof(struct mem_io_node));
/* Ensure memory successfully allocated */
if (mmio_node) {
if (mmio_node != NULL) {
/* Fill in information for this node */
mmio_node->read_write = read_write;
mmio_node->handler_private_data = handler_private_data;
@@ -334,7 +334,7 @@ int ept_violation_vmexit_handler(struct vcpu *vcpu)
exit_qual = vcpu->arch_vcpu.exit_qualification;
/* Specify if read or write operation */
if (exit_qual & 0x2) {
if ((exit_qual & 0x2) != 0U) {
/* Write operation */
mmio->read_write = HV_MEM_IO_WRITE;
@@ -467,7 +467,7 @@ int ept_mmap(struct vm *vm, uint64_t hpa,
/* Setup memory map parameters */
map_params.page_table_type = PTT_EPT;
if (vm->arch_vm.nworld_eptp) {
if (vm->arch_vm.nworld_eptp != 0U) {
map_params.pml4_base = HPA2HVA(vm->arch_vm.nworld_eptp);
map_params.pml4_inverted = HPA2HVA(vm->arch_vm.m2p);
} else {
@@ -485,7 +485,7 @@ int ept_mmap(struct vm *vm, uint64_t hpa,
unmap_mem(&map_params, (void *)hpa, (void *)gpa,
size, prot);
} else
ASSERT(0, "unknown map type");
ASSERT(false, "unknown map type");
foreach_vcpu(i, vm, vcpu) {
vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);


@@ -60,7 +60,7 @@ static int is_guest_irq_enabled(struct vcpu *vcpu)
/* Read the RFLAGS of the guest */
guest_rflags = cur_context->rflags;
/* Check the RFLAGS[IF] bit first */
if (guest_rflags & HV_ARCH_VCPU_RFLAGS_IF) {
if ((guest_rflags & HV_ARCH_VCPU_RFLAGS_IF) != 0U) {
/* Interrupts are allowed */
/* Check for temporarily disabled interrupts */
guest_state = exec_vmread(VMX_GUEST_INTERRUPTIBILITY_INFO);
@@ -157,7 +157,7 @@ static int vcpu_do_pending_extint(struct vcpu *vcpu)
/* check if there is valid interrupt from vPIC, if yes just inject it */
/* PIC only connect with primary CPU */
primary = get_primary_vcpu(vm);
if (vm->vpic && vcpu == primary) {
if ((vm->vpic != NULL) && vcpu == primary) {
vpic_pending_intr(vcpu->vm, &vector);
if (vector <= NR_MAX_VECTOR) {
@@ -234,7 +234,7 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
vcpu->arch_vcpu.exception_info.exception = vector;
if (exception_type[vector] & EXCEPTION_ERROR_CODE_VALID)
if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U)
vcpu->arch_vcpu.exception_info.error = err_code;
else
vcpu->arch_vcpu.exception_info.error = 0;
@@ -244,7 +244,7 @@ int vcpu_queue_exception(struct vcpu *vcpu, uint32_t vector,
static void _vcpu_inject_exception(struct vcpu *vcpu, uint32_t vector)
{
if (exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) {
if ((exception_type[vector] & EXCEPTION_ERROR_CODE_VALID) != 0U) {
exec_vmwrite(VMX_ENTRY_EXCEPTION_ERROR_CODE,
vcpu->arch_vcpu.exception_info.error);
}
@@ -314,7 +314,7 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu)
TRACE_2L(TRC_VMEXIT_INTERRUPT_WINDOW, 0, 0);
if (!vcpu)
if (vcpu == NULL)
return -1;
if (vcpu_pending_request(vcpu)) {
@@ -341,7 +341,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)
struct intr_excp_ctx ctx;
intr_info = exec_vmread(VMX_EXIT_INT_INFO);
if ((!(intr_info & VMX_INT_INFO_VALID)) ||
if (((intr_info & VMX_INT_INFO_VALID) == 0U) ||
(((intr_info & VMX_INT_TYPE_MASK) >> 8)
!= VMX_INT_TYPE_EXT_INT)) {
pr_err("Invalid VM exit interrupt info:%x", intr_info);
@@ -383,7 +383,8 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
/* handling cancelled event injection when vcpu is switched out */
if (vcpu->arch_vcpu.inject_event_pending) {
if (vcpu->arch_vcpu.inject_info.intr_info & (EXCEPTION_ERROR_CODE_VALID << 8))
if ((vcpu->arch_vcpu.inject_info.intr_info &
(EXCEPTION_ERROR_CODE_VALID << 8)) != 0U)
exec_vmwrite(VMX_ENTRY_EXCEPTION_ERROR_CODE,
vcpu->arch_vcpu.inject_info.error_code);
@@ -396,7 +397,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
/* SDM Vol 3 - table 6-2, inject high priority exception before
* maskable hardware interrupt */
if (vcpu_inject_hi_exception(vcpu))
if (vcpu_inject_hi_exception(vcpu) != 0)
goto INTR_WIN;
/* inject NMI before maskable hardware interrupt */
@@ -415,14 +416,14 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
* - external interrupt, if IF clear, will keep in IDT_VEC_INFO_FIELD
* at next vm exit?
*/
if (vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) {
if ((vcpu->arch_vcpu.idt_vectoring_info & VMX_INT_INFO_VALID) != 0U) {
exec_vmwrite(VMX_ENTRY_INT_INFO_FIELD,
vcpu->arch_vcpu.idt_vectoring_info);
goto INTR_WIN;
}
/* Guest interruptable or not */
if (is_guest_irq_enabled(vcpu)) {
if (is_guest_irq_enabled(vcpu) != 0) {
/* Inject external interrupt first */
if (bitmap_test_and_clear(ACRN_REQUEST_EXTINT,
pending_req_bits)) {
@@ -441,7 +442,7 @@ int acrn_handle_pending_request(struct vcpu *vcpu)
}
/* SDM Vol3 table 6-2, inject lowpri exception */
if (vcpu_inject_lo_exception(vcpu))
if (vcpu_inject_lo_exception(vcpu) != 0)
goto INTR_WIN;
INTR_WIN:
@@ -471,10 +472,10 @@ void cancel_event_injection(struct vcpu *vcpu)
* The event will be re-injected in next acrn_handle_pending_request
* call.
*/
if (intinfo & VMX_INT_INFO_VALID) {
if ((intinfo & VMX_INT_INFO_VALID) != 0U) {
vcpu->arch_vcpu.inject_event_pending = true;
if (intinfo & (EXCEPTION_ERROR_CODE_VALID << 8))
if ((intinfo & (EXCEPTION_ERROR_CODE_VALID << 8)) != 0U)
vcpu->arch_vcpu.inject_info.error_code =
exec_vmread(VMX_ENTRY_EXCEPTION_ERROR_CODE);
@@ -502,13 +503,13 @@ int exception_vmexit_handler(struct vcpu *vcpu)
/* Obtain VM-Exit information field pg 2912 */
intinfo = exec_vmread(VMX_EXIT_INT_INFO);
if (intinfo & VMX_INT_INFO_VALID) {
if ((intinfo & VMX_INT_INFO_VALID) != 0U) {
exception_vector = intinfo & 0xFF;
/* Check if exception caused by the guest is a HW exception.
* If the exit occurred due to a HW exception obtain the
* error code to be conveyed to get via the stack
*/
if (intinfo & VMX_INT_INFO_ERR_CODE_VALID) {
if ((intinfo & VMX_INT_INFO_ERR_CODE_VALID) != 0U) {
int_err_code = exec_vmread(VMX_EXIT_INT_ERROR_CODE);
/* get current privilege level and fault address */


@@ -38,7 +38,7 @@ static void dm_emulate_pio_pre(struct vcpu *vcpu, uint64_t exit_qual,
uint32_t sz, uint64_t req_value)
{
vcpu->req.type = REQ_PORTIO;
if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual))
if (VM_EXIT_IO_INSTRUCTION_ACCESS_DIRECTION(exit_qual) != 0U)
vcpu->req.reqs.pio_request.direction = REQUEST_READ;
else
vcpu->req.reqs.pio_request.direction = REQUEST_WRITE;
@@ -120,7 +120,7 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
if (status != 0) {
pr_fatal("Err:IO %s access to port 0x%04x, size=%u",
direction ? "read" : "write", port, sz);
(direction != 0) ? "read" : "write", port, sz);
}
@@ -129,7 +129,7 @@ int io_instr_vmexit_handler(struct vcpu *vcpu)
static void register_io_handler(struct vm *vm, struct vm_io_handler *hdlr)
{
if (vm->arch_vm.io_handler)
if (vm->arch_vm.io_handler != NULL)
hdlr->next = vm->arch_vm.io_handler;
vm->arch_vm.io_handler = hdlr;
@@ -140,7 +140,7 @@ static void empty_io_handler_list(struct vm *vm)
struct vm_io_handler *handler = vm->arch_vm.io_handler;
struct vm_io_handler *tmp;
while (handler) {
while (handler != NULL) {
tmp = handler;
handler = tmp->next;
free(tmp);
@@ -165,7 +165,7 @@ void allow_guest_io_access(struct vm *vm, uint32_t address, uint32_t nbytes)
b = vm->arch_vm.iobitmap[0];
for (i = 0; i < nbytes; i++) {
if (address & 0x8000)
if ((address & 0x8000) != 0U)
b = vm->arch_vm.iobitmap[1];
a = address & 0x7fff;
b[a >> 5] &= ~(1 << (a & 0x1f));
@@ -181,7 +181,7 @@ static void deny_guest_io_access(struct vm *vm, uint32_t address, uint32_t nbyte
b = vm->arch_vm.iobitmap[0];
for (i = 0; i < nbytes; i++) {
if (address & 0x8000)
if ((address & 0x8000) != 0U)
b = vm->arch_vm.iobitmap[1];
a = address & 0x7fff;
b[a >> 5] |= (1 << (a & 0x1f));
@@ -216,7 +216,8 @@ void setup_io_bitmap(struct vm *vm)
vm->arch_vm.iobitmap[0] = alloc_page();
vm->arch_vm.iobitmap[1] = alloc_page();
ASSERT(vm->arch_vm.iobitmap[0] && vm->arch_vm.iobitmap[1], "");
ASSERT((vm->arch_vm.iobitmap[0] != NULL) &&
(vm->arch_vm.iobitmap[1] != NULL), "");
if (is_vm0(vm)) {
memset(vm->arch_vm.iobitmap[0], 0x00, CPU_PAGE_SIZE);


@@ -205,7 +205,7 @@ static void ioapic_set_routing(uint32_t gsi, uint32_t vr)
rte = create_rte_for_gsi_irq(gsi, vr);
ioapic_set_rte_entry(addr, gsi_table[gsi].pin, &rte);
if (rte.lo_32 & IOAPIC_RTE_TRGRMOD)
if ((rte.lo_32 & IOAPIC_RTE_TRGRMOD) != 0U)
update_irq_handler(gsi, handle_level_interrupt_common);
else
update_irq_handler(gsi, common_handler_edge);
@@ -421,7 +421,7 @@ void get_rte_info(struct ioapic_rte *rte, bool *mask, bool *irr,
*irr = ((rte->lo_32 & IOAPIC_RTE_REM_IRR) == IOAPIC_RTE_REM_IRR);
*phys = ((rte->lo_32 & IOAPIC_RTE_DESTMOD) == IOAPIC_RTE_DESTPHY);
*delmode = rte->lo_32 & IOAPIC_RTE_DELMOD;
*level = rte->lo_32 & IOAPIC_RTE_TRGRLVL ? true : false;
*level = ((rte->lo_32 & IOAPIC_RTE_TRGRLVL) != 0U) ? true : false;
*vector = rte->lo_32 & IOAPIC_RTE_INTVEC;
*dest = rte->hi_32 >> APIC_ID_SHIFT;
}

View File

@@ -48,7 +48,7 @@ static void init_irq_desc(void)
irq_desc_base = alloc_pages(page_num);
ASSERT(irq_desc_base, "page alloc failed!");
ASSERT(irq_desc_base != NULL, "page alloc failed!");
memset(irq_desc_base, 0, page_num * CPU_PAGE_SIZE);
for (i = 0; i < NR_MAX_IRQS; i++) {
@@ -203,14 +203,14 @@ irq_desc_append_dev(struct irq_desc *desc, void *node, bool share)
* ioapic setup.
* caller can later update it with update_irq_handler()
*/
if (!desc->irq_handler)
if (desc->irq_handler == NULL)
desc->irq_handler = common_handler_edge;
} else if (!share || desc->used == IRQ_ASSIGNED_NOSHARE) {
/* dev node added failed */
added = false;
} else {
/* dev_list point to last valid node */
while (dev_list->next)
while (dev_list->next != NULL)
dev_list = dev_list->next;
/* add node */
dev_list->next = node;
@@ -421,7 +421,7 @@ void handle_spurious_interrupt(uint32_t vector)
pr_warn("Spurious vector: 0x%x.", vector);
if (spurious_handler)
if (spurious_handler != NULL)
spurious_handler(vector);
}
@@ -441,7 +441,7 @@ void dispatch_interrupt(struct intr_excp_ctx *ctx)
if (vr != desc->vector)
goto ERR;
if (desc->used == IRQ_NOT_ASSIGNED || !desc->irq_handler) {
if (desc->used == IRQ_NOT_ASSIGNED || desc->irq_handler == NULL) {
/* mask irq if possible */
goto ERR;
}
@@ -479,8 +479,8 @@ int handle_level_interrupt_common(struct irq_desc *desc,
/* Send EOI to LAPIC/IOAPIC IRR */
send_lapic_eoi();
while (dev) {
if (dev->dev_handler)
while (dev != NULL) {
if (dev->dev_handler != NULL)
dev->dev_handler(desc->irq, dev->dev_data);
dev = dev->next;
}
@@ -515,8 +515,8 @@ int common_handler_edge(struct irq_desc *desc, __unused void *handler_data)
/* Send EOI to LAPIC/IOAPIC IRR */
send_lapic_eoi();
while (dev) {
if (dev->dev_handler)
while (dev != NULL) {
if (dev->dev_handler != NULL)
dev->dev_handler(desc->irq, dev->dev_data);
dev = dev->next;
}
@@ -552,8 +552,8 @@ int common_dev_handler_level(struct irq_desc *desc, __unused void *handler_data)
/* Send EOI to LAPIC/IOAPIC IRR */
send_lapic_eoi();
while (dev) {
if (dev->dev_handler)
while (dev != NULL) {
if (dev->dev_handler != NULL)
dev->dev_handler(desc->irq, dev->dev_data);
dev = dev->next;
}
@@ -573,8 +573,8 @@ int quick_handler_nolock(struct irq_desc *desc, __unused void *handler_data)
/* Send EOI to LAPIC/IOAPIC IRR */
send_lapic_eoi();
while (dev) {
if (dev->dev_handler)
while (dev != NULL) {
if (dev->dev_handler != NULL)
dev->dev_handler(desc->irq, dev->dev_data);
dev = dev->next;
}
@@ -621,7 +621,7 @@ void unregister_handler_common(struct dev_handler_node *node)
goto UNLOCK_EXIT;
}
while (head->next) {
while (head->next != NULL) {
if (head->next == node)
break;
head = head->next;


@@ -222,7 +222,7 @@ static inline uint32_t check_page_table_present(int page_table_type,
table_entry &= (IA32E_COMM_P_BIT);
}
return (table_entry) ? PT_PRESENT : PT_NOT_PRESENT;
return (table_entry != 0U) ? PT_PRESENT : PT_NOT_PRESENT;
}
static uint32_t map_mem_region(void *vaddr, void *paddr,
@@ -280,7 +280,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
/* If not a EPT entry, see if the PAT bit is set for PDPT entry
*/
if ((table_type == PTT_HOST) && (attr & IA32E_PDPTE_PAT_BIT)) {
if ((table_type == PTT_HOST) && (attr & IA32E_PDPTE_PAT_BIT) != 0U) {
/* The PAT bit is set; Clear it and set the page table
* PAT bit instead
*/
@@ -315,7 +315,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
break;
case PT_MISCFG_PRESENT:
default:
ASSERT(0, "entry misconfigurated present bits");
ASSERT(false, "entry misconfigurated present bits");
return 0;
}
@@ -399,7 +399,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
break;
}
default:
ASSERT(0, "Bad memory map request type");
ASSERT(false, "Bad memory map request type");
return 0;
}
@@ -414,9 +414,9 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
* modified after AP start in the future.
*/
if ((phy_cpu_num != 0) &&
(pcpu_active_bitmap &
((pcpu_active_bitmap &
((1UL << phy_cpu_num) - 1))
!= (1UL << CPU_BOOT_ID)) {
!= (1UL << CPU_BOOT_ID))) {
panic("need shootdown for invlpg");
}
inv_tlb_one_page(vaddr);
@@ -478,7 +478,7 @@ static int get_table_entry(void *addr, void *table_base,
uint32_t table_offset;
if (table_base == NULL || table_level >= IA32E_UNKNOWN) {
ASSERT(0, "Incorrect Arguments");
ASSERT(false, "Incorrect Arguments");
return -EINVAL;
}
@@ -504,7 +504,7 @@ static void *walk_paging_struct(void *addr, void *table_base,
if (table_base == NULL || table_level >= IA32E_UNKNOWN
|| map_params == NULL) {
ASSERT(0, "Incorrect Arguments");
ASSERT(false, "Incorrect Arguments");
return NULL;
}
@@ -536,7 +536,7 @@ static void *walk_paging_struct(void *addr, void *table_base,
/* Error: Unable to find table memory necessary
* to map memory
*/
ASSERT(0, "Fail to alloc table memory "
ASSERT(false, "Fail to alloc table memory "
"for map memory");
return NULL;
@@ -652,7 +652,7 @@ void *alloc_paging_struct(void)
/* Allocate a page from Hypervisor heap */
ptr = alloc_page();
ASSERT(ptr, "page alloc failed!");
ASSERT(ptr != NULL, "page alloc failed!");
memset(ptr, 0, CPU_PAGE_SIZE);
return ptr;
@@ -660,7 +660,7 @@ void *alloc_paging_struct(void)
void free_paging_struct(void *ptr)
{
if (ptr) {
if (ptr != NULL) {
memset(ptr, 0, CPU_PAGE_SIZE);
free(ptr);
}
@@ -692,21 +692,21 @@ uint64_t config_page_table_attr(struct map_params *map_params, uint32_t flags)
/* Convert generic memory flags to architecture specific attributes */
/* Check if read access */
if (flags & MMU_MEM_ATTR_READ) {
if ((flags & MMU_MEM_ATTR_READ) != 0U) {
/* Configure for read access */
attr |= ((table_type == PTT_EPT)
? IA32E_EPT_R_BIT : 0);
}
/* Check for write access */
if (flags & MMU_MEM_ATTR_WRITE) {
if ((flags & MMU_MEM_ATTR_WRITE) != 0U) {
/* Configure for write access */
attr |= ((table_type == PTT_EPT)
? IA32E_EPT_W_BIT : MMU_MEM_ATTR_BIT_READ_WRITE);
}
/* Check for execute access */
if (flags & MMU_MEM_ATTR_EXECUTE) {
if ((flags & MMU_MEM_ATTR_EXECUTE) != 0U) {
/* Configure for execute (EPT only) */
attr |= ((table_type == PTT_EPT)
? IA32E_EPT_X_BIT : 0);
@@ -725,19 +725,19 @@ uint64_t config_page_table_attr(struct map_params *map_params, uint32_t flags)
}
/* Check for cache / memory types */
if (flags & MMU_MEM_ATTR_WB_CACHE) {
if ((flags & MMU_MEM_ATTR_WB_CACHE) != 0U) {
/* Configure for write back cache */
attr |= ((table_type == PTT_EPT)
? IA32E_EPT_WB : MMU_MEM_ATTR_TYPE_CACHED_WB);
} else if (flags & MMU_MEM_ATTR_WT_CACHE) {
} else if ((flags & MMU_MEM_ATTR_WT_CACHE) != 0U) {
/* Configure for write through cache */
attr |= ((table_type == PTT_EPT)
? IA32E_EPT_WT : MMU_MEM_ATTR_TYPE_CACHED_WT);
} else if (flags & MMU_MEM_ATTR_UNCACHED) {
} else if ((flags & MMU_MEM_ATTR_UNCACHED) != 0U) {
/* Configure for uncached */
attr |= ((table_type == PTT_EPT)
? IA32E_EPT_UNCACHED : MMU_MEM_ATTR_TYPE_UNCACHED);
} else if (flags & MMU_MEM_ATTR_WC) {
} else if ((flags & MMU_MEM_ATTR_WC) != 0U) {
/* Configure for write combining */
attr |= ((table_type == PTT_EPT)
? IA32E_EPT_WC : MMU_MEM_ATTR_TYPE_WRITE_COMBINED);
@@ -804,7 +804,7 @@ int obtain_last_page_table_entry(struct map_params *map_params,
entry->entry_val = table_entry;
return 0;
}
if (table_entry & IA32E_PDPTE_PS_BIT) {
if ((table_entry & IA32E_PDPTE_PS_BIT) != 0U) {
/* 1GB page size, return the base addr of the pg entry*/
entry->entry_level = IA32E_PDPT;
entry->entry_base = table_addr;
@@ -837,7 +837,7 @@ int obtain_last_page_table_entry(struct map_params *map_params,
entry->entry_val = table_entry;
return 0;
}
if (table_entry & IA32E_PDE_PS_BIT) {
if ((table_entry & IA32E_PDE_PS_BIT) != 0U) {
/* 2MB page size, return the base addr of the pg entry*/
entry->entry_level = IA32E_PD;
entry->entry_base = table_addr;
@@ -977,7 +977,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
* Unable to find table memory necessary to map memory
*/
pr_err("Fail to find table memory for map memory");
ASSERT(0, "fail to alloc table memory for map memory");
ASSERT(false, "fail to alloc table memory for map memory");
return 0;
}
@@ -1047,7 +1047,7 @@ static int modify_paging(struct map_params *map_params, void *paddr,
|| (map_params == NULL)) {
pr_err("%s: vaddr=0x%llx size=0x%llx req_type=0x%lx",
__func__, vaddr, size, request_type);
ASSERT(0, "Incorrect Arguments");
ASSERT(false, "Incorrect Arguments");
return -EINVAL;
}