hv: dev: fix "Procedure has more than one exit point"

The IEC 61508 and ISO 26262 standards highly recommend the
single-exit rule.

Reduce the number of return statements per function.
Fix the violations that match both of the cases below:
1. The function has two return statements.
2. The first return statement returns an error code from a
   check on whether a variable is valid.

Fix the violations by restructuring them in "if ... else" form, as
sketched below.
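
For illustration only (the type, the function names, and the helper
below are made up and are not part of this patch), the rewrite pattern
applied throughout looks like this:

#include <errno.h>
#include <stdint.h>

struct foo { int32_t val; };                      /* made-up type */
static void do_work(const struct foo *p) { (void)p; } /* made-up helper */

/* Before: two exit points. */
static int32_t check_and_do_old(const struct foo *p)
{
	if (p == NULL) {
		return -EINVAL;	/* 1st return: validity check failed */
	}
	do_work(p);
	return 0;		/* 2nd return: success */
}

/* After: single exit point, in "if ... else" form. */
static int32_t check_and_do_new(const struct foo *p)
{
	int32_t ret;

	if (p == NULL) {
		ret = -EINVAL;
	} else {
		do_work(p);
		ret = 0;
	}
	return ret;		/* the only exit point */
}

Both variants return the same values; only the control-flow shape
changes.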

Tracked-On: #861
Signed-off-by: Huihuang Shi <huihuang.shi@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Huihuang Shi 2018-11-29 11:10:11 +08:00 committed by wenlingz
parent ba44417d96
commit 414860fb89
3 changed files with 106 additions and 97 deletions


@@ -55,12 +55,16 @@ ptirq_lookup_entry_by_vpin(struct acrn_vm *vm, uint8_t virt_pin, bool pic_pin)
 #ifdef CONFIG_COM_IRQ
 static bool ptdev_hv_owned_intx(const struct acrn_vm *vm, const union source_id *virt_sid)
 {
+	bool ret;
 	/* vm0 vuart pin is owned by hypervisor under debug version */
 	if (is_vm0(vm) && (virt_sid->intx_id.pin == CONFIG_COM_IRQ)) {
-		return true;
+		ret = true;
 	} else {
-		return false;
+		ret = false;
 	}
+	return ret;
 }
 #endif /* CONFIG_COM_IRQ */
@@ -496,37 +500,35 @@ void ptirq_intx_ack(struct acrn_vm *vm, uint8_t virt_pin,
 	bool pic_pin = (vpin_src == PTDEV_VPIN_PIC);
 	entry = ptirq_lookup_entry_by_vpin(vm, virt_pin, pic_pin);
-	if (entry == NULL) {
-		return;
-	}
+	if (entry != NULL) {
+		phys_irq = entry->allocated_pirq;
-	phys_irq = entry->allocated_pirq;
-	/* NOTE: only Level trigger will process EOI/ACK and if we got here
-	 * means we have this vioapic or vpic or both enabled
-	 */
-	switch (vpin_src) {
-	case PTDEV_VPIN_IOAPIC:
-		if (entry->polarity != 0U) {
-			vioapic_set_irq(vm, virt_pin, GSI_SET_HIGH);
-		} else {
-			vioapic_set_irq(vm, virt_pin, GSI_SET_LOW);
-		}
-		break;
-	case PTDEV_VPIN_PIC:
-		vpic_set_irq(vm, virt_pin, GSI_SET_LOW);
-	default:
-		/*
-		 * In this switch statement, vpin_src shall either be
-		 * PTDEV_VPIN_IOAPIC or PTDEV_VPIN_PIC.
-		 * Gracefully return if prior case clauses have not been met.
+		/* NOTE: only Level trigger will process EOI/ACK and if we got here
+		 * means we have this vioapic or vpic or both enabled
 		 */
-		break;
-	}
+		switch (vpin_src) {
+		case PTDEV_VPIN_IOAPIC:
+			if (entry->polarity != 0U) {
+				vioapic_set_irq(vm, virt_pin, GSI_SET_HIGH);
+			} else {
+				vioapic_set_irq(vm, virt_pin, GSI_SET_LOW);
+			}
+			break;
+		case PTDEV_VPIN_PIC:
+			vpic_set_irq(vm, virt_pin, GSI_SET_LOW);
+		default:
+			/*
+			 * In this switch statement, vpin_src shall either be
+			 * PTDEV_VPIN_IOAPIC or PTDEV_VPIN_PIC.
+			 * Gracefully return if prior case clauses have not been met.
+			 */
+			break;
+		}
-	dev_dbg(ACRN_DBG_PTIRQ, "dev-assign: irq=0x%x acked vr: 0x%x",
-		phys_irq, irq_to_vector(phys_irq));
-	gsi_unmask_irq(phys_irq);
+		dev_dbg(ACRN_DBG_PTIRQ, "dev-assign: irq=0x%x acked vr: 0x%x",
+			phys_irq, irq_to_vector(phys_irq));
+		gsi_unmask_irq(phys_irq);
+	}
 }
 /* Main entry for PCI device assignment with MSI and MSI-X


@@ -266,29 +266,35 @@ static void vcpu_inject_exception(struct acrn_vcpu *vcpu, uint32_t vector)
 	vcpu_retain_rip(vcpu);
 }
-static int vcpu_inject_hi_exception(struct acrn_vcpu *vcpu)
+static int32_t vcpu_inject_hi_exception(struct acrn_vcpu *vcpu)
 {
 	uint32_t vector = vcpu->arch.exception_info.exception;
+	int32_t ret;
 	if (vector == IDT_MC || vector == IDT_BP || vector == IDT_DB) {
 		vcpu_inject_exception(vcpu, vector);
-		return 1;
+		ret = 1;
+	} else {
+		ret = 0;
 	}
-	return 0;
+	return ret;
 }
-static int vcpu_inject_lo_exception(struct acrn_vcpu *vcpu)
+static int32_t vcpu_inject_lo_exception(struct acrn_vcpu *vcpu)
 {
 	uint32_t vector = vcpu->arch.exception_info.exception;
+	int32_t ret;
 	/* high priority exception already be injected */
 	if (vector <= NR_MAX_VECTOR) {
 		vcpu_inject_exception(vcpu, vector);
-		return 1;
+		ret = 1;
+	} else {
+		ret = 0;
 	}
-	return 0;
+	return ret;
 }
 /* Inject external interrupt to guest */
@@ -357,10 +363,11 @@ int interrupt_window_vmexit_handler(struct acrn_vcpu *vcpu)
 	return 0;
 }
-int external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
+int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
 {
 	uint32_t intr_info;
 	struct intr_excp_ctx ctx;
+	int32_t ret;
 	intr_info = exec_vmread32(VMX_EXIT_INT_INFO);
 	if (((intr_info & VMX_INT_INFO_VALID) == 0U) ||
@@ -368,25 +375,26 @@ int external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
 		!= VMX_INT_TYPE_EXT_INT)) {
 		pr_err("Invalid VM exit interrupt info:%x", intr_info);
 		vcpu_retain_rip(vcpu);
-		return -EINVAL;
-	}
-	ctx.vector = intr_info & 0xFFU;
-	ctx.rip = vcpu_get_rip(vcpu);
-	ctx.rflags = vcpu_get_rflags(vcpu);
-	ctx.cs = exec_vmread32(VMX_GUEST_CS_SEL);
+		ret = -EINVAL;
+	} else {
+		ctx.vector = intr_info & 0xFFU;
+		ctx.rip = vcpu_get_rip(vcpu);
+		ctx.rflags = vcpu_get_rflags(vcpu);
+		ctx.cs = exec_vmread32(VMX_GUEST_CS_SEL);
 #ifdef CONFIG_PARTITION_MODE
-	partition_mode_dispatch_interrupt(&ctx);
+		partition_mode_dispatch_interrupt(&ctx);
 #else
-	dispatch_interrupt(&ctx);
+		dispatch_interrupt(&ctx);
 #endif
-	vcpu_retain_rip(vcpu);
+		vcpu_retain_rip(vcpu);
-	TRACE_2L(TRACE_VMEXIT_EXTERNAL_INTERRUPT, ctx.vector, 0UL);
+		TRACE_2L(TRACE_VMEXIT_EXTERNAL_INTERRUPT, ctx.vector, 0UL);
+		ret = 0;
+	}
-	return 0;
+	return ret;
 }
 int acrn_handle_pending_request(struct acrn_vcpu *vcpu)


@@ -144,11 +144,15 @@ static inline uint8_t* get_ctx_table(uint32_t dmar_index, uint8_t bus_no)
 bool iommu_snoop_supported(struct acrn_vm *vm)
 {
+	bool ret;
 	if (vm->iommu == NULL || vm->iommu->iommu_snoop) {
-		return true;
+		ret = true;
+	} else {
+		ret = false;
 	}
-	return false;
+	return ret;
 }
 static struct dmar_drhd_rt dmar_drhd_units[CONFIG_MAX_IOMMU_NUM];
@@ -267,12 +271,10 @@ static void iommu_flush_cache(const struct dmar_drhd_rt *dmar_unit,
 	uint32_t i;
 	/* if vtd support page-walk coherency, no need to flush cacheline */
-	if (iommu_ecap_c(dmar_unit->ecap) != 0U) {
-		return;
-	}
-	for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
-		clflush((char *)p + i);
+	if (iommu_ecap_c(dmar_unit->ecap) == 0U) {
+		for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
+			clflush((char *)p + i);
+		}
 	}
 }
@@ -478,16 +480,14 @@ static void dmar_write_buffer_flush(struct dmar_drhd_rt *dmar_unit)
 {
 	uint32_t status;
-	if (iommu_cap_rwbf(dmar_unit->cap) == 0U) {
-		return;
+	if (iommu_cap_rwbf(dmar_unit->cap) != 0U) {
+		spinlock_obtain(&(dmar_unit->lock));
+		iommu_write32(dmar_unit, DMAR_GCMD_REG, dmar_unit->gcmd | DMA_GCMD_WBF);
+		/* read lower 32 bits to check */
+		dmar_wait_completion(dmar_unit, DMAR_GSTS_REG, DMA_GSTS_WBFS, true, &status);
+		spinlock_release(&(dmar_unit->lock));
 	}
-	spinlock_obtain(&(dmar_unit->lock));
-	iommu_write32(dmar_unit, DMAR_GCMD_REG, dmar_unit->gcmd | DMA_GCMD_WBF);
-	/* read lower 32 bits to check */
-	dmar_wait_completion(dmar_unit, DMAR_GSTS_REG, DMA_GSTS_WBFS, true, &status);
-	spinlock_release(&(dmar_unit->lock));
 }
 /*
@@ -687,19 +687,17 @@ static void fault_status_analysis(uint32_t status)
 static void fault_record_analysis(__unused uint64_t low, uint64_t high)
 {
-	if (dma_frcd_up_f(high)) {
-		return;
-	}
-	/* currently skip PASID related parsing */
-	pr_info("%s, Reason: 0x%x, SID: %x.%x.%x @0x%llx",
-		(dma_frcd_up_t(high) != 0U) ? "Read/Atomic" : "Write", dma_frcd_up_fr(high),
-		pci_bus(dma_frcd_up_sid(high)), pci_slot(dma_frcd_up_sid(high)), pci_func(dma_frcd_up_sid(high)), low);
+	if (!dma_frcd_up_f(high)) {
+		/* currently skip PASID related parsing */
+		pr_info("%s, Reason: 0x%x, SID: %x.%x.%x @0x%llx",
+			(dma_frcd_up_t(high) != 0U) ? "Read/Atomic" : "Write", dma_frcd_up_fr(high),
+			pci_bus(dma_frcd_up_sid(high)), pci_slot(dma_frcd_up_sid(high)), pci_func(dma_frcd_up_sid(high)), low);
 #if DBG_IOMMU
-	if (iommu_ecap_dt(dmar_unit->ecap) != 0U) {
-		pr_info("Address Type: 0x%x", dma_frcd_up_at(high));
-	}
+		if (iommu_ecap_dt(dmar_unit->ecap) != 0U) {
+			pr_info("Address Type: 0x%x", dma_frcd_up_at(high));
+		}
 #endif
+	}
 }
 static void dmar_fault_handler(uint32_t irq, void *data)
@@ -1011,25 +1009,25 @@ struct iommu_domain *create_iommu_domain(uint16_t vm_id, uint64_t translation_ta
 	if (translation_table == 0UL) {
 		pr_err("translation table is NULL");
-		return NULL;
+		domain = NULL;
+	} else {
+		/*
+		 * A hypercall is called to create an iommu domain for a valid VM,
+		 * and hv code limit the VM number to CONFIG_MAX_VM_NUM.
+		 * So the array iommu_domains will not be accessed out of range.
+		 */
+		domain = &iommu_domains[vmid_to_domainid(vm_id)];
+		domain->is_host = false;
+		domain->vm_id = vm_id;
+		domain->trans_table_ptr = translation_table;
+		domain->addr_width = addr_width;
+		domain->is_tt_ept = true;
+		dev_dbg(ACRN_DBG_IOMMU, "create domain [%d]: vm_id = %hu, ept@0x%x",
+			vmid_to_domainid(domain->vm_id), domain->vm_id, domain->trans_table_ptr);
 	}
-	/*
-	 * A hypercall is called to create an iommu domain for a valid VM,
-	 * and hv code limit the VM number to CONFIG_MAX_VM_NUM.
-	 * So the array iommu_domains will not be accessed out of range.
-	 */
-	domain = &iommu_domains[vmid_to_domainid(vm_id)];
-	domain->is_host = false;
-	domain->vm_id = vm_id;
-	domain->trans_table_ptr = translation_table;
-	domain->addr_width = addr_width;
-	domain->is_tt_ept = true;
-	dev_dbg(ACRN_DBG_IOMMU, "create domain [%d]: vm_id = %hu, ept@0x%x",
-		vmid_to_domainid(domain->vm_id), domain->vm_id, domain->trans_table_ptr);
 	return domain;
 }
@@ -1105,15 +1103,16 @@ void resume_iommu(void)
 int init_iommu(void)
 {
-	int ret = 0;
+	int ret;
 	ret = register_hrhd_units();
 	if (ret != 0) {
-		return ret;
+	} else {
+		do_action_for_iommus(dmar_prepare);
+		ret = 0;
 	}
-	do_action_for_iommus(dmar_prepare);
 	return ret;
 }