HV: handle adding ptdev entry failure cases

Handle failure cases when adding a pass-through device entry, instead of
calling ASSERT, to avoid crashing the hypervisor.

Tracked-On: #1860
Signed-off-by: Minggui Cao <minggui.cao@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
This commit is contained in:
Minggui Cao 2018-11-20 16:14:19 +08:00 committed by lijinxia
parent fe08a44e83
commit e350abe40d
3 changed files with 38 additions and 52 deletions

View File

@ -192,9 +192,8 @@ ptdev_build_physical_rte(struct acrn_vm *vm,
* - if the entry already be added by vm0, then change the owner to current vm * - if the entry already be added by vm0, then change the owner to current vm
* - if the entry already be added by other vm, return NULL * - if the entry already be added by other vm, return NULL
*/ */
static struct ptdev_remapping_info * static struct ptdev_remapping_info *add_msix_remapping(struct acrn_vm *vm,
add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t phys_bdf, uint16_t virt_bdf, uint16_t phys_bdf, uint32_t entry_nr)
uint32_t entry_nr)
{ {
struct ptdev_remapping_info *entry; struct ptdev_remapping_info *entry;
DEFINE_MSI_SID(phys_sid, phys_bdf, entry_nr); DEFINE_MSI_SID(phys_sid, phys_bdf, entry_nr);
@ -203,45 +202,41 @@ add_msix_remapping(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t phys_bdf,
spinlock_obtain(&ptdev_lock); spinlock_obtain(&ptdev_lock);
entry = ptdev_lookup_entry_by_sid(PTDEV_INTR_MSI, &phys_sid, NULL); entry = ptdev_lookup_entry_by_sid(PTDEV_INTR_MSI, &phys_sid, NULL);
if (entry == NULL) { if (entry == NULL) {
if (ptdev_lookup_entry_by_sid(PTDEV_INTR_MSI, if (ptdev_lookup_entry_by_sid(PTDEV_INTR_MSI, &virt_sid, vm) != NULL) {
&virt_sid, vm) != NULL) {
pr_err("MSIX re-add vbdf%x", virt_bdf); pr_err("MSIX re-add vbdf%x", virt_bdf);
spinlock_release(&ptdev_lock); spinlock_release(&ptdev_lock);
return NULL; return NULL;
} }
entry = alloc_entry(vm, PTDEV_INTR_MSI); entry = alloc_entry(vm, PTDEV_INTR_MSI);
entry->phys_sid.value = phys_sid.value; entry->phys_sid.value = phys_sid.value;
entry->virt_sid.value = virt_sid.value; entry->virt_sid.value = virt_sid.value;
/* update msi source and active entry */ /* update msi source and active entry */
ptdev_activate_entry(entry, IRQ_INVALID); if (ptdev_activate_entry(entry, IRQ_INVALID) < 0) {
release_entry(entry);
entry = NULL;
}
} else if (entry->vm != vm) { } else if (entry->vm != vm) {
if (is_vm0(entry->vm)) { if (is_vm0(entry->vm)) {
entry->vm = vm; entry->vm = vm;
entry->virt_sid.msi_id.bdf = virt_bdf; entry->virt_sid.msi_id.bdf = virt_bdf;
} else { } else {
pr_err("MSIX pbdf%x idx=%d already in vm%d with vbdf%x," pr_err("MSIX pbdf%x idx=%d already in vm%d with vbdf%x, not able to add into vm%d with vbdf%x",
" not able to add into vm%d with vbdf%x", entry->phys_sid.msi_id.bdf, entry->phys_sid.msi_id.entry_nr, entry->vm->vm_id,
entry->phys_sid.msi_id.bdf, entry->virt_sid.msi_id.bdf, vm->vm_id, virt_bdf);
entry->phys_sid.msi_id.entry_nr,
entry->vm->vm_id, entry->virt_sid.msi_id.bdf,
vm->vm_id, virt_bdf);
ASSERT(false, "msix entry pbdf%x idx%d already in vm%d",
phys_bdf, entry_nr, entry->vm->vm_id);
spinlock_release(&ptdev_lock); pr_err("msix entry pbdf%x idx%d already in vm%d", phys_bdf, entry_nr, entry->vm->vm_id);
return NULL; entry = NULL;
} }
} else { } else {
/* The mapping has already been added to the VM. No action /* The mapping has already been added to the VM. No action
* required. */ * required. */
} }
spinlock_release(&ptdev_lock);
dev_dbg(ACRN_DBG_IRQ, spinlock_release(&ptdev_lock);
"VM%d MSIX add vector mapping vbdf%x:pbdf%x idx=%d", dev_dbg(ACRN_DBG_IRQ, "VM%d MSIX add vector mapping vbdf%x:pbdf%x idx=%d",
entry->vm->vm_id, virt_bdf, phys_bdf, entry_nr); vm->vm_id, virt_bdf, phys_bdf, entry_nr);
return entry; return entry;
} }
@ -281,28 +276,24 @@ END:
* - if the entry already be added by vm0, then change the owner to current vm * - if the entry already be added by vm0, then change the owner to current vm
* - if the entry already be added by other vm, return NULL * - if the entry already be added by other vm, return NULL
*/ */
static struct ptdev_remapping_info * static struct ptdev_remapping_info *add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin,
add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin,
uint8_t phys_pin, bool pic_pin) uint8_t phys_pin, bool pic_pin)
{ {
struct ptdev_remapping_info *entry; struct ptdev_remapping_info *entry;
enum ptdev_vpin_source vpin_src = enum ptdev_vpin_source vpin_src = pic_pin ? PTDEV_VPIN_PIC : PTDEV_VPIN_IOAPIC;
pic_pin ? PTDEV_VPIN_PIC : PTDEV_VPIN_IOAPIC;
DEFINE_IOAPIC_SID(phys_sid, phys_pin, 0); DEFINE_IOAPIC_SID(phys_sid, phys_pin, 0);
DEFINE_IOAPIC_SID(virt_sid, virt_pin, vpin_src); DEFINE_IOAPIC_SID(virt_sid, virt_pin, vpin_src);
uint32_t phys_irq = pin_to_irq(phys_pin); uint32_t phys_irq = pin_to_irq(phys_pin);
if (!irq_is_gsi(phys_irq)) { if (!irq_is_gsi(phys_irq)) {
pr_err("%s, invalid phys_pin: %d <-> irq: 0x%x is not a GSI\n", pr_err("%s, invalid phys_pin: %d <-> irq: 0x%x is not a GSI\n", __func__, phys_pin, phys_irq);
__func__, phys_pin, phys_irq);
return NULL; return NULL;
} }
spinlock_obtain(&ptdev_lock); spinlock_obtain(&ptdev_lock);
entry = ptdev_lookup_entry_by_sid(PTDEV_INTR_INTX, &phys_sid, NULL); entry = ptdev_lookup_entry_by_sid(PTDEV_INTR_INTX, &phys_sid, NULL);
if (entry == NULL) { if (entry == NULL) {
if (ptdev_lookup_entry_by_sid(PTDEV_INTR_INTX, if (ptdev_lookup_entry_by_sid(PTDEV_INTR_INTX, &virt_sid, vm) != NULL) {
&virt_sid, vm) != NULL) {
pr_err("INTX re-add vpin %d", virt_pin); pr_err("INTX re-add vpin %d", virt_pin);
spinlock_release(&ptdev_lock); spinlock_release(&ptdev_lock);
return NULL; return NULL;
@ -312,20 +303,18 @@ add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin,
entry->virt_sid.value = virt_sid.value; entry->virt_sid.value = virt_sid.value;
/* activate entry */ /* activate entry */
ptdev_activate_entry(entry, phys_irq); if (ptdev_activate_entry(entry, phys_irq) < 0) {
release_entry(entry);
entry = NULL;
}
} else if (entry->vm != vm) { } else if (entry->vm != vm) {
if (is_vm0(entry->vm)) { if (is_vm0(entry->vm)) {
entry->vm = vm; entry->vm = vm;
entry->virt_sid.value = virt_sid.value; entry->virt_sid.value = virt_sid.value;
} else { } else {
pr_err("INTX pin%d already in vm%d with vpin%d," pr_err("INTX pin%d already in vm%d with vpin%d, not able to add into vm%d with vpin%d",
" not able to add into vm%d with vpin%d", phys_pin, entry->vm->vm_id, entry->virt_sid.intx_id.pin, vm->vm_id, virt_pin);
phys_pin, entry->vm->vm_id, entry = NULL;
entry->virt_sid.intx_id.pin,
vm->vm_id, virt_pin);
spinlock_release(&ptdev_lock);
return NULL;
} }
} else { } else {
/* The mapping has already been added to the VM. No action /* The mapping has already been added to the VM. No action
@ -333,10 +322,7 @@ add_intx_remapping(struct acrn_vm *vm, uint8_t virt_pin,
} }
spinlock_release(&ptdev_lock); spinlock_release(&ptdev_lock);
dev_dbg(ACRN_DBG_IRQ, "VM%d INTX add pin mapping vpin%d:ppin%d", vm->vm_id, virt_pin, phys_pin);
dev_dbg(ACRN_DBG_IRQ,
"VM%d INTX add pin mapping vpin%d:ppin%d",
entry->vm->vm_id, virt_pin, phys_pin);
return entry; return entry;
} }

View File

@ -160,19 +160,21 @@ static void ptdev_interrupt_handler(__unused uint32_t irq, void *data)
} }
/* active intr with irq registering */ /* active intr with irq registering */
void int32_t ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq)
ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq)
{ {
int32_t retval; int32_t retval;
/* register and allocate host vector/irq */ /* register and allocate host vector/irq */
retval = request_irq(phys_irq, ptdev_interrupt_handler, retval = request_irq(phys_irq, ptdev_interrupt_handler, (void *)entry, IRQF_PT);
(void *)entry, IRQF_PT);
ASSERT(retval >= 0, "dev register failed"); if (retval < 0) {
entry->allocated_pirq = (uint32_t)retval; pr_err("request irq failed, please check!, phys-irq=%d", phys_irq);
} else {
entry->allocated_pirq = (uint32_t)retval;
atomic_set32(&entry->active, ACTIVE_FLAG);
}
atomic_set32(&entry->active, ACTIVE_FLAG); return retval;
} }
void void

View File

@ -77,9 +77,7 @@ struct ptdev_remapping_info *ptdev_dequeue_softirq(struct acrn_vm *vm);
/* allocate a remapping entry of @intr_type owned by @vm */
struct ptdev_remapping_info *alloc_entry(struct acrn_vm *vm,
		uint32_t intr_type);
/* free an entry previously obtained from alloc_entry() */
void release_entry(struct ptdev_remapping_info *entry);
/* register the entry's physical IRQ; returns a negative value on failure */
int32_t ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq);
void ptdev_deactivate_entry(struct ptdev_remapping_info *entry);
#ifdef HV_DEBUG #ifdef HV_DEBUG