hv: Introduce check_vm_vlapic_state API
This patch introduces the check_vm_vlapic_state API instead of is_lapic_pt_enabled to check if all the vCPUs of a VM are using x2APIC mode and LAPIC pass-through is enabled on all of them. When the VM is in the VM_VLAPIC_TRANSITION or VM_VLAPIC_DISABLED state, the following conditions apply. 1) For pass-through MSI interrupts, the interrupt source is not programmed. 2) For DM-emulated device MSI interrupts, the interrupt is not delivered. 3) For IPIs, delivery works only if the sender and destination are both in x2APIC mode. Tracked-On: #3253 Signed-off-by: Sainath Grandhi <sainath.grandhi@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent
f3627d4839
commit
7d44cd5c28
|
@ -636,24 +636,49 @@ int32_t ptirq_msix_remap(struct acrn_vm *vm, uint16_t virt_bdf, uint16_t phys_bd
|
|||
spinlock_release(&ptdev_lock);
|
||||
|
||||
if (entry != NULL) {
|
||||
ret = 0;
|
||||
if (is_entry_active(entry) && (info->vmsi_data.full == 0U)) {
|
||||
/* handle destroy case */
|
||||
info->pmsi_data.full = 0U;
|
||||
} else {
|
||||
/* build physical config MSI, update to info->pmsi_xxx */
|
||||
if (is_lapic_pt_enabled(vm)) {
|
||||
/* for vm with lapic-pt, keep vector from guest */
|
||||
ptirq_build_physical_msi(vm, info, entry, (uint32_t)info->vmsi_data.bits.vector);
|
||||
if (is_lapic_pt_configured(vm)) {
|
||||
enum vm_vlapic_state vlapic_state = check_vm_vlapic_state(vm);
|
||||
if (vlapic_state == VM_VLAPIC_X2APIC) {
|
||||
/*
|
||||
* All the vCPUs are in x2APIC mode and LAPIC is Pass-through
|
||||
* Use guest vector to program the interrupt source
|
||||
*/
|
||||
ptirq_build_physical_msi(vm, info, entry, (uint32_t)info->vmsi_data.bits.vector);
|
||||
} else if (vlapic_state == VM_VLAPIC_XAPIC) {
|
||||
/*
|
||||
* All the vCPUs are in xAPIC mode and LAPIC is emulated
|
||||
* Use host vector to program the interrupt source
|
||||
*/
|
||||
ptirq_build_physical_msi(vm, info, entry, irq_to_vector(entry->allocated_pirq));
|
||||
} else if (vlapic_state == VM_VLAPIC_TRANSITION) {
|
||||
/*
|
||||
* vCPUs are in middle of transition, so do not program interrupt source
|
||||
* TODO: Devices programmed during transition do not work after transition
|
||||
* as device is not programmed with interrupt info. Need to implement a
|
||||
* method to get interrupts working after transition.
|
||||
*/
|
||||
ret = -EFAULT;
|
||||
} else {
|
||||
/* Do nothing for VM_VLAPIC_DISABLED */
|
||||
ret = -EFAULT;
|
||||
}
|
||||
} else {
|
||||
ptirq_build_physical_msi(vm, info, entry, irq_to_vector(entry->allocated_pirq));
|
||||
}
|
||||
|
||||
entry->msi = *info;
|
||||
dev_dbg(ACRN_DBG_IRQ, "PCI %x:%x.%x MSI VR[%d] 0x%x->0x%x assigned to vm%d",
|
||||
pci_bus(virt_bdf), pci_slot(virt_bdf), pci_func(virt_bdf), entry_nr,
|
||||
info->vmsi_data.bits.vector, irq_to_vector(entry->allocated_pirq), entry->vm->vm_id);
|
||||
if (ret == 0) {
|
||||
entry->msi = *info;
|
||||
dev_dbg(ACRN_DBG_IRQ, "PCI %x:%x.%x MSI VR[%d] 0x%x->0x%x assigned to vm%d",
|
||||
pci_bus(virt_bdf), pci_slot(virt_bdf), pci_func(virt_bdf), entry_nr,
|
||||
info->vmsi_data.bits.vector, irq_to_vector(entry->allocated_pirq), entry->vm->vm_id);
|
||||
}
|
||||
}
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -595,7 +595,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
|
|||
if (atomic_load32(&vcpu->running) == 1U) {
|
||||
remove_from_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
|
||||
|
||||
if (is_lapic_pt_enabled(vcpu->vm)) {
|
||||
if (is_lapic_pt_enabled(vcpu)) {
|
||||
make_reschedule_request(vcpu->pcpu_id, DEL_MODE_INIT);
|
||||
} else {
|
||||
make_reschedule_request(vcpu->pcpu_id, DEL_MODE_IPI);
|
||||
|
@ -738,3 +738,17 @@ uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask)
|
|||
|
||||
return dmask;
|
||||
}
|
||||
|
||||
/*
|
||||
* @brief Check if vCPU uses LAPIC in x2APIC mode and the VM, vCPU belongs to, is configured for
|
||||
* LAPIC Pass-through
|
||||
*
|
||||
* @pre vcpu != NULL
|
||||
*
|
||||
* @return true, if vCPU LAPIC is in x2APIC mode and VM, vCPU belongs to, is configured for
|
||||
* LAPIC Pass-through
|
||||
*/
|
||||
bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu)
|
||||
{
|
||||
return ((is_x2apic_enabled(vcpu_vlapic(vcpu))) && (is_lapic_pt_configured(vcpu->vm)));
|
||||
}
|
||||
|
|
|
@ -2042,6 +2042,9 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
|
|||
if ((phys == false) || (shorthand != APIC_DEST_DESTFLD)) {
|
||||
pr_err("Logical destination mode or shorthands \
|
||||
not supported in ICR forpartition mode\n");
|
||||
/*
|
||||
* TODO: To support logical destination and shorthand modes
|
||||
*/
|
||||
} else {
|
||||
vcpu_id = vm_apicid2vcpu_id(vm, vapic_id);
|
||||
if ((vcpu_id < vm->hw.created_vcpus) && (vm->hw.vcpu_array[vcpu_id].state != VCPU_OFFLINE)) {
|
||||
|
@ -2056,11 +2059,13 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
|
|||
break;
|
||||
default:
|
||||
/* convert the dest from virtual apic_id to physical apic_id */
|
||||
papic_id = per_cpu(lapic_id, target_vcpu->pcpu_id);
|
||||
dev_dbg(ACRN_DBG_LAPICPT,
|
||||
"%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx",
|
||||
__func__, vapic_id, papic_id, icr_low);
|
||||
msr_write(MSR_IA32_EXT_APIC_ICR, (((uint64_t)papic_id) << 32U) | icr_low);
|
||||
if (is_x2apic_enabled(vcpu_vlapic(target_vcpu))) {
|
||||
papic_id = per_cpu(lapic_id, target_vcpu->pcpu_id);
|
||||
dev_dbg(ACRN_DBG_LAPICPT,
|
||||
"%s vapic_id: 0x%08lx papic_id: 0x%08lx icr_low:0x%08lx",
|
||||
__func__, vapic_id, papic_id, icr_low);
|
||||
msr_write(MSR_IA32_EXT_APIC_ICR, (((uint64_t)papic_id) << 32U) | icr_low);
|
||||
}
|
||||
break;
|
||||
}
|
||||
ret = 0;
|
||||
|
@ -2602,14 +2607,3 @@ void vlapic_set_apicv_ops(void)
|
|||
apicv_ops = &apicv_basic_ops;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @pre vm != NULL
|
||||
* @pre vm->vmid < CONFIG_MAX_VM_NUM
|
||||
*/
|
||||
bool is_lapic_pt_enabled(struct acrn_vm *vm)
|
||||
{
|
||||
struct acrn_vcpu *vcpu = vcpu_from_vid(vm, 0U);
|
||||
|
||||
return ((is_x2apic_enabled(vcpu_vlapic(vcpu))) && (is_lapic_pt_configured(vm)));
|
||||
}
|
||||
|
|
|
@ -559,7 +559,7 @@ int32_t shutdown_vm(struct acrn_vm *vm)
|
|||
reset_vcpu(vcpu);
|
||||
offline_vcpu(vcpu);
|
||||
|
||||
if (is_lapic_pt_enabled(vm)) {
|
||||
if (is_lapic_pt_enabled(vcpu)) {
|
||||
bitmap_set_nolock(vcpu->pcpu_id, &mask);
|
||||
make_pcpu_offline(vcpu->pcpu_id);
|
||||
}
|
||||
|
@ -567,7 +567,7 @@ int32_t shutdown_vm(struct acrn_vm *vm)
|
|||
|
||||
wait_pcpus_offline(mask);
|
||||
|
||||
if (is_lapic_pt_enabled(vm) && !start_pcpus(mask)) {
|
||||
if (is_lapic_pt_configured(vm) && !start_pcpus(mask)) {
|
||||
pr_fatal("Failed to start all cpus in mask(0x%llx)", mask);
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
|
@ -841,3 +841,16 @@ void update_vm_vlapic_state(struct acrn_vm *vm)
|
|||
vm->arch_vm.vlapic_state = vlapic_state;
|
||||
spinlock_release(&vm->vm_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* @brief Check state of vLAPICs of a VM
|
||||
*
|
||||
* @pre vm != NULL
|
||||
*/
|
||||
enum vm_vlapic_state check_vm_vlapic_state(const struct acrn_vm *vm)
|
||||
{
|
||||
enum vm_vlapic_state vlapic_state;
|
||||
|
||||
vlapic_state = vm->arch_vm.vlapic_state;
|
||||
return vlapic_state;
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ void vcpu_thread(struct sched_object *obj)
|
|||
init_vmcs(vcpu);
|
||||
}
|
||||
|
||||
if (!is_lapic_pt_enabled(vcpu->vm)) {
|
||||
if (!is_lapic_pt_enabled(vcpu)) {
|
||||
/* handle pending softirq when irq enable*/
|
||||
do_softirq();
|
||||
CPU_IRQ_DISABLE();
|
||||
|
@ -64,7 +64,7 @@ void vcpu_thread(struct sched_object *obj)
|
|||
|
||||
profiling_pre_vmexit_handler(vcpu);
|
||||
|
||||
if (!is_lapic_pt_enabled(vcpu->vm)) {
|
||||
if (!is_lapic_pt_enabled(vcpu)) {
|
||||
CPU_IRQ_ENABLE();
|
||||
}
|
||||
/* Dispatch handler */
|
||||
|
|
|
@ -484,9 +484,27 @@ int32_t hcall_inject_msi(struct acrn_vm *vm, uint16_t vmid, uint64_t param)
|
|||
pr_err("%s: Unable copy param to vm\n", __func__);
|
||||
} else {
|
||||
/* For target cpu with lapic pt, send ipi instead of injection via vlapic */
|
||||
if (is_lapic_pt_enabled(target_vm)) {
|
||||
inject_msi_lapic_pt(target_vm, &msi);
|
||||
ret = 0;
|
||||
if (is_lapic_pt_configured(target_vm)) {
|
||||
enum vm_vlapic_state vlapic_state = check_vm_vlapic_state(vm);
|
||||
if (vlapic_state == VM_VLAPIC_X2APIC) {
|
||||
/*
|
||||
* All the vCPUs of VM are in x2APIC mode and LAPIC is PT
|
||||
* Inject the vMSI as an IPI directly to VM
|
||||
*/
|
||||
inject_msi_lapic_pt(target_vm, &msi);
|
||||
ret = 0;
|
||||
} else if (vlapic_state == VM_VLAPIC_XAPIC) {
|
||||
/*
|
||||
* All the vCPUs of VM are in xAPIC and use vLAPIC
|
||||
* Inject using vLAPIC
|
||||
*/
|
||||
ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data);
|
||||
} else {
|
||||
/*
|
||||
* For cases VM_VLAPIC_DISABLED and VM_VLAPIC_TRANSITION
|
||||
* Silently drop interrupt
|
||||
*/
|
||||
}
|
||||
} else {
|
||||
ret = vlapic_intr_msi(target_vm, msi.msi_addr, msi.msi_data);
|
||||
}
|
||||
|
|
|
@ -747,7 +747,7 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id);
|
|||
* @return The physical destination CPU mask
|
||||
*/
|
||||
uint64_t vcpumask2pcpumask(struct acrn_vm *vm, uint64_t vdmask);
|
||||
|
||||
bool is_lapic_pt_enabled(struct acrn_vcpu *vcpu);
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
|
|
@ -210,7 +210,6 @@ void vlapic_calc_dest(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
|
|||
uint32_t dest, bool phys, bool lowprio);
|
||||
void vlapic_calc_dest_lapic_pt(struct acrn_vm *vm, uint64_t *dmask, bool is_broadcast,
|
||||
uint32_t dest, bool phys);
|
||||
bool is_lapic_pt_enabled(struct acrn_vm *vm);
|
||||
bool is_x2apic_enabled(const struct acrn_vlapic *vlapic);
|
||||
bool is_xapic_enabled(const struct acrn_vlapic *vlapic);
|
||||
/**
|
||||
|
|
|
@ -227,7 +227,7 @@ bool is_rt_vm(const struct acrn_vm *vm);
|
|||
bool is_highest_severity_vm(const struct acrn_vm *vm);
|
||||
bool vm_hide_mtrr(const struct acrn_vm *vm);
|
||||
void update_vm_vlapic_state(struct acrn_vm *vm);
|
||||
|
||||
enum vm_vlapic_state check_vm_vlapic_state(const struct acrn_vm *vm);
|
||||
#endif /* !ASSEMBLER */
|
||||
|
||||
#endif /* VM_H_ */
|
||||
|
|
Loading…
Reference in New Issue