hv: extend lapic pass-through for DM launched VM

This commit extends LAPIC pass-through to DM-launched VMs, mainly for hard real-time (RT) scenarios.
As in partition mode, the vlapic starts out in xAPIC mode; the LAPIC is passed through only once the
guest enables x2APIC mode, because the physical LAPICs run in x2APIC mode.

Main changes include:
- add is_lapic_pt() to check whether a VM is created with LAPIC pass-through, and use it to unify
  the partition-mode and DM-launched-VM pass-through code paths (see the sketch after this list),
  including:
  - reuse the IRQ delivery function and rename it to dispatch_interrupt_lapic_pt();
  - reuse switch_apicv_mode_x2apic();
  - reuse the ICR handling code to prevent malicious IPIs;
- intercept ICR/APICID/LDR MSR accesses when LAPIC is passed through;
- for a VM with LAPIC pass-through, interrupts stay disabled in root mode, since external-interrupt
  exiting is turned off and interrupts are delivered directly to the guest.
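
A condensed view of the gating this introduces (both snippets are taken from the diff below and
repeated here only for orientation):

    /* new helper, added in the last file of this diff */
    static inline bool is_lapic_pt(const struct acrn_vm *vm)
    {
        return ((vm_configs[vm->vm_id].guest_flags & LAPIC_PASSTHROUGH) != 0U);
    }

    /* vcpu_thread(): root-mode IRQs are left disabled for LAPIC_PT VMs */
    if (!is_lapic_pt(vcpu->vm)) {
        CPU_IRQ_ENABLE();
    }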

Tracked-On: #2351
Signed-off-by: Yan, Like <like.yan@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Yan, Like 2018-11-11 14:07:33 +08:00 committed by wenlingz
parent c853eb4bc2
commit a073ebeeca
7 changed files with 57 additions and 45 deletions


@@ -362,10 +362,10 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
}
case 0x0bU:
/* Patching X2APIC */
#ifdef CONFIG_PARTITION_MODE
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
#else
if (is_sos_vm(vcpu->vm)) {
if (is_lapic_pt(vcpu->vm)) {
/* for VM with LAPIC_PT, e.g. PRE_LAUNCHED_VM or NORMAL_VM with LAPIC_PT */
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
} else if (is_sos_vm(vcpu->vm)) {
cpuid_subleaf(leaf, subleaf, eax, ebx, ecx, edx);
} else {
*ecx = subleaf & 0xFFU;
@@ -393,7 +393,6 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
break;
}
}
#endif
break;
case 0x0dU:
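
The hunk above returns the physical CPUID values for leaf 0x0B when the VM has LAPIC pass-through.
As an illustrative, guest-side sketch (not part of this commit): leaf 0x0B reports the x2APIC ID in
EDX, so a LAPIC_PT guest reading it this way sees the ID of the physical LAPIC that is passed
through to it.

    #include <stdint.h>

    /* Illustrative guest-side helper (not from this commit): read the current
     * CPU's x2APIC ID via CPUID leaf 0x0B, which returns it in EDX. */
    static inline uint32_t read_x2apic_id_from_cpuid(void)
    {
        uint32_t eax, ebx, ecx, edx;

        __asm__ volatile("cpuid"
                         : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                         : "a"(0x0bU), "c"(0U));
        return edx;
    }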


@@ -2013,7 +2013,6 @@ static inline uint32_t x2apic_msr_to_regoff(uint32_t msr)
return (((msr - 0x800U) & 0x3FFU) << 4U);
}
#ifdef CONFIG_PARTITION_MODE
/*
* If x2apic is pass-thru to guests, we have to special case the following
* 1. INIT Delivery mode
@@ -2061,7 +2060,6 @@ vlapic_x2apic_pt_icr_access(struct acrn_vm *vm, uint64_t val)
}
return 0;
}
#endif
static int32_t vlapic_x2apic_access(struct acrn_vcpu *vcpu, uint32_t msr, bool write,
uint64_t *val)
@@ -2076,24 +2074,32 @@ static int32_t vlapic_x2apic_access(struct acrn_vcpu *vcpu, uint32_t msr, bool w
*/
vlapic = vcpu_vlapic(vcpu);
if (is_x2apic_enabled(vlapic)) {
#ifdef CONFIG_PARTITION_MODE
struct acrn_vm_config *vm_config = get_vm_config(vcpu->vm->vm_id);
if((vm_config->guest_flags & LAPIC_PASSTHROUGH) != 0U ) {
if (msr == MSR_IA32_EXT_APIC_ICR) {
error = vlapic_x2apic_pt_icr_access(vcpu->vm, *val);
}
return error;
}
#endif
offset = x2apic_msr_to_regoff(msr);
if (write) {
if (!is_x2apic_read_only_msr(msr)) {
error = vlapic_write(vlapic, offset, *val);
if (is_lapic_pt(vcpu->vm)) {
switch (msr) {
case MSR_IA32_EXT_APIC_ICR:
error = vlapic_x2apic_pt_icr_access(vcpu->vm, *val);
break;
case MSR_IA32_EXT_APIC_LDR:
case MSR_IA32_EXT_XAPICID:
if (!write) {
offset = x2apic_msr_to_regoff(msr);
error = vlapic_read(vlapic, offset, val);
}
break;
default:
pr_err("%s: unexpected MSR[0x%x] access with lapic_pt", __func__, msr);
break;
}
} else {
if (!is_x2apic_write_only_msr(msr)) {
error = vlapic_read(vlapic, offset, val);
offset = x2apic_msr_to_regoff(msr);
if (write) {
if (!is_x2apic_read_only_msr(msr)) {
error = vlapic_write(vlapic, offset, *val);
}
} else {
if (!is_x2apic_write_only_msr(msr)) {
error = vlapic_read(vlapic, offset, val);
}
}
}
}
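
As a quick cross-check of x2apic_msr_to_regoff() shown at the top of this file's hunks, here is a
small self-contained sketch (illustrative, not part of the commit) confirming that x2APIC MSR
numbers map onto the legacy xAPIC register offsets, which is why the existing
vlapic_read()/vlapic_write() register paths can be reused for the trapped MSRs:

    #include <assert.h>
    #include <stdint.h>

    /* Copy of the mapping from the hunk above, used for the worked example. */
    static inline uint32_t x2apic_msr_to_regoff(uint32_t msr)
    {
        return (((msr - 0x800U) & 0x3FFU) << 4U);
    }

    int main(void)
    {
        /* MSR 0x830 (x2APIC ICR) -> 0x300, the xAPIC ICR-low offset. */
        assert(x2apic_msr_to_regoff(0x830U) == 0x300U);
        /* MSR 0x802 (x2APIC ID)  -> 0x020, the xAPIC ID register offset. */
        assert(x2apic_msr_to_regoff(0x802U) == 0x020U);
        /* MSR 0x80D (x2APIC LDR) -> 0x0D0, the xAPIC LDR offset. */
        assert(x2apic_msr_to_regoff(0x80DU) == 0x0D0U);
        return 0;
    }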


@@ -628,6 +628,12 @@ void update_msr_bitmap_x2apic_apicv(const struct acrn_vcpu *vcpu)
}
}
/*
* After switching to x2APIC mode, most APIC MSRs are passed through to the guest, but the vlapic
* still virtualizes a few MSRs for security reasons:
* - XAPICID/LDR: reads need to be trapped so that the guest always sees the correct vlapic_id.
* - ICR: writes need to be trapped to prevent malicious IPIs.
*/
void update_msr_bitmap_x2apic_passthru(const struct acrn_vcpu *vcpu)
{
uint32_t msr;
@@ -638,6 +644,8 @@ void update_msr_bitmap_x2apic_passthru(const struct acrn_vcpu *vcpu)
msr <= MSR_IA32_EXT_APIC_SELF_IPI; msr++) {
enable_msr_interception(msr_bitmap, msr, INTERCEPT_DISABLE);
}
enable_msr_interception(msr_bitmap, MSR_IA32_EXT_XAPICID, INTERCEPT_READ);
enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_LDR, INTERCEPT_READ);
enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_ICR, INTERCEPT_WRITE);
enable_msr_interception(msr_bitmap, MSR_IA32_TSC_DEADLINE, INTERCEPT_DISABLE);
}
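
The comment above gives the security motivation for trapping ICR writes. A conceptual sketch of the
idea (the names and exact policy below are illustrative assumptions, not the commit's
vlapic_x2apic_pt_icr_access() implementation): before forwarding a guest ICR write to the physical
LAPIC, the hypervisor inspects the delivery mode so that, for example, a guest cannot send INIT to a
core it does not own.

    #include <stdbool.h>
    #include <stdint.h>

    /* ICR delivery-mode field (bits 10:8) as defined by the Intel SDM. */
    #define APIC_DELMODE_MASK   0x00000700U
    #define APIC_DELMODE_INIT   0x00000500U

    /* Illustrative policy check only; the real handling lives in
     * vlapic_x2apic_pt_icr_access() in the vlapic diff above. */
    static bool icr_write_may_be_forwarded(uint64_t icr_val)
    {
        uint32_t delmode = (uint32_t)icr_val & APIC_DELMODE_MASK;

        /* Privileged delivery modes such as INIT are handled by the
         * hypervisor instead of being forwarded blindly to hardware. */
        return (delmode != APIC_DELMODE_INIT);
    }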


@@ -372,7 +372,6 @@ int32_t external_interrupt_vmexit_handler(struct acrn_vcpu *vcpu)
#else
dispatch_interrupt(&ctx);
#endif
vcpu_retain_rip(vcpu);
TRACE_2L(TRACE_VMEXIT_EXTERNAL_INTERRUPT, ctx.vector, 0UL);


@@ -560,23 +560,10 @@ void init_vmcs(struct acrn_vcpu *vcpu)
init_exit_ctrl(vcpu);
}
#ifndef CONFIG_PARTITION_MODE
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
{
uint32_t value32;
value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
value32 &= ~VMX_PROCBASED_CTLS2_VAPIC;
value32 |= VMX_PROCBASED_CTLS2_VX2APIC;
exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
update_msr_bitmap_x2apic_apicv(vcpu);
}
#else
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
{
uint32_t value32;
struct acrn_vm_config *vm_config = get_vm_config(vcpu->vm->vm_id);
if((vm_config->guest_flags & LAPIC_PASSTHROUGH) != 0U ) {
if(is_lapic_pt(vcpu->vm)) {
/*
* Disable external interrupt exiting and irq ack
* Disable posted interrupt processing
@@ -620,4 +607,3 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
update_msr_bitmap_x2apic_apicv(vcpu);
}
}
#endif


@@ -20,11 +20,13 @@ void vcpu_thread(struct sched_object *obj)
init_vmcs(vcpu);
}
/* handle pending softirq when irq enable*/
do_softirq();
CPU_IRQ_DISABLE();
/* handle risk softirq when disabling irq*/
do_softirq();
if (!is_lapic_pt(vcpu->vm)) {
/* handle pending softirq when irq enable*/
do_softirq();
CPU_IRQ_DISABLE();
/* handle risk softirq when disabling irq*/
do_softirq();
}
/* Check and process pending requests(including interrupt) */
ret = acrn_handle_pending_request(vcpu);
@@ -55,7 +57,9 @@ void vcpu_thread(struct sched_object *obj)
profiling_pre_vmexit_handler(vcpu);
CPU_IRQ_ENABLE();
if (!is_lapic_pt(vcpu->vm)) {
CPU_IRQ_ENABLE();
}
/* Dispatch handler */
ret = vmexit_handler(vcpu);
if (ret < 0) {


@@ -176,6 +176,11 @@ struct vpci_vdev_array {
#define MAX_BOOTARGS_SIZE 1024U
#define MAX_CONFIG_NAME_SIZE 32U
/*
* PRE_LAUNCHED_VM is launched by the ACRN hypervisor, with LAPIC_PT;
* SOS_VM is launched by the ACRN hypervisor, without LAPIC_PT;
* NORMAL_VM is launched by the ACRN device model, with or without LAPIC_PT depending on the use case.
*/
enum acrn_vm_type {
UNDEFINED_VM = 0,
PRE_LAUNCHED_VM,
@@ -332,4 +337,9 @@ static inline struct acrn_vm_config *get_vm_config(uint16_t vm_id)
#endif
}
static inline bool is_lapic_pt(const struct acrn_vm *vm)
{
return ((vm_configs[vm->vm_id].guest_flags & LAPIC_PASSTHROUGH) != 0U);
}
#endif /* VM_H_ */