hv: vlapic: add combined constraint for APICv
Add two functions that combine the per-bit constraints for APICv:

is_apicv_basic_feature_supported: checks whether the physical platform supports "Use TPR shadow", "Virtualize APIC accesses" and "Virtualize x2APIC mode".

is_apicv_advanced_feature_supported: checks whether the physical platform supports "APIC-register virtualization", "Virtual-interrupt delivery" and "Process posted interrupts".

If the physical platform supports only the APICv basic features, enable "Use TPR shadow" and "Virtualize APIC accesses" for xAPIC mode, and enable "Use TPR shadow" and "Virtualize x2APIC mode" for x2APIC mode. Otherwise, if the physical platform also supports the APICv advanced features, enable the full APICv feature set for both xAPIC mode and x2APIC mode.

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
parent 6f482b8856
commit f769f7457b
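The selection policy described in the message, restated as code. This is a minimal illustrative sketch and not part of the patch: choose_apicv_controls(), enable_ctrl() and the CTRL_* names are hypothetical stand-ins for the VMCS execution-control updates the diff performs via check_vmx_ctrl()/exec_vmwrite32(); the real predicate is the is_apicv_advanced_feature_supported() introduced below.

/* Hypothetical sketch of the two-tier APICv policy; enable_ctrl() and
 * the CTRL_* names are stand-ins, not ACRN APIs. */
static void choose_apicv_controls(bool x2apic_mode)
{
	/* Basic tier: required of the platform (boot fails otherwise),
	 * so "Use TPR shadow" plus the mode-specific access control
	 * are always enabled. */
	enable_ctrl(CTRL_USE_TPR_SHADOW);
	enable_ctrl(x2apic_mode ? CTRL_VIRT_X2APIC_MODE : CTRL_VIRT_APIC_ACCESS);

	/* Advanced tier: all-or-nothing. Only when the platform has
	 * every advanced capability is the rest of APICv enabled. */
	if (is_apicv_advanced_feature_supported()) {
		enable_ctrl(CTRL_APIC_REG_VIRT);
		enable_ctrl(CTRL_VIRT_INTR_DELIVERY);
		enable_ctrl(CTRL_PROCESS_POSTED_INTR);
	}
}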
@@ -20,12 +20,17 @@
 /* TODO: add more capability per requirement */
 /* APICv features */
 #define VAPIC_FEATURE_VIRT_ACCESS	(1U << 0U)
 #define VAPIC_FEATURE_VIRT_REG		(1U << 1U)
 #define VAPIC_FEATURE_INTR_DELIVERY	(1U << 2U)
 #define VAPIC_FEATURE_TPR_SHADOW	(1U << 3U)
 #define VAPIC_FEATURE_POST_INTR	(1U << 4U)
 #define VAPIC_FEATURE_VX2APIC_MODE	(1U << 5U)
+
+/* BASIC features: must be supported by the physical platform and are enabled by default */
+#define APICV_BASIC_FEATURE	(VAPIC_FEATURE_TPR_SHADOW | VAPIC_FEATURE_VIRT_ACCESS | VAPIC_FEATURE_VX2APIC_MODE)
+/* ADVANCED features: enabled by default only if the physical platform supports them all; otherwise, all of them are disabled */
+#define APICV_ADVANCED_FEATURE	(VAPIC_FEATURE_VIRT_REG | VAPIC_FEATURE_INTR_DELIVERY | VAPIC_FEATURE_POST_INTR)
 
 static struct cpu_capability {
 	uint8_t apicv_features;
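The point of folding the individual bits into APICV_BASIC_FEATURE and APICV_ADVANCED_FEATURE is that support becomes an all-or-nothing test: (features & mask) == mask holds only when every bit of the mask is set, whereas the old (features & bit) != 0U style passes on any single capability. A self-contained illustration (the features value below is made up):

#include <stdio.h>
#include <stdint.h>

#define VAPIC_FEATURE_VIRT_REG		(1U << 1U)
#define VAPIC_FEATURE_INTR_DELIVERY	(1U << 2U)
#define VAPIC_FEATURE_POST_INTR	(1U << 4U)
#define APICV_ADVANCED_FEATURE	(VAPIC_FEATURE_VIRT_REG | VAPIC_FEATURE_INTR_DELIVERY | VAPIC_FEATURE_POST_INTR)

int main(void)
{
	/* Made-up platform: register virtualization and interrupt
	 * delivery present, posted interrupts absent. */
	uint8_t features = VAPIC_FEATURE_VIRT_REG | VAPIC_FEATURE_INTR_DELIVERY;

	/* Any-bit check: prints 1, although posted interrupts are missing. */
	printf("%d\n", (features & APICV_ADVANCED_FEATURE) != 0U);

	/* All-bits check (the new combined constraint): prints 0 here. */
	printf("%d\n", (features & APICV_ADVANCED_FEATURE) == APICV_ADVANCED_FEATURE);
	return 0;
}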
@@ -252,24 +257,14 @@ static bool is_ept_supported(void)
 	return (cpu_caps.ept_features != 0U);
 }
 
-static bool is_apicv_supported(void)
+static inline bool is_apicv_basic_feature_supported(void)
 {
-	return (cpu_caps.apicv_features != 0U);
+	return ((cpu_caps.apicv_features & APICV_BASIC_FEATURE) == APICV_BASIC_FEATURE);
 }
 
-bool is_apicv_reg_virtualization_supported(void)
-{
-	return ((cpu_caps.apicv_features & VAPIC_FEATURE_VIRT_REG) != 0U);
-}
-
-bool is_apicv_intr_delivery_supported(void)
-{
-	return ((cpu_caps.apicv_features & VAPIC_FEATURE_INTR_DELIVERY) != 0U);
-}
-
-bool is_apicv_posted_intr_supported(void)
+bool is_apicv_advanced_feature_supported(void)
 {
-	return ((cpu_caps.apicv_features & VAPIC_FEATURE_POST_INTR) != 0U);
+	return ((cpu_caps.apicv_features & APICV_ADVANCED_FEATURE) == APICV_ADVANCED_FEATURE);
 }
 
 bool cpu_has_vmx_ept_cap(uint32_t bit_mask)
@@ -397,7 +392,7 @@ int32_t detect_hardware_support(void)
 	} else if (!is_ept_supported()) {
 		pr_fatal("%s, EPT not supported\n", __func__);
 		ret = -ENODEV;
-	} else if (!is_apicv_supported()) {
+	} else if (!is_apicv_basic_feature_supported()) {
 		pr_fatal("%s, APICV not supported\n", __func__);
 		ret = -ENODEV;
 	} else if (boot_cpu_data.cpuid_level < 0x15U) {
@@ -136,7 +136,7 @@ void vcpu_set_vmcs_eoi_exit(struct acrn_vcpu *vcpu)
 	pr_dbg("%s", __func__);
 
 	spinlock_obtain(&(vcpu->arch.lock));
-	if (is_apicv_intr_delivery_supported()) {
+	if (is_apicv_advanced_feature_supported()) {
 		exec_vmwrite64(VMX_EOI_EXIT0_FULL, vcpu->arch.eoi_exit_bitmap[0]);
 		exec_vmwrite64(VMX_EOI_EXIT1_FULL, vcpu->arch.eoi_exit_bitmap[1]);
 		exec_vmwrite64(VMX_EOI_EXIT2_FULL, vcpu->arch.eoi_exit_bitmap[2]);
@@ -483,7 +483,7 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 	 * Here to sync the pending interrupts to irr and update rvi if
 	 * needed. And then try to handle vmcs event injection.
 	 */
-	if (is_apicv_intr_delivery_supported() &&
+	if (is_apicv_advanced_feature_supported() &&
 		bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT, pending_req_bits)) {
 		vlapic = vcpu_vlapic(vcpu);
 		vlapic_apicv_inject_pir(vlapic);
@@ -518,7 +518,7 @@ int32_t acrn_handle_pending_request(struct acrn_vcpu *vcpu)
 	 */
 	if (arch->irq_window_enabled != 1U) {
 		if (bitmap_test(ACRN_REQUEST_EXTINT, pending_req_bits) ||
-			(!is_apicv_intr_delivery_supported() && vcpu_pending_request(vcpu))) {
+			(!is_apicv_advanced_feature_supported() && vcpu_pending_request(vcpu))) {
 			tmp = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS);
 			tmp |= VMX_PROCBASED_CTLS_IRQ_WIN;
 			exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, tmp);
@@ -564,7 +564,7 @@ static inline int32_t acrn_inject_pending_vector(struct acrn_vcpu *vcpu, uint64_
 	 * the virtual interrupt injection conditions are satisfied,
 	 * then inject through vmcs.
 	 */
-	if ((!is_apicv_intr_delivery_supported()) &&
+	if ((!is_apicv_advanced_feature_supported()) &&
		(bitmap_test_and_clear_lock(ACRN_REQUEST_EVENT,
		pending_req_bits))) {
 		ret = vcpu_inject_vlapic_int(vcpu);
@@ -513,12 +513,12 @@ vlapic_accept_intr(struct acrn_vlapic *vlapic, uint32_t vector, bool level)
 	if ((lapic->svr.v & APIC_SVR_ENABLE) == 0U) {
 		dev_dbg(ACRN_DBG_LAPIC, "vlapic is software disabled, ignoring interrupt %u", vector);
 		ret = false;
-	} else if (is_apicv_intr_delivery_supported()) {
+	} else if (is_apicv_advanced_feature_supported()) {
 		pending_intr = apicv_set_intr_ready(vlapic, vector);
 
 		vlapic_set_tmr(vlapic, vector, level);
 
-		if ((pending_intr != 0) && (is_apicv_posted_intr_supported()) && (get_cpu_id() != vlapic->vcpu->pcpu_id)) {
+		if ((pending_intr != 0) && (get_cpu_id() != vlapic->vcpu->pcpu_id)) {
 			/*
 			 * Send interrupt to vCPU via posted interrupt way:
 			 * 1. If target vCPU is in non-root mode(running),
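A side note on the hunk above: the inner is_apicv_posted_intr_supported() test could be dropped because the branch already runs under is_apicv_advanced_feature_supported(), and the advanced tier includes the posted-interrupt bit by construction, so (caps & APICV_ADVANCED_FEATURE) == APICV_ADVANCED_FEATURE implies (caps & VAPIC_FEATURE_POST_INTR) != 0U. A hypothetical, self-contained restatement of that invariant (the defines mirror the ones added by this commit; the assertion itself is not in the patch):

#define VAPIC_FEATURE_VIRT_REG		(1U << 1U)
#define VAPIC_FEATURE_INTR_DELIVERY	(1U << 2U)
#define VAPIC_FEATURE_POST_INTR	(1U << 4U)
#define APICV_ADVANCED_FEATURE	(VAPIC_FEATURE_VIRT_REG | VAPIC_FEATURE_INTR_DELIVERY | VAPIC_FEATURE_POST_INTR)

/* Not part of the patch: compile-time statement that a successful
 * advanced-tier check subsumes the old posted-interrupt check. */
_Static_assert((APICV_ADVANCED_FEATURE & VAPIC_FEATURE_POST_INTR) != 0U,
	"advanced APICv tier must include posted interrupts");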
@@ -291,7 +291,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 	/* enable external interrupt VM Exit */
 	value32 = check_vmx_ctrl(MSR_IA32_VMX_PINBASED_CTLS, VMX_PINBASED_CTLS_IRQ_EXIT);
 
-	if (is_apicv_posted_intr_supported()) {
+	if (is_apicv_advanced_feature_supported()) {
 		value32 |= VMX_PINBASED_CTLS_POST_IRQ;
 	}
 
@@ -340,7 +340,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 		value32 &= ~VMX_PROCBASED_CTLS2_VPID;
 	}
 
-	if (is_apicv_intr_delivery_supported()) {
+	if (is_apicv_advanced_feature_supported()) {
 		value32 |= VMX_PROCBASED_CTLS2_VIRQ;
 	} else {
 		/*
@@ -372,7 +372,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 	value64 = vlapic_apicv_get_apic_page_addr(vcpu_vlapic(vcpu));
 	exec_vmwrite64(VMX_VIRTUAL_APIC_PAGE_ADDR_FULL, value64);
 
-	if (is_apicv_intr_delivery_supported()) {
+	if (is_apicv_advanced_feature_supported()) {
 		/* Disable all EOI VMEXIT by default and
 		 * clear RVI and SVI.
 		 */
@@ -382,10 +382,8 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
 		exec_vmwrite64(VMX_EOI_EXIT3_FULL, 0UL);
 
 		exec_vmwrite16(VMX_GUEST_INTR_STATUS, 0U);
-		if (is_apicv_posted_intr_supported()) {
-			exec_vmwrite16(VMX_POSTED_INTR_VECTOR, VECTOR_POSTED_INTR);
-			exec_vmwrite64(VMX_PIR_DESC_ADDR_FULL, apicv_get_pir_desc_paddr(vcpu));
-		}
+		exec_vmwrite16(VMX_POSTED_INTR_VECTOR, VECTOR_POSTED_INTR);
+		exec_vmwrite64(VMX_PIR_DESC_ADDR_FULL, apicv_get_pir_desc_paddr(vcpu));
 	}
 
 	/* Load EPTP execution control
@@ -587,7 +585,7 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
 
 	value32 = exec_vmread32(VMX_PIN_VM_EXEC_CONTROLS);
 	value32 &= ~VMX_PINBASED_CTLS_IRQ_EXIT;
-	if (is_apicv_posted_intr_supported()) {
+	if (is_apicv_advanced_feature_supported()) {
 		value32 &= ~VMX_PINBASED_CTLS_POST_IRQ;
 	}
 	exec_vmwrite32(VMX_PIN_VM_EXEC_CONTROLS, value32);
@@ -604,7 +602,7 @@ void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
 
 	value32 = exec_vmread32(VMX_PROC_VM_EXEC_CONTROLS2);
 	value32 &= ~VMX_PROCBASED_CTLS2_VAPIC_REGS;
-	if (is_apicv_intr_delivery_supported()) {
+	if (is_apicv_advanced_feature_supported()) {
 		value32 &= ~VMX_PROCBASED_CTLS2_VIRQ;
 	}
 	exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS2, value32);
@@ -622,7 +622,7 @@ void update_msr_bitmap_x2apic_apicv(const struct acrn_vcpu *vcpu)
 	 * TPR is virtualized even when register virtualization is not
 	 * supported
 	 */
-	if (is_apicv_reg_virtualization_supported()) {
+	if (is_apicv_advanced_feature_supported()) {
 		intercept_x2apic_msrs(msr_bitmap, INTERCEPT_WRITE);
 		enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_CUR_COUNT, INTERCEPT_READ);
 		/*
@@ -37,9 +37,7 @@ struct cpuinfo_x86 {
 };
 
 bool has_monitor_cap(void);
-bool is_apicv_reg_virtualization_supported(void);
-bool is_apicv_intr_delivery_supported(void);
-bool is_apicv_posted_intr_supported(void);
+bool is_apicv_advanced_feature_supported(void);
 bool cpu_has_cap(uint32_t bit);
 bool cpu_has_vmx_ept_cap(uint32_t bit_mask);
 bool cpu_has_vmx_vpid_cap(uint32_t bit_mask);
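Two observations on the resulting interface: is_apicv_basic_feature_supported() stays static inline in the .c file, apparently because its caller, detect_hardware_support(), lives in the same translation unit, while the advanced check is the one exported here for the vlapic/vmcs code. For any remaining caller of the removed predicates, the migration is mechanical; a hypothetical before/after (send_posted_interrupt() is an illustrative stand-in, not an ACRN API):

	/* before: separate fine-grained checks that could diverge */
	if (is_apicv_intr_delivery_supported() && is_apicv_posted_intr_supported()) {
		send_posted_interrupt(vcpu);
	}

	/* after: one all-or-nothing tier check */
	if (is_apicv_advanced_feature_supported()) {
		send_posted_interrupt(vcpu);
	}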