hv: cat: isolate hypervisor from rtvm

Currently, the clos id of the cpu cores in vmx root mode is the same as in non-root mode.
For RTVM, if the hypervisor shares the same clos id with non-root mode, the cache lines may
be polluted by hypervisor code execution on vmexit.

The patch adds hv_clos in vm_configurations.c.
The hypervisor initializes the clos setting according to hv_clos during physical cpu core initialization.
For RTVM, the MSR auto load/store areas are used to switch between the different settings for VMX
root and non-root mode.

Tracked-On: #2462
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Binbin Wu 2019-08-13 08:51:15 +00:00 committed by ACRN System Integration
parent 38ca8db19f
commit cd1ae7a89e
7 changed files with 42 additions and 13 deletions

View File

@ -13,8 +13,11 @@
#include <logmsg.h> #include <logmsg.h>
#include <cat.h> #include <cat.h>
#include <board.h> #include <board.h>
#include <vm_config.h>
#include <msr.h>
struct cat_hw_info cat_cap_info; struct cat_hw_info cat_cap_info;
const uint16_t hv_clos = 0U;
int32_t init_cat_cap_info(void) int32_t init_cat_cap_info(void)
{ {
@ -67,5 +70,17 @@ void setup_clos(uint16_t pcpu_id)
val = (uint64_t)platform_clos_array[i].clos_mask; val = (uint64_t)platform_clos_array[i].clos_mask;
msr_write_pcpu(msr_index, val, pcpu_id); msr_write_pcpu(msr_index, val, pcpu_id);
} }
/* set hypervisor CAT clos */
msr_write_pcpu(MSR_IA32_PQR_ASSOC, clos2prq_msr(hv_clos), pcpu_id);
} }
} }
/*
 * Build the IA32_PQR_ASSOC value for the given CLOS id:
 * keep the current low 32 bits (RMID field) of the MSR and
 * place the CLOS id into bits 63:32 (COS field).
 */
uint64_t clos2prq_msr(uint16_t clos)
{
	uint64_t rmid_bits = msr_read(MSR_IA32_PQR_ASSOC) & 0xffffffffUL;

	return rmid_bits | ((uint64_t)clos << 32U);
}

View File

@ -758,21 +758,11 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
int32_t ret; int32_t ret;
struct acrn_vcpu *vcpu = NULL; struct acrn_vcpu *vcpu = NULL;
char thread_name[16]; char thread_name[16];
uint64_t orig_val, final_val;
struct acrn_vm_config *conf;
ret = create_vcpu(pcpu_id, vm, &vcpu); ret = create_vcpu(pcpu_id, vm, &vcpu);
if (ret == 0) { if (ret == 0) {
set_pcpu_used(pcpu_id); set_pcpu_used(pcpu_id);
/* Update CLOS for this CPU */
if (cat_cap_info.enabled) {
conf = get_vm_config(vm->vm_id);
orig_val = msr_read(MSR_IA32_PQR_ASSOC);
final_val = (orig_val & 0xffffffffUL) | (((uint64_t)conf->clos) << 32UL);
msr_write_pcpu(MSR_IA32_PQR_ASSOC, final_val, pcpu_id);
}
INIT_LIST_HEAD(&vcpu->sched_obj.run_list); INIT_LIST_HEAD(&vcpu->sched_obj.run_list);
snprintf(thread_name, 16U, "vm%hu:vcpu%hu", vm->vm_id, vcpu->vcpu_id); snprintf(thread_name, 16U, "vm%hu:vcpu%hu", vm->vm_id, vcpu->vcpu_id);
(void)strncpy_s(vcpu->sched_obj.name, 16U, thread_name, 16U); (void)strncpy_s(vcpu->sched_obj.name, 16U, thread_name, 16U);

View File

@ -451,7 +451,7 @@ static void init_entry_ctrl(const struct acrn_vcpu *vcpu)
* MSRs on load from memory on VM entry from mem address provided by * MSRs on load from memory on VM entry from mem address provided by
* VM-entry MSR load address field * VM-entry MSR load address field
*/ */
exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, MSR_AREA_COUNT); exec_vmwrite32(VMX_ENTRY_MSR_LOAD_COUNT, vcpu->arch.msr_area.count);
exec_vmwrite64(VMX_ENTRY_MSR_LOAD_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.guest)); exec_vmwrite64(VMX_ENTRY_MSR_LOAD_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.guest));
/* Set up VM entry interrupt information field pg 2909 24.8.3 */ /* Set up VM entry interrupt information field pg 2909 24.8.3 */
@ -493,8 +493,8 @@ static void init_exit_ctrl(const struct acrn_vcpu *vcpu)
* The 64 bit VM-exit MSR store and load address fields provide the * The 64 bit VM-exit MSR store and load address fields provide the
* corresponding addresses * corresponding addresses
*/ */
exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, MSR_AREA_COUNT); exec_vmwrite32(VMX_EXIT_MSR_STORE_COUNT, vcpu->arch.msr_area.count);
exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, MSR_AREA_COUNT); exec_vmwrite32(VMX_EXIT_MSR_LOAD_COUNT, vcpu->arch.msr_area.count);
exec_vmwrite64(VMX_EXIT_MSR_STORE_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.guest)); exec_vmwrite64(VMX_EXIT_MSR_STORE_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.guest));
exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.host)); exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.host));
} }

View File

@ -15,6 +15,7 @@
#include <sgx.h> #include <sgx.h>
#include <guest_pm.h> #include <guest_pm.h>
#include <ucode.h> #include <ucode.h>
#include <cat.h>
#include <trace.h> #include <trace.h>
#include <logmsg.h> #include <logmsg.h>
@ -283,10 +284,26 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, uint32_t mode)
*/ */
static void init_msr_area(struct acrn_vcpu *vcpu) static void init_msr_area(struct acrn_vcpu *vcpu)
{ {
struct acrn_vm_config *cfg = get_vm_config(vcpu->vm->vm_id);
vcpu->arch.msr_area.count = 0U;
vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX; vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX;
vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id; vcpu->arch.msr_area.guest[MSR_AREA_TSC_AUX].value = vcpu->vcpu_id;
vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX; vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].msr_index = MSR_IA32_TSC_AUX;
vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id; vcpu->arch.msr_area.host[MSR_AREA_TSC_AUX].value = vcpu->pcpu_id;
vcpu->arch.msr_area.count++;
/* only load/restore MSR IA32_PQR_ASSOC when hv and guest have different settings */
if (cat_cap_info.enabled && (cfg->clos != hv_clos)) {
vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
vcpu->arch.msr_area.guest[MSR_AREA_IA32_PQR_ASSOC].value = clos2prq_msr(cfg->clos);
vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].msr_index = MSR_IA32_PQR_ASSOC;
vcpu->arch.msr_area.host[MSR_AREA_IA32_PQR_ASSOC].value = clos2prq_msr(hv_clos);
vcpu->arch.msr_area.count++;
pr_acrnlog("switch clos for VM %u vcpu_id %u, host 0x%x, guest 0x%x",
vcpu->vm->vm_id, vcpu->vcpu_id, hv_clos, cfg->clos);
}
} }
/** /**

View File

@ -19,11 +19,13 @@ struct cat_hw_info {
}; };
extern struct cat_hw_info cat_cap_info; extern struct cat_hw_info cat_cap_info;
extern const uint16_t hv_clos;
void setup_clos(uint16_t pcpu_id); void setup_clos(uint16_t pcpu_id);
#define CAT_RESID_L3 1U #define CAT_RESID_L3 1U
#define CAT_RESID_L2 2U #define CAT_RESID_L2 2U
int32_t init_cat_cap_info(void); int32_t init_cat_cap_info(void);
uint64_t clos2prq_msr(uint16_t clos);
#endif /* CAT_H */ #endif /* CAT_H */

View File

@ -270,6 +270,9 @@ void cpu_dead(void);
void trampoline_start16(void); void trampoline_start16(void);
void load_pcpu_state_data(void); void load_pcpu_state_data(void);
void init_pcpu_pre(bool is_bsp); void init_pcpu_pre(bool is_bsp);
/* This function should be called on the same CPU core as specified by pcpu_id;
 * hence, pcpu_id is actually the current physical cpu id.
 */
void init_pcpu_post(uint16_t pcpu_id); void init_pcpu_post(uint16_t pcpu_id);
bool start_pcpus(uint64_t mask); bool start_pcpus(uint64_t mask);
void wait_pcpus_offline(uint64_t mask); void wait_pcpus_offline(uint64_t mask);

View File

@ -281,12 +281,14 @@ struct msr_store_entry {
enum { enum {
MSR_AREA_TSC_AUX = 0, MSR_AREA_TSC_AUX = 0,
MSR_AREA_IA32_PQR_ASSOC,
MSR_AREA_COUNT, MSR_AREA_COUNT,
}; };
struct msr_store_area { struct msr_store_area {
struct msr_store_entry guest[MSR_AREA_COUNT]; struct msr_store_entry guest[MSR_AREA_COUNT];
struct msr_store_entry host[MSR_AREA_COUNT]; struct msr_store_entry host[MSR_AREA_COUNT];
uint32_t count; /* actual count of entries to be loaded/restored during VMEntry/VMExit */
}; };
struct acrn_vcpu_arch { struct acrn_vcpu_arch {