hv: vmtrr: hide mtrr if hide_mtrr is true

MTRR virtualization is now controlled per VM by the HIDE_MTRR guest flag instead of a
build-time option. Currently we only configure "hide MTRR" explicitly to false for the SOS;
other VMs leave the flag unset, so hide_mtrr defaults to false for them as well.
The global Kconfig option MTRR_ENABLED is removed.
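
(For illustration: with this change, exposing or hiding MTRR becomes a per-VM choice made in the
VM configuration rather than a hypervisor rebuild. The entry below is a hypothetical sketch, not
part of this commit; only struct acrn_vm_config, guest_flags and the flag names come from the
patch, while the array name and the second flag are invented for the example.)

/* Hypothetical VM configuration entry (sketch only): hide MTRR from this VM
 * by OR-ing the new HIDE_MTRR bit into its guest_flags. Leaving the bit
 * clear keeps today's behavior, i.e. hide_mtrr defaults to false.
 */
struct acrn_vm_config vm_configs[CONFIG_MAX_VM_NUM] = {
	{
		.guest_flags = IO_COMPLETION_POLLING | HIDE_MTRR,
	},
};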

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Author: Li, Fei1 <fei1.li@intel.com>
Date: 2019-03-15 23:23:36 +08:00
Committed-by: wenlingz
Parent: 906c79eb40
Commit: c018b853e9
9 changed files with 30 additions and 28 deletions


@@ -274,10 +274,6 @@ config GPU_SBDF
 	  00:02.0 in DRHD segment 0, this SBDF would be (0 << 16) | (0 << 8) |
 	  (2 << 3) | (0 << 0), i.e. 0x00000010.
 
-config MTRR_ENABLED
-	bool "Memory Type Range Registers (MTRR) enabled"
-	default y
-
 config RELOC
 	bool "Enable hypervisor relocation"
 	default y


@@ -391,9 +391,9 @@ int32_t create_vcpu(uint16_t pcpu_id, struct acrn_vm *vm, struct acrn_vcpu **rtn
 	/* Create per vcpu vlapic */
 	vlapic_create(vcpu);
 
-#ifdef CONFIG_MTRR_ENABLED
-	init_vmtrr(vcpu);
-#endif
+	if (!vm_hide_mtrr(vm)) {
+		init_vmtrr(vcpu);
+	}
 
 	spinlock_init(&(vcpu->arch.lock));


@@ -322,10 +322,10 @@ void guest_cpuid(struct acrn_vcpu *vcpu, uint32_t *eax, uint32_t *ebx, uint32_t
 		*ebx &= ~APIC_ID_MASK;
 		*ebx |= (apicid << APIC_ID_SHIFT);
 
-#ifndef CONFIG_MTRR_ENABLED
-		/* mask mtrr */
-		*edx &= ~CPUID_EDX_MTRR;
-#endif
+		if (vm_hide_mtrr(vcpu->vm)) {
+			/* mask mtrr */
+			*edx &= ~CPUID_EDX_MTRR;
+		}
 
 		/* mask Debug Store feature */
 		*ecx &= ~(CPUID_ECX_DTES64 | CPUID_ECX_DS_CPL);
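
The guest-visible effect of this hunk is that CPUID leaf 01H no longer advertises MTRR (EDX bit 12)
when the flag is set. A minimal guest-side probe of that bit, written against GCC's <cpuid.h>
helper purely as an illustration (not part of this commit), could look like:

#include <cpuid.h>
#include <stdio.h>

/* Guest-side sketch: check whether the (virtual) CPU advertises MTRR.
 * CPUID.01H:EDX[12] is the architectural MTRR feature bit, the same bit
 * the hypervisor masks via CPUID_EDX_MTRR when vm_hide_mtrr() is true.
 */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(1U, &eax, &ebx, &ecx, &edx) == 0) {
		return 1;	/* CPUID leaf 1 not supported */
	}

	printf("MTRR %s\n", ((edx >> 12) & 1U) ? "advertised" : "hidden");
	return 0;
}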


@@ -71,6 +71,16 @@ bool is_lapic_pt(const struct acrn_vm *vm)
 	return ((vm_config->guest_flags & LAPIC_PASSTHROUGH) != 0U);
 }
 
+/**
+ * @pre vm != NULL && vm_config != NULL && vm->vmid < CONFIG_MAX_VM_NUM
+ */
+bool vm_hide_mtrr(const struct acrn_vm *vm)
+{
+	struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
+
+	return ((vm_config->guest_flags & HIDE_MTRR) != 0U);
+}
+
 /**
  * @brief Initialize the I/O bitmap for \p vm
  *


@@ -407,11 +407,11 @@ int32_t rdmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 	case MSR_IA32_MTRR_FIX4K_F0000:
 	case MSR_IA32_MTRR_FIX4K_F8000:
 	{
-#ifdef CONFIG_MTRR_ENABLED
-		v = read_vmtrr(vcpu, msr);
-#else
-		err = -EACCES;
-#endif
+		if (!vm_hide_mtrr(vcpu->vm)) {
+			v = read_vmtrr(vcpu, msr);
+		} else {
+			err = -EACCES;
+		}
 		break;
 	}
 	case MSR_IA32_BIOS_SIGN_ID:
@@ -549,11 +549,11 @@ int32_t wrmsr_vmexit_handler(struct acrn_vcpu *vcpu)
 	case MSR_IA32_MTRR_FIX4K_F0000:
 	case MSR_IA32_MTRR_FIX4K_F8000:
 	{
-#ifdef CONFIG_MTRR_ENABLED
-		write_vmtrr(vcpu, msr, v);
-#else
-		err = -EACCES;
-#endif
+		if (!vm_hide_mtrr(vcpu->vm)) {
+			write_vmtrr(vcpu, msr, v);
+		} else {
+			err = -EACCES;
+		}
 		break;
 	}
 	case MSR_IA32_BIOS_SIGN_ID:
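
With HIDE_MTRR set, both handlers now report -EACCES for the fixed-range MTRR MSRs, so a guest
access is expected to end in #GP injection rather than returning a value. Purely as an
illustration (not part of this commit), a bare guest-side read of one of these MSRs would look
like the sketch below; 0x26FU is the architectural index of IA32_MTRR_FIX4K_F8000.

#include <stdint.h>

/* Guest-side sketch (not part of this commit): read a fixed-range MTRR MSR.
 * When the hypervisor hides MTRR, the handler above returns -EACCES and the
 * guest is expected to take #GP(0) instead, so real guest code should use a
 * fault-tolerant accessor (e.g. Linux's rdmsr_safe()) rather than this.
 */
static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	__asm__ volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32U) | lo;
}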


@@ -12,8 +12,6 @@
 #include <vm.h>
 #include <logmsg.h>
 
-#ifdef CONFIG_MTRR_ENABLED
-
 #define MTRR_FIXED_RANGE_ALL_WB	(MTRR_MEM_TYPE_WB \
 				| (MTRR_MEM_TYPE_WB << 8U) \
 				| (MTRR_MEM_TYPE_WB << 16U) \
@@ -266,5 +264,3 @@ uint64_t read_vmtrr(const struct acrn_vcpu *vcpu, uint32_t msr)
 
 	return ret;
 }
-
-#endif /* CONFIG_MTRR_ENABLED */


@@ -256,9 +256,7 @@ struct acrn_vcpu_arch {
 	/* per vcpu lapic */
 	struct acrn_vlapic vlapic;
 
-#ifdef CONFIG_MTRR_ENABLED
 	struct acrn_vmtrr vmtrr;
-#endif
 
 	int32_t cur_context;
 	struct cpu_context contexts[NR_WORLD];


@@ -221,6 +221,7 @@ void vrtc_init(struct acrn_vm *vm);
 #endif
 
 bool is_lapic_pt(const struct acrn_vm *vm);
+bool vm_hide_mtrr(const struct acrn_vm *vm);
 
 #endif /* !ASSEMBLER */


@@ -50,7 +50,8 @@
 #define SECURE_WORLD_ENABLED	(1UL << 0U)	/* Whether secure world is enabled */
 #define LAPIC_PASSTHROUGH	(1UL << 1U)	/* Whether LAPIC is passed through */
 #define IO_COMPLETION_POLLING	(1UL << 2U)	/* Whether need hypervisor poll IO completion */
-#define CLOS_REQUIRED		(1UL << 3U)	/* Whether CLOS is required */
+#define CLOS_REQUIRED		(1UL << 3U)	/* Whether CLOS is required */
+#define HIDE_MTRR		(1UL << 4U)	/* Whether hide MTRR from VM */
 
 /**
  * @brief Hypercall