hv: nested: audit guest EPT mapping during shadow EPT entries setup

generate_shadow_ept_entry() didn't verify the correctness of the requested
guest EPT mapping. That might leak host memory access to the L2 VM.

To simplify the implementation of the guest EPT audit, hide the
'map 2-Mbyte page' and 'map 1-Gbyte page' capabilities from the L1 VM.
In addition, minimize the attribute bits of the EPT entry when creating a
shadow EPT entry. Also, for an invalid requested mapping address, reflect
the EPT_VIOLATION to the L1 VM.

Here, we have some TODOs:
1) Enable large page support in generate_shadow_ept_entry()
2) Evaluate whether the invalid GPA access of L2 needs to be emulated in the HV directly.
3) Minimize EPT entry attributes.

Tracked-On: #5923
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Shuo A Liu 2021-05-29 00:03:52 +08:00 committed by Xie, Nanlin
parent 3110e70d0a
commit 15e6c5b9cf
2 changed files with 71 additions and 13 deletions

View File

@ -166,7 +166,7 @@ void init_vmx_msrs(struct acrn_vcpu *vcpu)
* Hide 5 level EPT capability
* Hide accessed and dirty flags for EPT
*/
msr_value &= ~(VMX_EPT_PAGE_WALK_5 | VMX_EPT_AD);
msr_value &= ~(VMX_EPT_PAGE_WALK_5 | VMX_EPT_AD | VMX_EPT_2MB_PAGE | VMX_EPT_1GB_PAGE);
vcpu_set_guest_msr(vcpu, MSR_IA32_VMX_EPT_VPID_CAP, msr_value);
/* For now passthru the value from physical MSR to L1 guest */

View File

@ -207,33 +207,82 @@ void put_nept_desc(uint64_t guest_eptp)
}
}
/*
 * Walk the EPT paging structure rooted at 'eptp' and locate the present
 * leaf entry that maps 'gpa'.
 *
 * On success, the raw leaf EPT entry is returned and '*level' is set to
 * the paging level (walking from IA32E_PML4 down toward IA32E_PT) at
 * which the leaf was found. If a non-present entry is hit during the
 * walk, an error is logged and 0UL is returned; '*level' is left
 * unmodified in that case.
 *
 * NOTE(review): the entry's PFN is cast to a directly dereferenceable
 * pointer — this assumes an HPA==HVA identity mapping inside the
 * hypervisor; confirm against the HV's memory-layout guarantees.
 */
static uint64_t get_leaf_entry(uint64_t gpa, uint64_t *eptp, enum _page_table_level *level)
{
/* Start the walk at the top-level table (PML4). */
enum _page_table_level pt_level = IA32E_PML4;
uint16_t offset;
uint64_t ept_entry = 0UL;
uint64_t *p_ept_entry = eptp;
while (pt_level <= IA32E_PT) {
/* Index of the entry covering 'gpa' within the table at this level. */
offset = PAGING_ENTRY_OFFSET(gpa, pt_level);
ept_entry = p_ept_entry[offset];
if (is_present_ept_entry(ept_entry)) {
if (is_leaf_ept_entry(ept_entry, pt_level)) {
/* Found the mapping; report the level it lives at. */
*level = pt_level;
break;
}
} else {
/* Hole in the paging structure: 'gpa' is not mapped. */
ept_entry = 0UL;
pr_err("%s, GPA[%llx] is invalid!", __func__, gpa);
break;
}
/* Present but not a leaf: descend into the next-level table. */
p_ept_entry = (uint64_t *)(ept_entry & EPT_ENTRY_PFN_MASK);
pt_level += 1;
}
return ept_entry;
}
/*
 * NOTE(review): this block is rendered from a unified diff with the +/-
 * markers stripped, so pre-change and post-change lines appear
 * interleaved (e.g. the duplicated parameter declaration and the two
 * 'shadow_ept_entry' declarations below). It is NOT compilable as-is;
 * consult the actual commit (15e6c5b9cf) for the final source.
 */
/**
 * @brief Shadow a guest EPT entry
 * @pre vcpu != NULL
 */
static uint64_t generate_shadow_ept_entry(struct acrn_vcpu *vcpu, uint64_t guest_ept_entry,
enum _page_table_level guest_level)
enum _page_table_level guest_ept_level)
{
uint64_t shadow_ept_entry;
uint64_t shadow_ept_entry = 0UL;
uint64_t ept_entry;
enum _page_table_level ept_level;
/*
 * Clone a shadow EPT entry w/o physical address bits from guest EPT entry
 * TODO:
 * Before the cloning, host EPT mapping audit is a necessary.
 * Create a shadow EPT entry
 * We only support 4K page for guest EPT. So it's simple to create a shadow EPT entry
 * for it. The rules are:
 * > Find the host EPT leaf entry of address in ept_entry[M-1:12], named as ept_entry
 * > Minimize the attribute bits (according to ept_entry and guest_ept_entry) and
 * set in shadow EPT entry shadow_ept_entry.
 * > Set the HPA of guest_ept_entry[M-1:12] to shadow_ept_entry.
 */
/* Pre-change body (apparently removed by the commit): cloned the guest
 * entry's attributes without auditing the guest GPA->HPA mapping. */
shadow_ept_entry = guest_ept_entry & ~EPT_ENTRY_PFN_MASK;
if (is_leaf_ept_entry(guest_ept_entry, guest_level)) {
/*
 * Use guest EPT entry HPA in shadow EPT entry
 */
shadow_ept_entry |= gpa2hpa(vcpu->vm, (guest_ept_entry & EPT_ENTRY_PFN_MASK));
/* Post-change body (apparently added by the commit): audits the guest
 * mapping via get_leaf_entry() and returns 0UL for an invalid GPA so the
 * caller can reflect the EPT violation to the L1 VM. */
if (is_leaf_ept_entry(guest_ept_entry, guest_ept_level)) {
ASSERT(guest_ept_level == IA32E_PT, "Only support 4K page for guest EPT!");
ept_entry = get_leaf_entry((guest_ept_entry & EPT_ENTRY_PFN_MASK), get_ept_entry(vcpu->vm), &ept_level);
if (ept_entry != 0UL) {
/*
 * TODO:
 * Now, take guest EPT entry attributes directly. We need take care
 * of memory type, permission bits, reserved bits when we merge EPT
 * entry and guest EPT entry.
 *
 * Just keep the code skeleton here for extend.
 */
shadow_ept_entry = guest_ept_entry & ~EPT_ENTRY_PFN_MASK;
/*
 * Set the address.
 * gpa2hpa() should be successful as ept_entry already be found.
 */
shadow_ept_entry |= gpa2hpa(vcpu->vm, (guest_ept_entry & EPT_ENTRY_PFN_MASK));
}
} else {
/* Non-leaf guest entry: back it with a freshly allocated shadow page. */
/* Use a HPA of a new page in shadow EPT entry */
shadow_ept_entry = guest_ept_entry & ~EPT_ENTRY_PFN_MASK;
shadow_ept_entry |= hva2hpa((void *)alloc_page(&sept_page_pool)) & EPT_ENTRY_PFN_MASK;
}
return shadow_ept_entry;
}
/*
@ -378,6 +427,15 @@ bool handle_l2_ept_violation(struct acrn_vcpu *vcpu)
/* Create a shadow EPT entry */
shadow_ept_entry = generate_shadow_ept_entry(vcpu, guest_ept_entry, pt_level);
p_shadow_ept_page[offset] = shadow_ept_entry;
if (shadow_ept_entry == 0UL) {
/*
* TODO:
* For invalid GPA in guest EPT entries, now reflect the violation to L1 VM.
* Need to revisit this and evaluate if need to emulate the invalid GPA
* access of L2 in HV directly.
*/
break;
}
}
/* Shadow EPT entry exists */