hv: bugfix for the API of creating secure world

Before this patch, the HV accessed the secure world's PML4E even when the
PML4 did not exist, dereferencing a NULL pointer. Fix as follows: before
copying the PDPT entries, allocate the paging structures and write the
PML4E, then copy the PDPT entries.

Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Reviewed-by: Jason Chen CJ <jason.cj.chen@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent f347d15e7d
commit cf7fe07276
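The heart of the fix is an ordering change, and the sketch below models it as a self-contained userspace C program. This is an illustration, not ACRN code: alloc_page, REF_MASK, and EPT_RWX are hypothetical stand-ins for alloc_paging_struct(), IA32E_REF_MASK, and the EPT R/W/X bits, and host virtual and host physical addresses are treated as identical, so HVA2HPA/HPA2HVA drop out. Writing the secure-world PML4E before the copy is what keeps the masked entry address from being NULL.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096
#define REF_MASK  (~(uint64_t)0xFFF)  /* address bits of a paging entry */
#define EPT_RWX   ((uint64_t)0x7)     /* read/write/execute "present" bits */

/* Stand-in for alloc_paging_struct(): one zeroed, page-aligned page. */
static void *alloc_page(void)
{
	void *p = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	if (p == NULL)
		exit(EXIT_FAILURE);
	memset(p, 0, PAGE_SIZE);
	return p;
}

int main(void)
{
	uint64_t *nworld_pml4 = alloc_page();
	uint64_t *nworld_pdpt = alloc_page();
	uint64_t *sworld_pml4 = alloc_page();

	/* The normal world already has a populated PML4E. */
	nworld_pml4[0] = (uint64_t)(uintptr_t)nworld_pdpt | EPT_RWX;

	/*
	 * The fix: allocate the secure-world PDPT and write the PML4E
	 * *before* copying. The old loop read the secure-world PML4E
	 * first, so the masked address was 0 and the copy wrote to NULL.
	 */
	uint64_t *sworld_pdpt = alloc_page();
	sworld_pml4[0] = (uint64_t)(uintptr_t)sworld_pdpt | EPT_RWX;

	/* Only now is copying the PDPT page from the normal world safe. */
	memcpy((void *)(uintptr_t)(sworld_pml4[0] & REF_MASK),
	       (void *)(uintptr_t)(nworld_pml4[0] & REF_MASK), PAGE_SIZE);

	printf("sworld PML4E = %#llx\n", (unsigned long long)sworld_pml4[0]);
	return 0;
}

Reverting to the old order, i.e. reading sworld_pml4[0] before any PDPT has been installed, makes the memcpy destination NULL, which is exactly the crash the commit message describes.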
@@ -96,56 +96,68 @@ _Static_assert(sizeof(struct trusty_startup_param)
 	exec_vmwrite(VMX_GUEST_##SEG_NAME##_ATTR, seg.attr); \
 }
 
-void create_secure_world_ept(struct vm *vm, uint64_t gpa,
-		uint64_t size, uint64_t rebased_gpa)
+static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
+		uint64_t size, uint64_t gpa_rebased)
 {
-	int i = 0;
-	uint64_t pml4e = 0;
-	uint64_t entry = 0;
+	uint64_t nworld_pml4e = 0;
+	uint64_t sworld_pml4e = 0;
 	struct map_params map_params;
-	uint64_t hpa = gpa2hpa(vm, gpa);
+	uint64_t gpa = 0;
+	uint64_t hpa = gpa2hpa(vm, gpa_orig);
+	uint64_t table_present = (IA32E_EPT_R_BIT |
+				IA32E_EPT_W_BIT |
+				IA32E_EPT_X_BIT);
+	void *sub_table_addr = NULL;
 	struct vm *vm0 = get_vm_from_vmid(0);
 
-	/* Create secure world eptp */
-	if (vm->sworld_control.sworld_enabled && !vm->arch_vm.sworld_eptp)
-		vm->arch_vm.sworld_eptp = alloc_paging_struct();
+	if (!vm->sworld_control.sworld_enabled
+			|| vm->arch_vm.sworld_eptp != NULL) {
+		pr_err("Sworld is not enabled or Sworld eptp is not NULL");
+		return;
+	}
 
 	map_params.page_table_type = PTT_EPT;
 	map_params.pml4_inverted = vm->arch_vm.m2p;
 
-	/* unmap gpa~gpa+size from guest ept mapping */
+	/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
 	map_params.pml4_base = vm->arch_vm.nworld_eptp;
-	unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0);
+	unmap_mem(&map_params, (void *)hpa, (void *)gpa_orig, size, 0);
 
 	/* Copy PDPT entries from Normal world to Secure world
 	 * Secure world can access Normal World's memory,
 	 * but Normal World can not access Secure World's memory.
-	 * The PML4/PDPT for Secure world are separated from Normal World.
-	 * PD/PT are shared in both Secure world's EPT and Normal World's EPT
+	 * The PML4/PDPT for Secure world are separated from
+	 * Normal World.PD/PT are shared in both Secure world's EPT
+	 * and Normal World's EPT
 	 */
-	for (i = 0; i < IA32E_NUM_ENTRIES; i++) {
-		pml4e = MEM_READ64(vm->arch_vm.nworld_eptp);
-		entry = MEM_READ64((pml4e & IA32E_REF_MASK)
-				+ (i * IA32E_COMM_ENTRY_SIZE));
-		pml4e = MEM_READ64(vm->arch_vm.sworld_eptp);
-		MEM_WRITE64((pml4e & IA32E_REF_MASK)
-				+ (i * IA32E_COMM_ENTRY_SIZE),
-				entry);
-	}
+	vm->arch_vm.sworld_eptp = alloc_paging_struct();
 
-	/* Map rebased_gpa~rebased_gpa+size
+	/* The trusty memory is remapped to guest physical address
+	 * of gpa_rebased to gpa_rebased + size
+	 */
+	sub_table_addr = alloc_paging_struct();
+	sworld_pml4e = HVA2HPA(sub_table_addr)
+			| table_present;
+	MEM_WRITE64(vm->arch_vm.sworld_eptp, sworld_pml4e);
+
+	nworld_pml4e = MEM_READ64(vm->arch_vm.nworld_eptp);
+	memcpy_s(HPA2HVA(sworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE,
+		HPA2HVA(nworld_pml4e & IA32E_REF_MASK), CPU_PAGE_SIZE);
+
+	/* Map gpa_rebased~gpa_rebased+size
 	 * to secure ept mapping
 	 */
 	map_params.pml4_base = vm->arch_vm.sworld_eptp;
 	map_mem(&map_params, (void *)hpa,
-			(void *)rebased_gpa, size,
+			(void *)gpa_rebased, size,
 			(MMU_MEM_ATTR_READ |
 			MMU_MEM_ATTR_WRITE |
 			MMU_MEM_ATTR_EXECUTE |
 			MMU_MEM_ATTR_WB_CACHE));
 
-	/* Unap gap~gpa+size from sos ept mapping*/
+	/* Unmap trusty memory space from sos ept mapping*/
 	map_params.pml4_base = vm0->arch_vm.nworld_eptp;
+	map_params.pml4_inverted = vm0->arch_vm.m2p;
 	/* Get the gpa address in SOS */
 	gpa = hpa2gpa(vm0, hpa);
 	unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0);