hv: mmu: add static paging table allocation for hypervisor

Add static paging table allocation API for hypervisor.
Note: PLATFORM_RAM_SIZE and PLATFORM_MMIO_SIZE must be configured to match the platform's actual RAM and MMIO sizes exactly.

Rename RAM_START/RAM_SIZE to HV_RAM_START/HV_RAM_SIZE for HV.

Tracked-On: #861
Signed-off-by: Li, Fei1 <fei1.li@intel.com>
This commit is contained in:
Li, Fei1 2018-10-29 23:28:32 +08:00 committed by lijinxia
parent 74a5eec3a7
commit dc9d18a868
12 changed files with 163 additions and 39 deletions

View File

@ -121,6 +121,7 @@ C_SRCS += arch/x86/cpu.c
C_SRCS += arch/x86/cpuid.c
C_SRCS += arch/x86/mmu.c
C_SRCS += arch/x86/pagetable.c
C_SRCS += arch/x86/page.c
C_SRCS += arch/x86/notify.c
C_SRCS += arch/x86/vtd.c
C_SRCS += arch/x86/gdt.c

View File

@ -208,25 +208,32 @@ config LOW_RAM_SIZE
A 32-bit integer indicating the size of RAM region below address
0x10000, starting from address 0x0.
config RAM_START
hex "Address of the RAM region assigned to the hypervisor"
config HV_RAM_START
hex "Start physical address of the RAM region used by the hypervisor"
default 0x6e000000 if PLATFORM_SBL
default 0x00100000 if PLATFORM_UEFI
help
A 64-bit integer indicating the base address to where the hypervisor
A 64-bit integer indicating the base physical address to where the hypervisor
should be loaded to. If RELOC is disabled the bootloader is required
to load the hypervisor to this specific address. Otherwise the
hypervisor will not boot. With RELOC enabled the hypervisor is capable
of relocating its symbols to where it is placed at, and thus the
bootloader may not place the hypervisor at this specific address.
config RAM_SIZE
hex "Size of the RAM region assigned to the hypervisor"
default 0x02000000
config HV_RAM_SIZE
hex "Size of the RAM region used by the hypervisor"
default 0x04000000
help
A 64-bit integer indicating the size of RAM assigned to the
hypervisor. It is ensured at link time that the footprint of the
hypervisor does not exceed this size.
A 64-bit integer indicating the size of RAM used by the hypervisor.
It is ensured at link time that the footprint of the hypervisor
does not exceed this size.
config PLATFORM_RAM_SIZE
hex "Size of the physical platform RAM"
default 0x200000000
help
A 64-bit integer indicating the size of the physical platform RAM
(not including the MMIO space).
config CONSTANT_ACPI
bool "The platform ACPI info is constant"

View File

@ -537,8 +537,8 @@ static void rebuild_vm0_e820(void)
uint32_t i;
uint64_t entry_start;
uint64_t entry_end;
uint64_t hv_start = get_hv_image_base();
uint64_t hv_end = hv_start + CONFIG_RAM_SIZE;
uint64_t hv_start_pa = get_hv_image_base();
uint64_t hv_end_pa = hv_start_pa + CONFIG_HV_RAM_SIZE;
struct e820_entry *entry, new_entry = {0};
/* hypervisor mem need be filter out from e820 table
@ -550,36 +550,36 @@ static void rebuild_vm0_e820(void)
entry_end = entry->baseaddr + entry->length;
/* No need handle in these cases*/
if ((entry->type != E820_TYPE_RAM) || (entry_end <= hv_start)
|| (entry_start >= hv_end)) {
if ((entry->type != E820_TYPE_RAM) || (entry_end <= hv_start_pa)
|| (entry_start >= hv_end_pa)) {
continue;
}
/* filter out hv mem and adjust length of this entry*/
if ((entry_start < hv_start) && (entry_end <= hv_end)) {
entry->length = hv_start - entry_start;
if ((entry_start < hv_start_pa) && (entry_end <= hv_end_pa)) {
entry->length = hv_start_pa - entry_start;
continue;
}
/* filter out hv mem and need to create a new entry*/
if ((entry_start < hv_start) && (entry_end > hv_end)) {
entry->length = hv_start - entry_start;
new_entry.baseaddr = hv_end;
new_entry.length = entry_end - hv_end;
if ((entry_start < hv_start_pa) && (entry_end > hv_end_pa)) {
entry->length = hv_start_pa - entry_start;
new_entry.baseaddr = hv_end_pa;
new_entry.length = entry_end - hv_end_pa;
new_entry.type = E820_TYPE_RAM;
continue;
}
/* This entry is within the range of hv mem
* change to E820_TYPE_RESERVED
*/
if ((entry_start >= hv_start) && (entry_end <= hv_end)) {
if ((entry_start >= hv_start_pa) && (entry_end <= hv_end_pa)) {
entry->type = E820_TYPE_RESERVED;
continue;
}
if ((entry_start >= hv_start) && (entry_start < hv_end)
&& (entry_end > hv_end)) {
entry->baseaddr = hv_end;
entry->length = entry_end - hv_end;
if ((entry_start >= hv_start_pa) && (entry_start < hv_end_pa)
&& (entry_end > hv_end_pa)) {
entry->baseaddr = hv_end_pa;
entry->length = entry_end - hv_end_pa;
continue;
}
@ -595,7 +595,7 @@ static void rebuild_vm0_e820(void)
entry->type = new_entry.type;
}
e820_mem.total_mem_size -= CONFIG_RAM_SIZE;
e820_mem.total_mem_size -= CONFIG_HV_RAM_SIZE;
}
/**
@ -649,7 +649,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
* will cause EPT violation if sos accesses hv memory
*/
hv_hpa = get_hv_image_base();
ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_RAM_SIZE);
ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_HV_RAM_SIZE);
return 0;
}

View File

@ -268,7 +268,7 @@ void init_paging(void)
* to supervisor-mode for hypervisor-owned memory.
*/
hv_hpa = get_hv_image_base();
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa, CONFIG_RAM_SIZE,
mmu_modify_or_del((uint64_t *)mmu_pml4_addr, hv_hpa, CONFIG_HV_RAM_SIZE,
PAGE_CACHE_WB, PAGE_CACHE_MASK | PAGE_USER,
PTT_PRIMARY, MR_MODIFY);

View File

@ -0,0 +1,67 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <hypervisor.h>

/* Number of pages needed at each paging-structure level to map a physical
 * address space of 'size' bytes. A single PML4 page always suffices (it
 * covers the entire 4-level paging address space).
 */
#define PML4_PAGE_NUM(size)	1UL
/* One PDPT page per PML4 entry: round 'size' up to a PML4E granule. */
#define PDPT_PAGE_NUM(size)	(((size) + PML4E_SIZE - 1UL) >> PML4E_SHIFT)
/* One PD page per PDPT entry. */
#define PD_PAGE_NUM(size)	(((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT)
/* One PT page per PD entry. */
#define PT_PAGE_NUM(size)	(((size) + PDE_SIZE - 1UL) >> PDE_SHIFT)

/* Statically define the page array backing one paging-structure level,
 * e.g. DEFINE_PGTABLE_PAGE(ppt_, pd, PD, sz) expands to:
 *   static struct page ppt_pd_pages[PD_PAGE_NUM(sz)];
 */
#define DEFINE_PGTABLE_PAGE(prefix, lvl, LVL, size) \
static struct page prefix ## lvl ## _pages[LVL ## _PAGE_NUM(size)]

/* Static pools for the hypervisor's primary page tables, sized to cover
 * platform RAM plus the low MMIO window.
 * NOTE(review): if CONFIG_PLATFORM_RAM_SIZE understates the real platform
 * RAM, these arrays will be undersized — must match the platform exactly.
 */
DEFINE_PGTABLE_PAGE(ppt_, pml4, PML4, CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE);
DEFINE_PGTABLE_PAGE(ppt_, pdpt, PDPT, CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE);
DEFINE_PGTABLE_PAGE(ppt_, pd, PD, CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE);
/* ppt: primary page table — static pools backing the hypervisor's own
 * (host) paging structures; no PT level is needed here since the primary
 * tables map at 1GB/2MB granularity (no pt_base is provided).
 * NOTE(review): the granularity claim is inferred from the absent pt pool
 * and get_pt_page callback — confirm against the page-table walker.
 */
static union pgtable_pages_info ppt_pages_info = {
	.ppt = {
		.pml4_base = ppt_pml4_pages,
		.pdpt_base = ppt_pdpt_pages,
		.pd_base = ppt_pd_pages,
	}
};
/* Default access rights for a primary page-table entry:
 * present, writable, and user-accessible.
 */
static inline uint64_t ppt_get_default_access_right(void)
{
	uint64_t rights = PAGE_PRESENT;

	rights |= (PAGE_RW | PAGE_USER);
	return rights;
}
/* Non-zero iff the entry's Present bit is set. */
static inline uint64_t ppt_pgentry_present(uint64_t pte)
{
	uint64_t present_bit = pte & PAGE_PRESENT;

	return present_bit;
}
/* Return the (single) PML4 page, zeroed. 'gpa' is unused because one
 * PML4 page covers the entire address space.
 */
static inline struct page *ppt_get_pml4_page(const union pgtable_pages_info *info, __unused uint64_t gpa)
{
	struct page *pml4_page = info->ppt.pml4_base;

	(void)memset(pml4_page, 0U, PAGE_SIZE);

	return pml4_page;
}
/* Return the PDPT page covering 'gpa', zeroed. One PDPT page exists per
 * PML4 entry, hence the gpa >> PML4E_SHIFT index into the pool.
 */
static inline struct page *ppt_get_pdpt_page(const union pgtable_pages_info *info, uint64_t gpa)
{
	uint64_t idx = gpa >> PML4E_SHIFT;
	struct page *pdpt_page = &info->ppt.pdpt_base[idx];

	(void)memset(pdpt_page, 0U, PAGE_SIZE);

	return pdpt_page;
}
/* Return the PD page covering 'gpa', zeroed. One PD page exists per
 * PDPT entry, hence the gpa >> PDPTE_SHIFT index into the pool.
 */
static inline struct page *ppt_get_pd_page(const union pgtable_pages_info *info, uint64_t gpa)
{
	uint64_t idx = gpa >> PDPTE_SHIFT;
	struct page *pd_page = &info->ppt.pd_base[idx];

	(void)memset(pd_page, 0U, PAGE_SIZE);

	return pd_page;
}
/* Callback table binding the primary-page-table helpers above into a
 * struct memory_ops instance for use by the generic page-table code.
 * No .get_pt_page is provided — the primary tables stop at the PD level.
 */
const struct memory_ops ppt_mem_ops = {
	.info = &ppt_pages_info,
	.get_default_access_right = ppt_get_default_access_right,
	.pgentry_present = ppt_pgentry_present,
	.get_pml4_page = ppt_get_pml4_page,
	.get_pdpt_page = ppt_get_pdpt_page,
	.get_pd_page = ppt_get_pd_page,
};

View File

@ -33,7 +33,7 @@ static inline uint64_t elf64_r_type(uint64_t i)
uint64_t trampoline_start16_paddr;
/* get the delta between CONFIG_RAM_START and the actual load address */
/* get the delta between CONFIG_HV_RAM_START and the actual load address */
uint64_t get_hv_image_delta(void)
{
uint64_t addr;
@ -52,7 +52,7 @@ uint64_t get_hv_image_delta(void)
/* get the actual Hypervisor load address */
uint64_t get_hv_image_base(void)
{
return (get_hv_image_delta() + CONFIG_RAM_START);
return (get_hv_image_delta() + CONFIG_HV_RAM_START);
}
/*
@ -63,7 +63,7 @@ uint64_t get_hv_image_base(void)
* This function is valid if:
* - The hpa of HV code is always higher than trampoline code
* - The HV code is always relocated to higher address, compared
* with CONFIG_RAM_START
* with CONFIG_HV_RAM_START
*/
static uint64_t trampoline_relo_addr(const void *addr)
{

View File

@ -6,7 +6,7 @@ MEMORY
lowram : ORIGIN = 0, LENGTH = CONFIG_LOW_RAM_SIZE
/* 32 MBytes of RAM for HV */
ram : ORIGIN = CONFIG_RAM_START, LENGTH = CONFIG_RAM_SIZE
ram : ORIGIN = CONFIG_HV_RAM_START, LENGTH = CONFIG_HV_RAM_SIZE
}
SECTIONS

View File

@ -356,7 +356,7 @@ efi_main(EFI_HANDLE image, EFI_SYSTEM_TABLE *_table)
}
/* without relocation enabled, the hypervisor binary needs to reside in
* fixed memory address starting from CONFIG_RAM_START, make a call
* fixed memory address starting from CONFIG_HV_RAM_START, make a call
* to emalloc_fixed_addr for that case. With CONFIG_RELOC enabled,
* hypervisor is able to do relocation, the only requirement is that
* it need to reside in memory below 4GB, call emalloc_reserved_mem()
@ -365,7 +365,7 @@ efi_main(EFI_HANDLE image, EFI_SYSTEM_TABLE *_table)
#ifdef CONFIG_RELOC
err = emalloc_reserved_mem(&hv_hpa, HV_RUNTIME_MEM_SIZE, MEM_ADDR_4GB);
#else
err = emalloc_fixed_addr(&hv_hpa, HV_RUNTIME_MEM_SIZE, CONFIG_RAM_START);
err = emalloc_fixed_addr(&hv_hpa, HV_RUNTIME_MEM_SIZE, CONFIG_HV_RAM_START);
#endif
if (err != EFI_SUCCESS)
goto failed;

View File

@ -78,13 +78,13 @@ typedef void(*hv_func)(int, struct multiboot_info*);
#define MBOOT_INFO_SIZE (sizeof(struct multiboot_info))
#define BOOT_CTX_SIZE (sizeof(struct efi_context))
#define HV_RUNTIME_MEM_SIZE \
(CONFIG_RAM_SIZE + MBOOT_MMAP_SIZE + MBOOT_INFO_SIZE + BOOT_CTX_SIZE)
(CONFIG_HV_RAM_SIZE + MBOOT_MMAP_SIZE + MBOOT_INFO_SIZE + BOOT_CTX_SIZE)
#define MBOOT_MMAP_PTR(addr) \
((struct multiboot_mmap *)((VOID *)addr + CONFIG_RAM_SIZE))
((struct multiboot_mmap *)((VOID *)addr + CONFIG_HV_RAM_SIZE))
#define MBOOT_INFO_PTR(addr) ((struct multiboot_info *) \
((VOID *)addr + CONFIG_RAM_SIZE + MBOOT_MMAP_SIZE))
((VOID *)addr + CONFIG_HV_RAM_SIZE + MBOOT_MMAP_SIZE))
#define BOOT_CTX_PTR(addr) ((struct efi_context *) \
((VOID *)addr + CONFIG_RAM_SIZE + MBOOT_MMAP_SIZE + MBOOT_INFO_SIZE))
((VOID *)addr + CONFIG_HV_RAM_SIZE + MBOOT_MMAP_SIZE + MBOOT_INFO_SIZE))
struct efi_info {

View File

@ -383,7 +383,7 @@ static int32_t local_set_vm_memory_region(struct vm *vm,
if (((hpa <= base_paddr) &&
((hpa + region->size) > base_paddr)) ||
((hpa >= base_paddr) &&
(hpa < (base_paddr + CONFIG_RAM_SIZE)))) {
(hpa < (base_paddr + CONFIG_HV_RAM_SIZE)))) {
pr_err("%s: overlap the HV memory region.", __func__);
return -EFAULT;
}
@ -515,7 +515,7 @@ static int32_t write_protect_page(struct vm *vm,const struct wp_data *wp)
base_paddr = get_hv_image_base();
if (((hpa <= base_paddr) && (hpa + CPU_PAGE_SIZE > base_paddr)) ||
((hpa >= base_paddr) &&
(hpa < base_paddr + CONFIG_RAM_SIZE))) {
(hpa < base_paddr + CONFIG_HV_RAM_SIZE))) {
pr_err("%s: overlap the HV memory region.", __func__);
return -EINVAL;
}

View File

@ -28,6 +28,7 @@
#include <vioapic.h>
#include <vm.h>
#include <cpuid.h>
#include <page.h>
#include <mmu.h>
#include <pgtable.h>
#include <irq.h>

View File

@ -0,0 +1,48 @@
/*
* Copyright (C) 2018 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PAGE_H
#define PAGE_H

/* Base page granularity: 4KB. */
#define PAGE_SHIFT	12U
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* size of the low MMIO address space: 2GB */
#define PLATFORM_LO_MMIO_SIZE	0x80000000UL

/* One page-sized, page-aligned chunk of memory, used as a
 * paging-structure page (PML4/PDPT/PD/PT).
 */
struct page {
	uint8_t contents[PAGE_SIZE];
} __aligned(PAGE_SIZE);

/* Pre-allocated pools of paging-structure pages. The .ppt view backs the
 * hypervisor's primary (host) page tables; the .ept view is presumably for
 * guest EPT tables (nworld/sworld) — it is not populated in this file,
 * confirm against the EPT code.
 */
union pgtable_pages_info {
	struct {
		struct page *pml4_base;
		struct page *pdpt_base;
		struct page *pd_base;
		struct page *pt_base;
	} ppt;
	struct {
		struct page *nworld_pml4_base;
		struct page *nworld_pdpt_base;
		struct page *nworld_pd_base;
		struct page *nworld_pt_base;
		struct page *sworld_pgtable_base;
	} ept;
};

/* Callbacks abstracting paging-structure allocation and entry semantics
 * for one paging mode. The get_*_page callbacks hand out the (zeroed)
 * page that serves the table covering 'gpa'.
 */
struct memory_ops {
	union pgtable_pages_info *info;
	uint64_t (*get_default_access_right)(void);
	uint64_t (*pgentry_present)(uint64_t pte);
	struct page *(*get_pml4_page)(const union pgtable_pages_info *info, uint64_t gpa);
	struct page *(*get_pdpt_page)(const union pgtable_pages_info *info, uint64_t gpa);
	struct page *(*get_pd_page)(const union pgtable_pages_info *info, uint64_t gpa);
	struct page *(*get_pt_page)(const union pgtable_pages_info *info, uint64_t gpa);
};

/* Ops instance for the hypervisor's primary page tables (defined in page.c). */
extern const struct memory_ops ppt_mem_ops;

#endif /* PAGE_H */