remove redundant erasing of guest memory

The guest memory is allocated through hugetlb. Huge page fault is
triggered by writing a byte to the starting address for each huge page.
In hugetlb_fault(), the physical pages are allocated and *cleared*.

So there is no need to erase the memory again after hugetlb setup. The
extra erase added about 130 ms (varies with platform) of latency for
each 1 GB of memory.

Note: this means we depend on the kernel to erase the memory in the
huge page fault path!

Tracked-On: #7298
Signed-off-by: Conghui <conghui.chen@intel.com>
Acked-by: Wang, Yu1 <yu1.wang@intel.com>
This commit is contained in:
Conghui 2022-04-15 16:24:49 +08:00 committed by acrnsi-robot
parent 2d0089f06b
commit 844c6e0bbc
2 changed files with 3 additions and 9 deletions

View File

@ -245,6 +245,8 @@ static int mmap_hugetlbfs_from_level(struct vmctx *ctx, int level, size_t len,
pr_info("touch %ld pages with pagesz 0x%lx\n", len/pagesz, pagesz);
/* Access to the address will trigger hugetlb_fault() in kernel,
* it will allocate and clear the huge page.*/
for (i = 0; i < len/pagesz; i++) {
*(volatile char *)addr = *addr;
addr += pagesz;

View File

@ -357,8 +357,6 @@ vm_map_memseg_vma(struct vmctx *ctx, size_t len, vm_paddr_t gpa,
int
vm_setup_memory(struct vmctx *ctx, size_t memsize)
{
int ret;
/*
* If 'memsize' cannot fit entirely in the 'lowmem' segment then
* create another 'highmem' segment above 4GB for the remainder.
@ -374,13 +372,7 @@ vm_setup_memory(struct vmctx *ctx, size_t memsize)
ctx->biosmem = high_bios_size();
ctx->fbmem = (16 * 1024 * 1024);
ret = hugetlb_setup_memory(ctx);
if (ret == 0) {
/* mitigate reset attack */
bzero((void *)ctx->baseaddr, ctx->lowmem);
bzero((void *)(ctx->baseaddr + ctx->highmem_gpa_base), ctx->highmem);
}
return ret;
return hugetlb_setup_memory(ctx);
}
void