diff --git a/devicemodel/core/hugetlb.c b/devicemodel/core/hugetlb.c index 5ee737f7c..c33e9470c 100644 --- a/devicemodel/core/hugetlb.c +++ b/devicemodel/core/hugetlb.c @@ -245,6 +245,8 @@ static int mmap_hugetlbfs_from_level(struct vmctx *ctx, int level, size_t len, pr_info("touch %ld pages with pagesz 0x%lx\n", len/pagesz, pagesz); + /* Accessing the address will trigger hugetlb_fault() in the kernel, + * which will allocate and clear the huge page. */ for (i = 0; i < len/pagesz; i++) { *(volatile char *)addr = *addr; addr += pagesz; diff --git a/devicemodel/core/vmmapi.c b/devicemodel/core/vmmapi.c index ccf112e70..336aee01f 100644 --- a/devicemodel/core/vmmapi.c +++ b/devicemodel/core/vmmapi.c @@ -357,8 +357,6 @@ vm_map_memseg_vma(struct vmctx *ctx, size_t len, vm_paddr_t gpa, int vm_setup_memory(struct vmctx *ctx, size_t memsize) { - int ret; - /* * If 'memsize' cannot fit entirely in the 'lowmem' segment then * create another 'highmem' segment above 4GB for the remainder. @@ -374,13 +372,7 @@ vm_setup_memory(struct vmctx *ctx, size_t memsize) ctx->biosmem = high_bios_size(); ctx->fbmem = (16 * 1024 * 1024); - ret = hugetlb_setup_memory(ctx); - if (ret == 0) { - /* mitigate reset attack */ - bzero((void *)ctx->baseaddr, ctx->lowmem); - bzero((void *)(ctx->baseaddr + ctx->highmem_gpa_base), ctx->highmem); - } - return ret; + return hugetlb_setup_memory(ctx); } void