hv: replace CPU_PAGE_SIZE with PAGE_SIZE

replace CPU_PAGE_SIZE with PAGE_SIZE
These two macros are duplicates of each other, and PAGE_SIZE is the more reasonable name.

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Shiqing Gao 2018-12-03 09:15:28 +08:00 committed by wenlingz
parent e8e25bd6fc
commit 2f15d3569c
24 changed files with 54 additions and 49 deletions

View File

@ -128,7 +128,7 @@ primary_start_long_mode:
/* Initialize temporary stack pointer */
lea ld_bss_end(%rip), %rsp
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
add $0x1000,%rsp
/* 16 = CPU_STACK_ALIGN */
and $(~(16 - 1)),%rsp
@ -217,23 +217,23 @@ cpu_primary32_gdt_ptr:
.quad cpu_primary32_gdt
/* PML4, PDPT, and PD tables initialized to map first 4 GBytes of memory */
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
.align 0x1000
.global cpu_boot32_page_tables_start
cpu_boot32_page_tables_start:
/* 0x3 = (PAGE_PRESENT | PAGE_RW) */
.quad cpu_primary32_pdpt_addr + 0x3
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
.align 0x1000
cpu_primary32_pdpt_addr:
address = 0
.rept 4
/* 0x3 = (PAGE_PRESENT | PAGE_RW) */
.quad cpu_primary32_pdt_addr + address + 0x3
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
address = address + 0x1000
.endr
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
.align 0x1000
cpu_primary32_pdt_addr:
address = 0

View File

@ -193,13 +193,13 @@ trampoline_gdt_ptr:
cpu_boot_page_tables_ptr:
.long cpu_boot_page_tables_start
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
.align 0x1000
.global cpu_boot_page_tables_start
cpu_boot_page_tables_start:
/* 0x3 = (PAGE_PRESENT | PAGE_RW) */
.quad trampoline_pdpt_addr + 0x3
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
.align 0x1000
.global trampoline_pdpt_addr
trampoline_pdpt_addr:
@ -207,10 +207,10 @@ trampoline_pdpt_addr:
.rept 4
/* 0x3 = (PAGE_PRESENT | PAGE_RW) */
.quad trampoline_pdt_addr + address + 0x3
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
address = address + 0x1000
.endr
/*0x1000 = CPU_PAGE_SIZE*/
/*0x1000 = PAGE_SIZE*/
.align 0x1000
trampoline_pdt_addr:
address = 0

View File

@ -14,7 +14,7 @@ spinlock_t trampoline_spinlock = {
.tail = 0U
};
struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(CPU_PAGE_SIZE);
struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(PAGE_SIZE);
uint16_t phys_cpu_num = 0U;
static uint64_t pcpu_sync = 0UL;
static uint16_t up_count = 0U;

View File

@ -18,7 +18,7 @@ void destroy_ept(struct acrn_vm *vm)
}
if (vm->arch_vm.nworld_eptp != NULL) {
(void)memset(vm->arch_vm.nworld_eptp, 0U, CPU_PAGE_SIZE);
(void)memset(vm->arch_vm.nworld_eptp, 0U, PAGE_SIZE);
}
}

View File

@ -664,7 +664,7 @@ uint64_t e820_alloc_low_memory(uint32_t size_arg)
struct e820_entry *entry, *new_entry;
/* We want memory in page boundary and integral multiple of pages */
size = (((size + CPU_PAGE_SIZE) - 1U) >> CPU_PAGE_SHIFT)
size = (((size + PAGE_SIZE) - 1U) >> CPU_PAGE_SHIFT)
<< CPU_PAGE_SHIFT;
for (i = 0U; i < e820_entries; i++) {

View File

@ -535,7 +535,7 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
vcpu->arch.cur_context = NORMAL_WORLD;
vcpu->arch.irq_window_enabled = 0;
vcpu->arch.inject_event_pending = false;
(void)memset(vcpu->arch.vmcs, 0U, CPU_PAGE_SIZE);
(void)memset(vcpu->arch.vmcs, 0U, PAGE_SIZE);
for (i = 0; i < NR_WORLD; i++) {
(void)memset(&vcpu->arch.contexts[i], 0U,

View File

@ -84,7 +84,7 @@ vlapic_dump_isr(__unused struct acrn_vlapic *vlapic, __unused char *msg) {}
#endif
/*APIC-v APIC-access address */
static uint8_t apicv_apic_access_addr[CPU_PAGE_SIZE] __aligned(CPU_PAGE_SIZE);
static uint8_t apicv_apic_access_addr[PAGE_SIZE] __aligned(PAGE_SIZE);
static int
apicv_set_intr_ready(struct acrn_vlapic *vlapic, uint32_t vector);
@ -1581,7 +1581,7 @@ vlapic_write(struct acrn_vlapic *vlapic, uint32_t offset,
uint32_t data32 = (uint32_t)data;
int retval;
ASSERT(((offset & 0xfU) == 0U) && (offset < CPU_PAGE_SIZE),
ASSERT(((offset & 0xfU) == 0U) && (offset < PAGE_SIZE),
"%s: invalid offset %#x", __func__, offset);
dev_dbg(ACRN_DBG_LAPIC, "vlapic write offset %#x, data %#lx",
@ -2241,12 +2241,12 @@ int vlapic_create(struct acrn_vcpu *vcpu)
/* only need unmap it from SOS as UOS never mapped it */
if (is_vm0(vcpu->vm)) {
ept_mr_del(vcpu->vm, pml4_page,
DEFAULT_APIC_BASE, CPU_PAGE_SIZE);
DEFAULT_APIC_BASE, PAGE_SIZE);
}
ept_mr_add(vcpu->vm, pml4_page,
vlapic_apicv_get_apic_access_addr(),
DEFAULT_APIC_BASE, CPU_PAGE_SIZE,
DEFAULT_APIC_BASE, PAGE_SIZE,
EPT_WR | EPT_RD | EPT_UNCACHED);
}

View File

@ -11,7 +11,7 @@
/* Local variables */
static struct acrn_vm vm_array[CONFIG_MAX_VM_NUM] __aligned(CPU_PAGE_SIZE);
static struct acrn_vm vm_array[CONFIG_MAX_VM_NUM] __aligned(PAGE_SIZE);
static uint64_t vmid_bitmap;
@ -188,7 +188,7 @@ int create_vm(struct vm_description *vm_desc, struct acrn_vm **rtn_vm)
err:
if (vm->arch_vm.nworld_eptp != NULL) {
(void)memset(vm->arch_vm.nworld_eptp, 0U, CPU_PAGE_SIZE);
(void)memset(vm->arch_vm.nworld_eptp, 0U, PAGE_SIZE);
}
return status;

View File

@ -432,10 +432,10 @@ static void deny_guest_pio_access(struct acrn_vm *vm, uint16_t port_address,
void setup_io_bitmap(struct acrn_vm *vm)
{
if (is_vm0(vm)) {
(void)memset(vm->arch_vm.io_bitmap, 0x00U, CPU_PAGE_SIZE * 2U);
(void)memset(vm->arch_vm.io_bitmap, 0x00U, PAGE_SIZE * 2U);
} else {
/* block all IO port access from Guest */
(void)memset(vm->arch_vm.io_bitmap, 0xFFU, CPU_PAGE_SIZE * 2U);
(void)memset(vm->arch_vm.io_bitmap, 0xFFU, PAGE_SIZE * 2U);
}
}

View File

@ -21,7 +21,7 @@ struct trusty_mem {
struct trusty_key_info key_info;
struct trusty_startup_param startup_param;
} data;
uint8_t page[CPU_PAGE_SIZE];
uint8_t page[PAGE_SIZE];
} first_page;
/* The left memory is for trusty's code/data/heap/stack
@ -88,7 +88,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
* and Normal World's EPT
*/
pml4_base = vm->arch_vm.ept_mem_ops.info->ept.sworld_pgtable_base;
(void)memset(pml4_base, 0U, CPU_PAGE_SIZE);
(void)memset(pml4_base, 0U, PAGE_SIZE);
vm->arch_vm.sworld_eptp = pml4_base;
sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp);
@ -97,7 +97,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
*/
sub_table_addr = vm->arch_vm.ept_mem_ops.info->ept.sworld_pgtable_base +
TRUSTY_PML4_PAGE_NUM(TRUSTY_EPT_REBASE_GPA);
(void)memset(sub_table_addr, 0U, CPU_PAGE_SIZE);
(void)memset(sub_table_addr, 0U, PAGE_SIZE);
sworld_pml4e = hva2hpa(sub_table_addr) | table_present;
set_pgentry((uint64_t *)pml4_base, sworld_pml4e);

View File

@ -924,7 +924,7 @@ static void init_exec_ctrl(struct acrn_vcpu *vcpu)
value64 = hva2hpa(vm->arch_vm.io_bitmap);
exec_vmwrite64(VMX_IO_BITMAP_A_FULL, value64);
pr_dbg("VMX_IO_BITMAP_A: 0x%016llx ", value64);
value64 = hva2hpa((void *)&(vm->arch_vm.io_bitmap[CPU_PAGE_SIZE]));
value64 = hva2hpa((void *)&(vm->arch_vm.io_bitmap[PAGE_SIZE]));
exec_vmwrite64(VMX_IO_BITMAP_B_FULL, value64);
pr_dbg("VMX_IO_BITMAP_B: 0x%016llx ", value64);

View File

@ -129,8 +129,8 @@ struct context_table {
struct page buses[CONFIG_IOMMU_BUS_NUM];
};
static struct page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM] __aligned(CPU_PAGE_SIZE);
static struct page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(PAGE_SIZE);
static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM] __aligned(PAGE_SIZE);
static inline uint8_t* get_root_table(uint32_t dmar_index)
{

View File

@ -507,7 +507,7 @@ static int32_t set_vm_memory_region(struct acrn_vm *vm,
uint64_t prot;
uint64_t *pml4_page;
if ((region->size & (CPU_PAGE_SIZE - 1UL)) != 0UL) {
if ((region->size & (PAGE_SIZE - 1UL)) != 0UL) {
pr_err("%s: [vm%d] map size 0x%x is not page aligned",
__func__, target_vm->vm_id, region->size);
return -EINVAL;
@ -650,7 +650,7 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
vm->vm_id, wp->gpa, hpa);
base_paddr = get_hv_image_base();
if (((hpa <= base_paddr) && ((hpa + CPU_PAGE_SIZE) > base_paddr)) ||
if (((hpa <= base_paddr) && ((hpa + PAGE_SIZE) > base_paddr)) ||
((hpa >= base_paddr) &&
(hpa < (base_paddr + CONFIG_HV_RAM_SIZE)))) {
pr_err("%s: overlap the HV memory region.", __func__);
@ -661,7 +661,7 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
prot_clr = (wp->set != 0U) ? EPT_WR : 0UL;
ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
wp->gpa, CPU_PAGE_SIZE, prot_set, prot_clr);
wp->gpa, PAGE_SIZE, prot_set, prot_clr);
return 0;
}

View File

@ -11,7 +11,7 @@
#define MAX_STR_SIZE 256U
#define SHELL_PROMPT_STR "ACRN:\\>"
#define SHELL_LOG_BUF_SIZE (CPU_PAGE_SIZE * CONFIG_MAX_PCPU_NUM / 2U)
#define SHELL_LOG_BUF_SIZE (PAGE_SIZE * CONFIG_MAX_PCPU_NUM / 2U)
static char shell_log_buf[SHELL_LOG_BUF_SIZE];
/* Input Line Other - Switch to the "other" input line (there are only two

View File

@ -10,7 +10,7 @@
#include <spinlock.h>
#define SHELL_CMD_MAX_LEN 100U
#define SHELL_STRING_MAX_LEN (CPU_PAGE_SIZE << 2U)
#define SHELL_STRING_MAX_LEN (PAGE_SIZE << 2U)
/* Shell Command Function */
typedef int (*shell_cmd_fn_t)(int argc, char **argv);

View File

@ -29,6 +29,8 @@
#ifndef APICREG_H
#define APICREG_H
#include <page.h>
/*
* Local && I/O APIC definitions.
*/
@ -76,7 +78,7 @@ struct lapic_regs { /*OFFSET(Hex)*/
/*roundup sizeof current struct to 4KB*/
struct lapic_reg rsv5[192]; /*400 -- FF0*/
} __aligned(CPU_PAGE_SIZE);
} __aligned(PAGE_SIZE);
enum LAPIC_REGISTERS {
LAPIC_ID = 0x2,

View File

@ -40,7 +40,6 @@
/* Define page size */
#define CPU_PAGE_SHIFT 12U
#define CPU_PAGE_SIZE 0x1000U
#define CPU_PAGE_MASK 0xFFFFFFFFFFFFF000UL
#define MMU_PTE_PAGE_SHIFT CPU_PAGE_SHIFT

View File

@ -195,7 +195,7 @@ struct msr_store_area {
struct acrn_vcpu_arch {
/* vmcs region for this vcpu, MUST be 4KB-aligned */
uint8_t vmcs[CPU_PAGE_SIZE];
uint8_t vmcs[PAGE_SIZE];
/* per vcpu lapic */
struct acrn_vlapic vlapic;
int cur_context;
@ -233,7 +233,7 @@ struct acrn_vcpu_arch {
/* List of MSRS to be stored and loaded on VM exits or VM entries */
struct msr_store_area msr_area;
} __aligned(CPU_PAGE_SIZE);
} __aligned(PAGE_SIZE);
struct acrn_vm;
struct acrn_vcpu {
@ -264,7 +264,7 @@ struct acrn_vcpu {
#endif /* CONFIG_MTRR_ENABLED */
uint64_t reg_cached;
uint64_t reg_updated;
} __aligned(CPU_PAGE_SIZE);
} __aligned(PAGE_SIZE);
struct vcpu_dump {
struct acrn_vcpu *vcpu;

View File

@ -30,6 +30,8 @@
#ifndef VLAPIC_H
#define VLAPIC_H
#include <page.h>
/**
* @file vlapic.h
@ -104,7 +106,7 @@ struct acrn_vlapic {
*/
uint32_t svr_last;
uint32_t lvt_last[VLAPIC_MAXLVT_INDEX + 1];
} __aligned(CPU_PAGE_SIZE);
} __aligned(PAGE_SIZE);
/* APIC write handlers */

View File

@ -26,7 +26,7 @@ struct vm_hw_info {
struct acrn_vcpu vcpu_array[CONFIG_MAX_VCPUS_PER_VM];
uint16_t created_vcpus; /* Number of created vcpus */
uint64_t gpa_lowtop; /* top lowmem gpa of this VM */
} __aligned(CPU_PAGE_SIZE);
} __aligned(PAGE_SIZE);
struct sw_linux {
void *ramdisk_src_addr; /* HVA */
@ -88,9 +88,9 @@ enum vm_state {
struct vm_arch {
/* I/O bitmaps A and B for this VM, MUST be 4-Kbyte aligned */
uint8_t io_bitmap[CPU_PAGE_SIZE*2];
uint8_t io_bitmap[PAGE_SIZE*2];
/* MSR bitmap region for this VM, MUST be 4-Kbyte aligned */
uint8_t msr_bitmap[CPU_PAGE_SIZE];
uint8_t msr_bitmap[PAGE_SIZE];
uint64_t guest_init_pml4;/* Guest init pml4 */
/* EPT hierarchy for Normal World */
@ -108,7 +108,7 @@ struct vm_arch {
struct vm_io_handler_desc emul_pio[EMUL_PIO_IDX_MAX];
/* reference to virtual platform to come here (as needed) */
} __aligned(CPU_PAGE_SIZE);
} __aligned(PAGE_SIZE);
#define CPUID_CHECK_SUBLEAF (1U << 0U)
@ -160,7 +160,7 @@ struct acrn_vm {
spinlock_t softirq_dev_lock;
struct list_head softirq_dev_entry_list;
uint64_t intr_inject_delay_delta; /* delay of intr injection */
} __aligned(CPU_PAGE_SIZE);
} __aligned(PAGE_SIZE);
#ifdef CONFIG_PARTITION_MODE
struct vpci_vdev_array {

View File

@ -55,7 +55,7 @@
static inline uint64_t round_page_up(uint64_t addr)
{
return (((addr + (uint64_t)CPU_PAGE_SIZE) - 1UL) & CPU_PAGE_MASK);
return (((addr + (uint64_t)PAGE_SIZE) - 1UL) & CPU_PAGE_MASK);
}
static inline uint64_t round_page_down(uint64_t addr)

View File

@ -28,6 +28,8 @@
#define TRUSTY_PGTABLE_PAGE_NUM(size) \
(TRUSTY_PML4_PAGE_NUM(size) + TRUSTY_PDPT_PAGE_NUM(size) + TRUSTY_PD_PAGE_NUM(size) + TRUSTY_PT_PAGE_NUM(size))
struct acrn_vm;
struct page {
uint8_t contents[PAGE_SIZE];
} __aligned(PAGE_SIZE);

View File

@ -21,7 +21,7 @@
struct per_cpu_region {
/* vmxon_region MUST be 4KB-aligned */
uint8_t vmxon_region[CPU_PAGE_SIZE];
uint8_t vmxon_region[PAGE_SIZE];
#ifdef HV_DEBUG
uint64_t *sbuf[ACRN_SBUF_ID_MAX];
char logbuf[LOG_MESSAGE_MAX_SIZE];
@ -51,7 +51,7 @@ struct per_cpu_region {
#ifdef PROFILING_ON
struct profiling_info_wrapper profiling_info;
#endif
} __aligned(CPU_PAGE_SIZE); /* per_cpu_region size aligned with CPU_PAGE_SIZE */
} __aligned(PAGE_SIZE); /* per_cpu_region size aligned with PAGE_SIZE */
extern struct per_cpu_region per_cpu_data[];
extern uint16_t phys_cpu_num;

View File

@ -210,15 +210,15 @@ static void deallocate_mem(struct mem_pool *pool, const void *ptr)
}
/*
* The return address will be CPU_PAGE_SIZE aligned if 'num_bytes' is greater
* than CPU_PAGE_SIZE.
* The return address will be PAGE_SIZE aligned if 'num_bytes' is greater
* than PAGE_SIZE.
*/
void *malloc(unsigned int num_bytes)
{
void *memory = NULL;
/* Check if bytes requested extend page-size */
if (num_bytes < CPU_PAGE_SIZE) {
if (num_bytes < PAGE_SIZE) {
/*
* Request memory allocation from smaller segmented memory pool
*/