HV: Avoid assignment operations inside macros
To follow the MISRA C standard, assignment operations inside function-like macros should be avoided. Replace the violating macros with inline functions instead.

Signed-off-by: Yang, Yu-chu <yu-chu.yang@intel.com>
parent 688b0cdb9c
commit 0419816574
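For context, the pattern being removed: a function-like macro whose body performs an assignment both hides a side effect in what looks like an expression and bypasses type checking of its arguments, which MISRA C flags. A minimal sketch of the before/after shape (illustrative names, not taken verbatim from this commit):

#include <stdint.h>

/* Before: function-like macro performing an assignment (the MISRA C violation) */
#define WRITE64(addr, data) \
        (*(volatile uint64_t *)(addr) = (uint64_t)(data))

/* After: inline function with the same volatile-write semantics; the
 * compiler now type-checks addr and data, and the call is a plain
 * statement rather than an expression with a hidden assignment.
 */
static inline void write64(void *addr, uint64_t data)
{
        *(volatile uint64_t *)(addr) = (uint64_t)(data);
}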
@@ -733,7 +733,7 @@ uint64_t create_guest_initial_paging(struct vm *vm)
         /* PML4 used 1 page, skip it to fetch PDPT */
         pdpt_base_paddr = GUEST_INIT_PAGE_TABLE_START + PAGE_SIZE_4K;
         entry = pdpt_base_paddr | table_present;
-        MEM_WRITE64(pml4_addr, entry);
+        mem_write64(pml4_addr, entry);
 
         /* Write PDPTE, PDPT used 1 page, skip it to fetch PD */
         pd_base_paddr = pdpt_base_paddr + PAGE_SIZE_4K;
@@ -742,7 +742,7 @@ uint64_t create_guest_initial_paging(struct vm *vm)
         for (i = 0; i < 4; i++) {
                 entry = ((pd_base_paddr + (i * PAGE_SIZE_4K))
                                 | table_present);
-                MEM_WRITE64(addr, entry);
+                mem_write64(addr, entry);
                 addr += IA32E_COMM_ENTRY_SIZE;
         }
 
@@ -755,7 +755,7 @@ uint64_t create_guest_initial_paging(struct vm *vm)
         addr = pml4_addr + 2 * PAGE_SIZE_4K;
         for (i = 0; i < entry_num; i++) {
                 entry = (i * (1 << MMU_PDE_PAGE_SHIFT)) | table_present;
-                MEM_WRITE64(addr, entry);
+                mem_write64(addr, entry);
                 addr += IA32E_COMM_ENTRY_SIZE;
         }
 
@@ -776,7 +776,7 @@ uint64_t create_guest_initial_paging(struct vm *vm)
         addr = (pml4_addr + PAGE_SIZE_4K + table_offset);
         table_present = (IA32E_COMM_P_BIT | IA32E_COMM_RW_BIT);
         entry = (pd_base_paddr | table_present);
-        MEM_WRITE64(addr, entry);
+        mem_write64(addr, entry);
 
         /* Write PDE for trusty with 2M page size */
         entry_num = TRUSTY_MEMORY_SIZE / (1 << MMU_PDE_PAGE_SHIFT);
@@ -788,7 +788,7 @@ uint64_t create_guest_initial_paging(struct vm *vm)
                 entry = (TRUSTY_EPT_REBASE_GPA +
                                 (i * (1 << MMU_PDE_PAGE_SHIFT)))
                                 | table_present;
-                MEM_WRITE64(addr, entry);
+                mem_write64(addr, entry);
                 addr += IA32E_COMM_ENTRY_SIZE;
         }
 }
@@ -747,7 +747,7 @@ emulate_movs(struct vcpu *vcpu, __unused uint64_t gpa, struct vie *vie,
          * Repeat the instruction if the count register is not zero.
          */
         if ((rcx & vie_size2mask(vie->addrsize)) != 0UL)
-                VCPU_RETAIN_RIP(vcpu);
+                vcpu_retain_rip(vcpu);
         }
 done:
         ASSERT(error == 0, "%s: unexpected error %d", __func__, error);
@@ -812,7 +812,7 @@ emulate_stos(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
          * Repeat the instruction if the count register is not zero.
          */
         if ((rcx & vie_size2mask(vie->addrsize)) != 0UL)
-                VCPU_RETAIN_RIP(vcpu);
+                vcpu_retain_rip(vcpu);
         }
 
         return 0;
@@ -2296,7 +2296,7 @@ int veoi_vmexit_handler(struct vcpu *vcpu)
         struct lapic_reg *tmrptr;
         uint32_t idx, mask;
 
-        VCPU_RETAIN_RIP(vcpu);
+        vcpu_retain_rip(vcpu);
 
         vlapic = vcpu->arch_vcpu.vlapic;
         lapic = vlapic->apic_page;
@@ -2326,7 +2326,7 @@ int apic_write_vmexit_handler(struct vcpu *vcpu)
         offset = (qual & 0xFFFUL);
 
         handled = 1;
-        VCPU_RETAIN_RIP(vcpu);
+        vcpu_retain_rip(vcpu);
         vlapic = vcpu->arch_vcpu.vlapic;
 
         switch (offset) {
@@ -335,7 +335,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
                 table_entry |= (uint64_t)paddr;
 
                 /* Write the table entry to map this memory */
-                MEM_WRITE64(table_base + table_offset, table_entry);
+                mem_write64(table_base + table_offset, table_entry);
 
                 /* Invalidate TLB and page-structure cache,
                  * if it is the first mapping no need to invalidate TLB
@@ -350,7 +350,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
                 /* Table is present.
                  * Write the table entry to map this memory
                  */
-                MEM_WRITE64(table_base + table_offset, 0);
+                mem_write64(table_base + table_offset, 0);
 
                 /* Unmap, need to invalidate TLB and
                  * page-structure cache
@@ -369,7 +369,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
                 table_entry |= (uint64_t) paddr;
 
                 /* Write the table entry to map this memory */
-                MEM_WRITE64(table_base + table_offset, table_entry);
+                mem_write64(table_base + table_offset, table_entry);
 
                 /* Modify, need to invalidate TLB and
                  * page-structure cache
@@ -390,7 +390,7 @@ static uint32_t map_mem_region(void *vaddr, void *paddr,
                 table_entry |= attr;
 
                 /* Write the table entry to map this memory */
-                MEM_WRITE64(table_base + table_offset, table_entry);
+                mem_write64(table_base + table_offset, table_entry);
 
                 /* Modify, need to invalidate TLB and
                  * page-structure cache
@@ -550,7 +550,7 @@ static void *walk_paging_struct(void *addr, void *table_base,
                 if (map_params->page_table_type == PTT_HOST)
                         entry_present |= attr;
 
-                MEM_WRITE64(table_base + table_offset,
+                mem_write64(table_base + table_offset,
                                 HVA2HPA(sub_table_addr) | entry_present);
         } else {
                 /* Get address of the sub-table */
@@ -930,7 +930,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
         }
         /* write all entries and keep original attr*/
         for (i = 0U; i < IA32E_NUM_ENTRIES; i++) {
-                MEM_WRITE64(sub_tab_addr + (i * IA32E_COMM_ENTRY_SIZE),
+                mem_write64(sub_tab_addr + (i * IA32E_COMM_ENTRY_SIZE),
                                 (attr | (pa + (i * next_page_size))));
         }
         if (map_params->page_table_type == PTT_EPT) {
@@ -939,7 +939,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
                  * bit 0(R) bit1(W) bit2(X) bit3~5 MUST be reserved
                  * (here &0x07)
                  */
-                MEM_WRITE64(entry.entry_base + entry.entry_off,
+                mem_write64(entry.entry_base + entry.entry_off,
                                 (entry.entry_val & 0x07UL) |
                                 HVA2HPA(sub_tab_addr));
         } else {
@@ -948,7 +948,7 @@ static uint64_t break_page_table(struct map_params *map_params, void *paddr,
                  * bit0(P) bit1(RW) bit2(U/S) bit3(PWT) bit4(PCD)
                  * bit5(A) bit6(D or Ignore)
                  */
-                MEM_WRITE64(entry.entry_base + entry.entry_off,
+                mem_write64(entry.entry_base + entry.entry_off,
                                 (entry.entry_val & 0x7fUL) |
                                 HVA2HPA(sub_tab_addr));
         }
@@ -126,7 +126,7 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
          */
         sub_table_addr = alloc_paging_struct();
         sworld_pml4e = HVA2HPA(sub_table_addr) | table_present;
-        MEM_WRITE64(pml4_base, sworld_pml4e);
+        mem_write64(pml4_base, sworld_pml4e);
 
 
         nworld_pml4e = MEM_READ64(HPA2HVA(vm->arch_vm.nworld_eptp));
@@ -331,7 +331,7 @@ int interrupt_window_vmexit_handler(struct vcpu *vcpu)
                 exec_vmwrite(VMX_PROC_VM_EXEC_CONTROLS, value32);
         }
 
-        VCPU_RETAIN_RIP(vcpu);
+        vcpu_retain_rip(vcpu);
         return 0;
 }
 
@@ -345,7 +345,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)
                         (((intr_info & VMX_INT_TYPE_MASK) >> 8)
                         != VMX_INT_TYPE_EXT_INT)) {
                 pr_err("Invalid VM exit interrupt info:%x", intr_info);
-                VCPU_RETAIN_RIP(vcpu);
+                vcpu_retain_rip(vcpu);
                 return -EINVAL;
         }
 
@@ -353,7 +353,7 @@ int external_interrupt_vmexit_handler(struct vcpu *vcpu)
 
         dispatch_interrupt(&ctx);
 
-        VCPU_RETAIN_RIP(vcpu);
+        vcpu_retain_rip(vcpu);
 
         TRACE_2L(TRACE_VMEXIT_EXTERNAL_INTERRUPT, ctx.vector, 0);
 
@@ -524,7 +524,7 @@ int exception_vmexit_handler(struct vcpu *vcpu)
         }
 
         /* Handle all other exceptions */
-        VCPU_RETAIN_RIP(vcpu);
+        vcpu_retain_rip(vcpu);
 
         vcpu_queue_exception(vcpu, exception_vector, int_err_code);
 
@@ -273,7 +273,10 @@ struct vcpu {
 
 #define is_vcpu_bsp(vcpu) ((vcpu)->vcpu_id == 0)
 /* do not update Guest RIP for next VM Enter */
-#define VCPU_RETAIN_RIP(vcpu) ((vcpu)->arch_vcpu.inst_len = 0)
+static inline void vcpu_retain_rip(struct vcpu *vcpu)
+{
+        (vcpu)->arch_vcpu.inst_len = 0;
+}
 
 /* External Interfaces */
 struct vcpu* get_ever_run_vcpu(uint16_t pcpu_id);
@@ -257,19 +257,32 @@ enum _page_table_present {
 #define PAGE_SIZE_2M    MEM_2M
 #define PAGE_SIZE_1G    MEM_1G
 
-/* Macros for reading/writing memory */
+/* Macros for reading memory */
 #define MEM_READ8(addr)        (*(volatile uint8_t *)(addr))
-#define MEM_WRITE8(addr, data)  \
-                (*(volatile uint8_t *)(addr) = (uint8_t)(data))
 #define MEM_READ16(addr)       (*(volatile uint16_t *)(addr))
-#define MEM_WRITE16(addr, data) \
-                (*(volatile uint16_t *)(addr) = (uint16_t)(data))
 #define MEM_READ32(addr)       (*(volatile uint32_t *)(addr))
-#define MEM_WRITE32(addr, data) \
-                (*(volatile uint32_t *)(addr) = (uint32_t)(data))
 #define MEM_READ64(addr)       (*(volatile uint64_t *)(addr))
-#define MEM_WRITE64(addr, data) \
-                (*(volatile uint64_t *)(addr) = (uint64_t)(data))
 
+/* Inline functions for writing memory */
+static inline void mem_write8(void *addr, uint8_t data)
+{
+        *(volatile uint8_t *)(addr) = (uint8_t)(data);
+}
+
+static inline void mem_write16(void *addr, uint16_t data)
+{
+        *(volatile uint16_t *)(addr) = (uint16_t)(data);
+}
+
+static inline void mem_write32(void *addr, uint32_t data)
+{
+        *(volatile uint32_t *)(addr) = (uint32_t)(data);
+}
+
+static inline void mem_write64(void *addr, uint64_t data)
+{
+        *(volatile uint64_t *)(addr) = (uint64_t)(data);
+}
+
 /* Typedef for MMIO handler and range check routine */
 typedef int(*hv_mem_io_handler_t)(struct vcpu *, struct mem_io *, void *);
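As a quick sanity check outside the hypervisor tree, the new helper can be exercised in a self-contained harness (hypothetical test code, not part of this commit); the inline function is a drop-in replacement for the macro at every call site shown above:

#include <stdint.h>

static inline void mem_write64(void *addr, uint64_t data)
{
        *(volatile uint64_t *)(addr) = (uint64_t)(data);
}

int main(void)
{
        uint64_t entry = 0;
        uint64_t table_present = (1UL << 0) | (1UL << 1); /* P and RW bits */

        /* same call shape as the page-table writes in this diff */
        mem_write64(&entry, 0x1000UL | table_present);

        return (entry == 0x1003UL) ? 0 : 1;
}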