hv: refine atomic_load/store_xxx name

rename atomic_load/store_xxx32 to atomic_load/store
rename atomic_load/store_xxx64 to atomic_load64/store64

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Li, Fei1 <fei1.li@intel.com>
Date:   2018-05-17 10:11:43 +08:00
Committed by: lijinxia
Parent: 336a8883db
Commit: 1f3da93e74

10 changed files with 47 additions and 78 deletions
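At call sites the change is mechanical: the 32-bit acquire/release variants become plain atomic_load()/atomic_store(), the 64-bit variants become atomic_load64()/atomic_store64(), and operands move from unsigned to signed types. A minimal before/after sketch, using call sites that appear in the hunks below:

    /* before: width and ordering are encoded in the name */
    pending = atomic_load_acq_long(&pir_desc->pending);
    atomic_store_rel_32(&vcpu->running, 0);

    /* after: 32-bit is the unsuffixed default, 64-bit gets a suffix */
    pending = atomic_load64((long *)&pir_desc->pending);
    atomic_store(&vcpu->running, 0);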


@@ -98,7 +98,7 @@ is_entry_invalid(struct ptdev_remapping_info *entry)
 static inline bool
 is_entry_active(struct ptdev_remapping_info *entry)
 {
-	return atomic_load_acq_int(&entry->active) == ACTIVE_FLAG;
+	return atomic_load((int *)&entry->active) == ACTIVE_FLAG;
 }
 
 /* require ptdev_lock protect */


@@ -82,9 +82,9 @@ int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
 	 */
 	vcpu->vcpu_id = atomic_xadd(&vm->hw.created_vcpus, 1);
 	/* vm->hw.vcpu_array[vcpu->vcpu_id] = vcpu; */
-	atomic_store_rel_64(
-		(unsigned long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
-		(unsigned long)vcpu);
+	atomic_store64(
+		(long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
+		(long)vcpu);
 
 	ASSERT(vcpu->vcpu_id < vm->hw.num_vcpus,
 		"Allocated vcpu_id is out of range!");
@@ -221,9 +221,9 @@ int destroy_vcpu(struct vcpu *vcpu)
 	ASSERT(vcpu != NULL, "Incorrect arguments");
 
 	/* vcpu->vm->hw.vcpu_array[vcpu->vcpu_id] = NULL; */
-	atomic_store_rel_64(
-		(unsigned long *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
-		(unsigned long)NULL);
+	atomic_store64(
+		(long *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
+		(long)NULL);
 
 	atomic_dec(&vcpu->vm->hw.created_vcpus);
@@ -282,13 +282,13 @@ void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
 	vcpu->state = new_state;
 
 	get_schedule_lock(vcpu->pcpu_id);
-	if (atomic_load_acq_32(&vcpu->running) == 1) {
+	if (atomic_load(&vcpu->running) == 1) {
 		remove_vcpu_from_runqueue(vcpu);
 		make_reschedule_request(vcpu);
 		release_schedule_lock(vcpu->pcpu_id);
 
 		if (vcpu->pcpu_id != pcpu_id) {
-			while (atomic_load_acq_32(&vcpu->running) == 1)
+			while (atomic_load(&vcpu->running) == 1)
 				__asm__ __volatile("pause" ::: "memory");
 		}
 	} else {
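(The spin loop above keeps working after the rename because atomic_load() is still a volatile asm read with a "memory" clobber, as defined in the atomic.h hunk at the end, so the compiler must re-read vcpu->running on every iteration rather than caching it in a register.)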


@@ -484,7 +484,7 @@ vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
 	uint32_t val;
 
 	idx = lvt_off_to_idx(offset);
-	val = atomic_load_acq_32(&vlapic->lvt_last[idx]);
+	val = atomic_load((int *)&vlapic->lvt_last[idx]);
 	return val;
 }
@@ -547,7 +547,7 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
 		vlapic_update_lvtt(vlapic, val);
 
 	*lvtptr = val;
-	atomic_store_rel_32(&vlapic->lvt_last[idx], val);
+	atomic_store((int *)&vlapic->lvt_last[idx], val);
 }
 
 static void
@@ -1097,7 +1097,7 @@ vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
 	irrptr = &lapic->irr[0];
 
 	for (i = 7; i >= 0; i--) {
-		val = atomic_load_acq_int(&irrptr[i].val);
+		val = atomic_load((int *)&irrptr[i].val);
 		bitpos = fls(val);
 		if (bitpos >= 0) {
 			vector = i * 32 + bitpos;
@@ -2007,7 +2007,7 @@ apicv_pending_intr(struct vlapic *vlapic, __unused int *vecptr)
 	pir_desc = vlapic->pir_desc;
 
-	pending = atomic_load_acq_long(&pir_desc->pending);
+	pending = atomic_load64((long *)&pir_desc->pending);
 	if (!pending)
 		return 0;


@@ -127,7 +127,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
 	snprintf(&vm->attr.name[0], MAX_VM_NAME_LEN, "vm_%d",
 		vm->attr.id);
 
-	atomic_store_rel_int(&vm->hw.created_vcpus, 0);
+	atomic_store(&vm->hw.created_vcpus, 0);
 
 	/* gpa_lowtop are used for system start up */
 	vm->hw.gpa_lowtop = 0;


@@ -363,7 +363,7 @@ static void complete_request(struct vcpu *vcpu)
 	req_buf = (struct vhm_request_buffer *)
 		vcpu->vm->sw.io_shared_page;
 	req_buf->req_queue[vcpu->vcpu_id].valid = false;
-	atomic_store_rel_32(&vcpu->ioreq_pending, 0);
+	atomic_store(&vcpu->ioreq_pending, 0);
 
 	return;
 }
@@ -900,7 +900,7 @@ int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
 	fire_vhm_interrupt();
 
 	/* pause vcpu, wait for VHM to handle the MMIO request */
-	atomic_store_rel_32(&vcpu->ioreq_pending, 1);
+	atomic_store(&vcpu->ioreq_pending, 1);
 	pause_vcpu(vcpu, VCPU_PAUSED);
 
 	return 0;


@@ -146,7 +146,7 @@ static void context_switch_out(struct vcpu *vcpu)
 	/* cancel event(int, gp, nmi and exception) injection */
 	cancel_event_injection(vcpu);
 
-	atomic_store_rel_32(&vcpu->running, 0);
+	atomic_store(&vcpu->running, 0);
 	/* do prev vcpu context switch out */
 	/* For now, we don't need to invalid ept.
 	 * But if we have more than one vcpu on one pcpu,
@@ -163,7 +163,7 @@ static void context_switch_in(struct vcpu *vcpu)
 	if (vcpu == NULL)
 		return;
 
-	atomic_store_rel_32(&vcpu->running, 1);
+	atomic_store(&vcpu->running, 1);
 	/* FIXME:
 	 * Now, we don't need to load new vcpu VMCS because
 	 * we only do switch between vcpu loop and idle loop.


@@ -156,7 +156,7 @@ struct tgt_uart {
 	uint64_t base_address;
 	uint32_t clock_frequency;
 	uint32_t buffer_size;
-	unsigned int open_count;
+	int open_count;
 
 	/* Target specific function pointers. */
 	int (*init)(struct tgt_uart *tgt_uart);


@@ -161,10 +161,10 @@ static int uart16550_init(struct tgt_uart *tgt_uart)
 		status = -ENODEV;
 	} else {
 		if (strcmp(tgt_uart->uart_id, "STDIO") == 0) {
-			atomic_store_rel_int(&tgt_uart->open_count, 0);
+			atomic_store(&tgt_uart->open_count, 0);
 		} else {
 			/* set open count to 1 to prevent open */
-			atomic_store_rel_int(&tgt_uart->open_count, 1);
+			atomic_store(&tgt_uart->open_count, 1);
 			status = -EINVAL;
 		}
 	}


@@ -254,8 +254,8 @@ struct vcpu {
 	unsigned long pending_pre_work; /* any pre work pending? */
 	bool launched; /* Whether the vcpu is launched on target pcpu */
 	unsigned int paused_cnt; /* how many times vcpu is paused */
-	unsigned int running; /* vcpu is picked up and run? */
-	unsigned int ioreq_pending; /* ioreq is ongoing or not? */
+	int running; /* vcpu is picked up and run? */
+	int ioreq_pending; /* ioreq is ongoing or not? */
 
 	struct vhm_request req; /* used by io/ept emulation */
 	struct mem_io mmio; /* used by io/ept emulation */
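The running and ioreq_pending fields switch from unsigned int to int, like open_count in struct tgt_uart above, so they match the int-typed parameters of the new atomic_load()/atomic_store() and can be passed without casts, as seen in pause_vcpu() and complete_request().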


@@ -32,6 +32,30 @@
 
 #define BUS_LOCK "lock ; "
 
+#define build_atomic_load(name, size, type, ptr)	\
+static inline type name(const volatile type *ptr)	\
+{							\
+	type ret;					\
+	asm volatile("mov" size " %1,%0"		\
+			: "=r" (ret)			\
+			: "m" (*ptr)			\
+			: "cc", "memory");		\
+	return ret;					\
+}
+build_atomic_load(atomic_load, "l", int, p)
+build_atomic_load(atomic_load64, "q", long, p)
+
+#define build_atomic_store(name, size, type, ptr, v)	\
+static inline void name(volatile type *ptr, type v)	\
+{							\
+	asm volatile("mov" size " %1,%0"		\
+			: "=m" (*ptr)			\
+			: "r" (v)			\
+			: "cc", "memory");		\
+}
+build_atomic_store(atomic_store, "l", int, p, v)
+build_atomic_store(atomic_store64, "q", long, p, v)
+
 /*
  * #define atomic_set_int(P, V) (*(unsigned int *)(P) |= (V))
  */
@@ -135,56 +159,6 @@ static inline long atomic_swap_long(unsigned long *p, unsigned long v)
  */
 #define atomic_readandclear_long(p) atomic_swap_long(p, 0)
 
-/*
- * #define atomic_load_acq_int(P) (*(unsigned int*)(P))
- */
-static inline int atomic_load_acq_int(unsigned int *p)
-{
-	int ret;
-
-	__asm __volatile("movl %1,%0"
-			: "=r"(ret)
-			: "m" (*p)
-			: "cc", "memory");
-	return ret;
-}
-
-/*
- * #define atomic_store_rel_int(P, V) (*(unsigned int *)(P) = (V))
- */
-static inline void atomic_store_rel_int(unsigned int *p, unsigned int v)
-{
-	__asm __volatile("movl %1,%0"
-			: "=m" (*p)
-			: "r" (v)
-			: "cc", "memory");
-}
-
-/*
- * #define atomic_load_acq_long(P) (*(unsigned long*)(P))
- */
-static inline long atomic_load_acq_long(unsigned long *p)
-{
-	long ret;
-
-	__asm __volatile("movq %1,%0"
-			: "=r"(ret)
-			: "m" (*p)
-			: "cc", "memory");
-	return ret;
-}
-
-/*
- * #define atomic_store_rel_long(P, V) (*(unsigned long *)(P) = (V))
- */
-static inline void atomic_store_rel_long(unsigned long *p, unsigned long v)
-{
-	__asm __volatile("movq %1,%0"
-			: "=m" (*p)
-			: "r" (v)
-			: "cc", "memory");
-}
-
 static inline int atomic_cmpxchg_int(unsigned int *p,
 		int old, int new)
 {
@@ -198,11 +172,6 @@ static inline int atomic_cmpxchg_int(unsigned int *p,
 	return ret;
 }
 
-#define atomic_load_acq_32 atomic_load_acq_int
-#define atomic_store_rel_32 atomic_store_rel_int
-#define atomic_load_acq_64 atomic_load_acq_long
-#define atomic_store_rel_64 atomic_store_rel_long
-
 #define build_atomic_xadd(name, size, type, ptr, v)	\
 static inline type name(type *ptr, type v)		\
 {							\
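For reference, a sketch of what build_atomic_load(atomic_load, "l", int, p) above expands to after preprocessing; string pasting turns "mov" size into "movl" (or "movq" for the 64-bit variant):

    static inline int atomic_load(const volatile int *p)
    {
        int ret;
        /* volatile asm read; "memory" clobber forbids caching *p */
        asm volatile("movl %1,%0"
                : "=r" (ret)
                : "m" (*p)
                : "cc", "memory");
        return ret;
    }

The store generator follows the same pattern with the memory operand as the output ("=m" (*ptr)) and the value as a register input, so one macro body covers both widths.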