hv: vioapic/vpic: clean up spinlock wrappers
remove the following unnecessary spinlock wrappers:
  #define VIOAPIC_LOCK(vioapic)   spinlock_obtain(&((vioapic)->mtx))
  #define VIOAPIC_UNLOCK(vioapic) spinlock_release(&((vioapic)->mtx))
  #define VPIC_LOCK_INIT(vpic)    spinlock_init(&((vpic)->lock))
  #define VPIC_LOCK(vpic)         spinlock_obtain(&((vpic)->lock))
  #define VPIC_UNLOCK(vpic)       spinlock_release(&((vpic)->lock))

Tracked-On: #861
Signed-off-by: Shiqing Gao <shiqing.gao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
parent
4f20c44ec3
commit
1d2ed1adee
|
@ -38,9 +38,6 @@
|
|||
#define ACRN_DBG_IOAPIC 6U
|
||||
#define ACRN_IOAPIC_VERSION 0x11U
|
||||
|
||||
#define VIOAPIC_LOCK(vioapic) spinlock_obtain(&((vioapic)->mtx))
|
||||
#define VIOAPIC_UNLOCK(vioapic) spinlock_release(&((vioapic)->mtx))
|
||||
|
||||
#define IOAPIC_ID_MASK 0x0f000000U
|
||||
#define MASK_ALL_INTERRUPTS 0x0001000000010000UL
|
||||
#define IOAPIC_RTE_LOW_INTVEC ((uint32_t)IOAPIC_RTE_INTVEC)
|
||||
|
@ -140,7 +137,7 @@ vioapic_set_irqstate(struct vm *vm, uint32_t irq, enum irqstate irqstate)
|
|||
|
||||
vioapic = vm_ioapic(vm);
|
||||
|
||||
VIOAPIC_LOCK(vioapic);
|
||||
spinlock_obtain(&(vioapic->mtx));
|
||||
switch (irqstate) {
|
||||
case IRQSTATE_ASSERT:
|
||||
vioapic_set_pinstate(vioapic, pin, true);
|
||||
|
@ -155,7 +152,7 @@ vioapic_set_irqstate(struct vm *vm, uint32_t irq, enum irqstate irqstate)
|
|||
default:
|
||||
panic("vioapic_set_irqstate: invalid irqstate %d", irqstate);
|
||||
}
|
||||
VIOAPIC_UNLOCK(vioapic);
|
||||
spinlock_release(&(vioapic->mtx));
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -192,7 +189,7 @@ vioapic_update_tmr(struct vcpu *vcpu)
|
|||
vlapic = vcpu_vlapic(vcpu);
|
||||
vioapic = vm_ioapic(vcpu->vm);
|
||||
|
||||
VIOAPIC_LOCK(vioapic);
|
||||
spinlock_obtain(&(vioapic->mtx));
|
||||
pincount = vioapic_pincount(vcpu->vm);
|
||||
for (pin = 0U; pin < pincount; pin++) {
|
||||
rte = vioapic->rtbl[pin];
|
||||
|
@ -210,7 +207,7 @@ vioapic_update_tmr(struct vcpu *vcpu)
|
|||
vlapic_set_tmr_one_vec(vlapic, delmode, vector, level);
|
||||
}
|
||||
vlapic_apicv_batch_set_tmr(vlapic);
|
||||
VIOAPIC_UNLOCK(vioapic);
|
||||
spinlock_release(&(vioapic->mtx));
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
|
@ -254,8 +251,9 @@ vioapic_indirect_read(struct acrn_vioapic *vioapic, uint32_t addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Due to the race between vcpus, ensure to do VIOAPIC_LOCK(vioapic) &
|
||||
* VIOAPIC_UNLOCK(vioapic) by caller.
|
||||
/*
|
||||
* Due to the race between vcpus, ensure to do spinlock_obtain(&(vioapic->mtx))
|
||||
* & spinlock_release(&(vioapic->mtx)) by caller.
|
||||
*/
|
||||
static void
|
||||
vioapic_indirect_write(struct acrn_vioapic *vioapic, uint32_t addr,
|
||||
|
@ -396,7 +394,7 @@ vioapic_mmio_rw(struct acrn_vioapic *vioapic, uint64_t gpa,
|
|||
|
||||
offset = (uint32_t)(gpa - VIOAPIC_BASE);
|
||||
|
||||
VIOAPIC_LOCK(vioapic);
|
||||
spinlock_obtain(&(vioapic->mtx));
|
||||
|
||||
/* The IOAPIC specification allows 32-bit wide accesses to the
|
||||
* IOAPIC_REGSEL (offset 0) and IOAPIC_WINDOW (offset 16) registers.
|
||||
|
@ -425,7 +423,7 @@ vioapic_mmio_rw(struct acrn_vioapic *vioapic, uint64_t gpa,
|
|||
break;
|
||||
}
|
||||
|
||||
VIOAPIC_UNLOCK(vioapic);
|
||||
spinlock_release(&(vioapic->mtx));
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -457,7 +455,7 @@ vioapic_process_eoi(struct vm *vm, uint32_t vector)
|
|||
* XXX keep track of the pins associated with this vector instead
|
||||
* of iterating on every single pin each time.
|
||||
*/
|
||||
VIOAPIC_LOCK(vioapic);
|
||||
spinlock_obtain(&(vioapic->mtx));
|
||||
for (pin = 0U; pin < pincount; pin++) {
|
||||
rte = vioapic->rtbl[pin];
|
||||
if (((rte.u.lo_32 & IOAPIC_RTE_LOW_INTVEC) != vector) ||
|
||||
|
@ -473,7 +471,7 @@ vioapic_process_eoi(struct vm *vm, uint32_t vector)
|
|||
vioapic_send_intr(vioapic, pin);
|
||||
}
|
||||
}
|
||||
VIOAPIC_UNLOCK(vioapic);
|
||||
spinlock_release(&(vioapic->mtx));
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -29,9 +29,6 @@
|
|||
|
||||
#include <hypervisor.h>
|
||||
|
||||
#define VPIC_LOCK_INIT(vpic) spinlock_init(&((vpic)->lock))
|
||||
#define VPIC_LOCK(vpic) spinlock_obtain(&((vpic)->lock))
|
||||
#define VPIC_UNLOCK(vpic) spinlock_release(&((vpic)->lock))
|
||||
/* TODO: add spinlock_locked support? */
|
||||
/*#define VPIC_LOCKED(vpic) spinlock_locked(&((vpic)->lock))*/
|
||||
|
||||
|
@ -471,7 +468,7 @@ static void vpic_set_irqstate(struct vm *vm, uint32_t irq,
|
|||
return;
|
||||
}
|
||||
|
||||
VPIC_LOCK(vpic);
|
||||
spinlock_obtain(&(vpic->lock));
|
||||
switch (irqstate) {
|
||||
case IRQSTATE_ASSERT:
|
||||
vpic_set_pinstate(vpic, pin, true);
|
||||
|
@ -486,7 +483,7 @@ static void vpic_set_irqstate(struct vm *vm, uint32_t irq,
|
|||
default:
|
||||
ASSERT(false, "vpic_set_irqstate: invalid irqstate");
|
||||
}
|
||||
VPIC_UNLOCK(vpic);
|
||||
spinlock_release(&(vpic->lock));
|
||||
}
|
||||
|
||||
/* hypervisor interface: assert/deassert/pulse irq */
|
||||
|
@ -539,7 +536,7 @@ void vpic_pending_intr(struct vm *vm, uint32_t *vecptr)
|
|||
|
||||
i8259 = &vpic->i8259[0];
|
||||
|
||||
VPIC_LOCK(vpic);
|
||||
spinlock_obtain(&(vpic->lock));
|
||||
|
||||
pin = vpic_get_highest_irrpin(i8259);
|
||||
if (pin == 2U) {
|
||||
|
@ -553,7 +550,7 @@ void vpic_pending_intr(struct vm *vm, uint32_t *vecptr)
|
|||
*/
|
||||
if (pin >= NR_VPIC_PINS_PER_CHIP) {
|
||||
*vecptr = VECTOR_INVALID;
|
||||
VPIC_UNLOCK(vpic);
|
||||
spinlock_release(&(vpic->lock));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -561,7 +558,7 @@ void vpic_pending_intr(struct vm *vm, uint32_t *vecptr)
|
|||
|
||||
dev_dbg(ACRN_DBG_PIC, "Got pending vector 0x%x\n", *vecptr);
|
||||
|
||||
VPIC_UNLOCK(vpic);
|
||||
spinlock_release(&(vpic->lock));
|
||||
}
|
||||
|
||||
static void vpic_pin_accepted(struct i8259_reg_state *i8259, uint8_t pin)
|
||||
|
@ -589,7 +586,7 @@ void vpic_intr_accepted(struct vm *vm, uint32_t vector)
|
|||
|
||||
vpic = vm_pic(vm);
|
||||
|
||||
VPIC_LOCK(vpic);
|
||||
spinlock_obtain(&(vpic->lock));
|
||||
|
||||
pin = (uint8_t)(vector & 0x7U);
|
||||
|
||||
|
@ -606,7 +603,7 @@ void vpic_intr_accepted(struct vm *vm, uint32_t vector)
|
|||
|
||||
vpic_notify_intr(vpic);
|
||||
|
||||
VPIC_UNLOCK(vpic);
|
||||
spinlock_release(&(vpic->lock));
|
||||
}
|
||||
|
||||
static int vpic_read(struct acrn_vpic *vpic, struct i8259_reg_state *i8259,
|
||||
|
@ -614,7 +611,7 @@ static int vpic_read(struct acrn_vpic *vpic, struct i8259_reg_state *i8259,
|
|||
{
|
||||
uint8_t pin;
|
||||
|
||||
VPIC_LOCK(vpic);
|
||||
spinlock_obtain(&(vpic->lock));
|
||||
|
||||
if (i8259->poll) {
|
||||
i8259->poll = false;
|
||||
|
@ -640,7 +637,7 @@ static int vpic_read(struct acrn_vpic *vpic, struct i8259_reg_state *i8259,
|
|||
}
|
||||
}
|
||||
|
||||
VPIC_UNLOCK(vpic);
|
||||
spinlock_release(&(vpic->lock));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -654,7 +651,7 @@ static int vpic_write(struct acrn_vpic *vpic, struct i8259_reg_state *i8259,
|
|||
error = 0;
|
||||
val = (uint8_t)*eax;
|
||||
|
||||
VPIC_LOCK(vpic);
|
||||
spinlock_obtain(&(vpic->lock));
|
||||
|
||||
if ((port & ICU_IMR_OFFSET) != 0U) {
|
||||
switch (i8259->icw_num) {
|
||||
|
@ -689,7 +686,7 @@ static int vpic_write(struct acrn_vpic *vpic, struct i8259_reg_state *i8259,
|
|||
vpic_notify_intr(vpic);
|
||||
}
|
||||
|
||||
VPIC_UNLOCK(vpic);
|
||||
spinlock_release(&(vpic->lock));
|
||||
|
||||
return error;
|
||||
}
|
||||
|
@ -793,7 +790,7 @@ static int vpic_elc_handler(struct vm *vm, bool in, uint16_t port, size_t bytes,
|
|||
return -1;
|
||||
}
|
||||
|
||||
VPIC_LOCK(vpic);
|
||||
spinlock_obtain(&(vpic->lock));
|
||||
|
||||
if (in) {
|
||||
if (is_master) {
|
||||
|
@ -819,7 +816,7 @@ static int vpic_elc_handler(struct vm *vm, bool in, uint16_t port, size_t bytes,
|
|||
}
|
||||
}
|
||||
|
||||
VPIC_UNLOCK(vpic);
|
||||
spinlock_release(&(vpic->lock));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -880,5 +877,5 @@ void vpic_init(struct vm *vm)
|
|||
vm->arch_vm.vpic.i8259[0].mask = 0xffU;
|
||||
vm->arch_vm.vpic.i8259[1].mask = 0xffU;
|
||||
|
||||
VPIC_LOCK_INIT(vpic);
|
||||
spinlock_init(&(vpic->lock));
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue