From 4c1cb60684b37a251cc111775423b12beaecb832 Mon Sep 17 00:00:00 2001
From: Yin Fengwei
Date: Thu, 1 Nov 2018 16:08:37 +0800
Subject: [PATCH] hv: Remove the up_count_spinlock and use atomic for up_count

It's possible that the up_count_spinlock is not yet released when the
system enters S3. The failing sequence looks like this:

    BSP                         AP
    stop_cpus
                                cpu_dead
                                  cpu_set_current_state
                                    spinlock_obtain
                                      up_count--
    wait for up_count == 1
    enter S3
                                    spinlock_release

This is especially likely because the actual spinlock release can be
delayed by the cache.

Most of the data protected by up_count_spinlock is per-CPU data, which
can be changed without a lock; the only shared state left is up_count
itself. This patch removes the up_count_spinlock and uses the atomic
API to update up_count.

Tracked-On: #1691
Signed-off-by: Yin Fengwei
Reviewed-by: Eddie Dong
Acked-by: Anthony Xu
---
 hypervisor/arch/x86/cpu.c       | 23 +++++++----------------
 hypervisor/include/lib/atomic.h |  2 ++
 2 files changed, 9 insertions(+), 16 deletions(-)

diff --git a/hypervisor/arch/x86/cpu.c b/hypervisor/arch/x86/cpu.c
index 1a50d15e9..be54aca73 100644
--- a/hypervisor/arch/x86/cpu.c
+++ b/hypervisor/arch/x86/cpu.c
@@ -14,15 +14,10 @@ spinlock_t trampoline_spinlock = {
 	.tail = 0U
 };
 
-static spinlock_t up_count_spinlock = {
-	.head = 0U,
-	.tail = 0U
-};
-
 struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(CPU_PAGE_SIZE);
 uint16_t phys_cpu_num = 0U;
 static uint64_t pcpu_sync = 0UL;
-static volatile uint16_t up_count = 0U;
+static uint16_t up_count = 0U;
 static uint64_t startup_paddr = 0UL;
 
 /* physical cpu active bitmap, support up to 64 cpus */
@@ -309,12 +304,10 @@ static void init_percpu_lapic_id(void)
 
 static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
 {
-	spinlock_obtain(&up_count_spinlock);
-
 	/* Check if state is initializing */
 	if (state == PCPU_STATE_INITIALIZING) {
 		/* Increment CPU up count */
-		up_count++;
+		atomic_inc16(&up_count);
 
 		/* Save this CPU's logical ID to the TSC AUX MSR */
 		set_current_cpu_id(pcpu_id);
@@ -322,13 +315,11 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
 
 	/* If cpu is dead, decrement CPU up count */
 	if (state == PCPU_STATE_DEAD) {
-		up_count--;
+		atomic_dec16(&up_count);
 	}
 
 	/* Set state for the specified CPU */
 	per_cpu(boot_state, pcpu_id) = state;
-
-	spinlock_release(&up_count_spinlock);
 }
 
 #ifdef STACK_PROTECTOR
@@ -645,7 +636,7 @@ void start_cpus(void)
 	 * configured time-out has expired
 	 */
 	timeout = CONFIG_CPU_UP_TIMEOUT * 1000U;
-	while ((up_count != expected_up) && (timeout != 0U)) {
+	while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
 		/* Delay 10us */
 		udelay(10U);
 
@@ -654,7 +645,7 @@ void start_cpus(void)
 	}
 
 	/* Check to see if all expected CPUs are actually up */
-	if (up_count != expected_up) {
+	if (atomic_load16(&up_count) != expected_up) {
 		/* Print error */
 		pr_fatal("Secondary CPUs failed to come up");
 
@@ -682,7 +673,7 @@ void stop_cpus(void)
 	}
 
 	expected_up = 1U;
-	while ((up_count != expected_up) && (timeout != 0U)) {
+	while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
 		/* Delay 10us */
 		udelay(10U);
 
@@ -690,7 +681,7 @@ void stop_cpus(void)
 		timeout -= 10U;
 	}
 
-	if (up_count != expected_up) {
+	if (atomic_load16(&up_count) != expected_up) {
 		pr_fatal("Can't make all APs offline");
 
 		/* if partial APs is down, it's not easy to recover
diff --git a/hypervisor/include/lib/atomic.h b/hypervisor/include/lib/atomic.h
index 07a8ffadf..c31416f3f 100644
--- a/hypervisor/include/lib/atomic.h
+++ b/hypervisor/include/lib/atomic.h
@@ -41,6 +41,7 @@ static inline type name(const volatile type *ptr)		\
 		: "cc", "memory");				\
 	return ret;						\
 }
+build_atomic_load(atomic_load16, "w", uint16_t)
 build_atomic_load(atomic_load32, "l", uint32_t)
 build_atomic_load(atomic_load64, "q", uint64_t)
 
@@ -63,6 +64,7 @@ static inline void name(type *ptr)			\
 		: "=m" (*ptr)					\
 		: "m" (*ptr));					\
 }
+build_atomic_inc(atomic_inc16, "w", uint16_t)
 build_atomic_inc(atomic_inc32, "l", uint32_t)
 build_atomic_inc(atomic_inc64, "q", uint64_t)
 
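
A note on the new helpers: below is a minimal standalone sketch (not
ACRN source; names and bodies are illustrative assumptions) of the
lock-free pattern this patch relies on. On x86, an aligned 16-bit load
is a single movw, so reading up_count needs no lock, while increment
and decrement use a LOCK-prefixed read-modify-write so concurrent CPUs
cannot lose updates. The bodies mirror the visible style of the
build_atomic_load/build_atomic_inc macros in atomic.h; atomic_dec16 is
sketched the same way even though the patch only adds the load16 and
inc16 instantiations (a dec16 helper is assumed to exist already).

/* sketch.c -- illustrative only, not part of the patch */
#include <stdint.h>
#include <stdio.h>

/* Aligned 16-bit loads are atomic on x86; the "memory" clobber stops
 * the compiler from reusing a stale cached value across the call. */
static inline uint16_t atomic_load16(const volatile uint16_t *ptr)
{
	uint16_t ret;

	asm volatile("movw %1, %0"
			: "=r" (ret)
			: "m" (*ptr)
			: "cc", "memory");
	return ret;
}

/* The LOCK prefix makes the read-modify-write atomic across CPUs. */
static inline void atomic_inc16(uint16_t *ptr)
{
	asm volatile("lock incw %0"
			: "=m" (*ptr)
			: "m" (*ptr));
}

static inline void atomic_dec16(uint16_t *ptr)
{
	asm volatile("lock decw %0"
			: "=m" (*ptr)
			: "m" (*ptr));
}

int main(void)
{
	uint16_t up_count = 0U;

	atomic_inc16(&up_count);	/* BSP comes up */
	atomic_inc16(&up_count);	/* one AP comes up */
	atomic_dec16(&up_count);	/* the AP goes dead */

	/* Prints "up_count = 1", the state stop_cpus() waits for. */
	printf("up_count = %u\n", atomic_load16(&up_count));
	return 0;
}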