hv: Remove the up_count_spinlock and use atomic for up_count

It's possible that the up_count_spinlock is not released before the
system enters S3. The scenario looks like the following:

   BSP                                  AP
 stop_cpus                            cpu_dead
                                          cpu_set_current_state
                                             spinlock_obtain
                                                 up_count--

   wait for up_count == 1
 enter S3

                                             spinlock_release

This window is made worse by the fact that the actual spinlock
release can be further delayed by the cache.

In fact, most of the content protected by up_count_spinlock is
per-CPU data, which can be changed without a lock; the only shared
state left is up_count itself. This patch removes up_count_spinlock
and uses the atomic API to update up_count, as sketched below.
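
The essence of the change, as a minimal standalone sketch (using GCC
__atomic builtins in place of the hypervisor's own atomic helpers, and
hypothetical function names, purely for illustration): the AP's
decrement becomes a single atomic read-modify-write, so there is no
lock left for a dying AP to hold while the BSP proceeds into S3.

#include <stdint.h>

static uint16_t up_count;

/* AP going offline: one atomic decrement; nothing remains to release */
static void mark_cpu_dead(void)
{
	(void)__atomic_sub_fetch(&up_count, 1, __ATOMIC_SEQ_CST);
}

/* BSP: an atomic load forces a fresh read of up_count on every
 * iteration, so the AP's decrement is observed promptly */
static void wait_all_aps_offline(void)
{
	while (__atomic_load_n(&up_count, __ATOMIC_SEQ_CST) != 1U) {
		/* udelay(10U) plus a timeout in the real code */
	}
}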

Tracked-On: #1691
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Reviewed-by: Eddie Dong <eddie.dong@intel.com>
Acked-by: Anthony Xu <anthony.xu@intel.com>
commit 4c1cb60684 (parent b74720636b)
Author: Yin Fengwei, 2018-11-01 16:08:37 +08:00; committed by lijinxia
2 changed files with 9 additions and 16 deletions


@@ -14,15 +14,10 @@ spinlock_t trampoline_spinlock = {
 	.tail = 0U
 };
 
-static spinlock_t up_count_spinlock = {
-	.head = 0U,
-	.tail = 0U
-};
-
 struct per_cpu_region per_cpu_data[CONFIG_MAX_PCPU_NUM] __aligned(CPU_PAGE_SIZE);
 uint16_t phys_cpu_num = 0U;
 static uint64_t pcpu_sync = 0UL;
-static volatile uint16_t up_count = 0U;
+static uint16_t up_count = 0U;
 static uint64_t startup_paddr = 0UL;
 
 /* physical cpu active bitmap, support up to 64 cpus */
@@ -309,12 +304,10 @@ static void init_percpu_lapic_id(void)
 static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
 {
-	spinlock_obtain(&up_count_spinlock);
-
 	/* Check if state is initializing */
 	if (state == PCPU_STATE_INITIALIZING) {
 		/* Increment CPU up count */
-		up_count++;
+		atomic_inc16(&up_count);
 
 		/* Save this CPU's logical ID to the TSC AUX MSR */
 		set_current_cpu_id(pcpu_id);
@@ -322,13 +315,11 @@ static void cpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
 
 	/* If cpu is dead, decrement CPU up count */
 	if (state == PCPU_STATE_DEAD) {
-		up_count--;
+		atomic_dec16(&up_count);
 	}
 
 	/* Set state for the specified CPU */
 	per_cpu(boot_state, pcpu_id) = state;
-
-	spinlock_release(&up_count_spinlock);
 }
 
 #ifdef STACK_PROTECTOR
@@ -645,7 +636,7 @@ void start_cpus(void)
 	 * configured time-out has expired
 	 */
 	timeout = CONFIG_CPU_UP_TIMEOUT * 1000U;
-	while ((up_count != expected_up) && (timeout != 0U)) {
+	while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
 		/* Delay 10us */
 		udelay(10U);
@@ -654,7 +645,7 @@ void start_cpus(void)
 	}
 
 	/* Check to see if all expected CPUs are actually up */
-	if (up_count != expected_up) {
+	if (atomic_load16(&up_count) != expected_up) {
 		/* Print error */
 		pr_fatal("Secondary CPUs failed to come up");
@@ -682,7 +673,7 @@ void stop_cpus(void)
 	}
 
 	expected_up = 1U;
-	while ((up_count != expected_up) && (timeout != 0U)) {
+	while ((atomic_load16(&up_count) != expected_up) && (timeout != 0U)) {
 		/* Delay 10us */
 		udelay(10U);
@@ -690,7 +681,7 @@ void stop_cpus(void)
 		timeout -= 10U;
 	}
 
-	if (up_count != expected_up) {
+	if (atomic_load16(&up_count) != expected_up) {
 		pr_fatal("Can't make all APs offline");
 		/* if partial APs is down, it's not easy to recover
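
A subtlety in the first hunk above: the volatile qualifier on up_count
is dropped along with the lock. The polling loops in start_cpus() and
stop_cpus() remain correct because atomic_load16() reads the counter
through a memory operand in inline assembly with a "memory" clobber
(visible in the header diff below), which forbids the compiler from
caching the value in a register. A minimal sketch of the failure mode
this avoids (broken_wait is a hypothetical function for illustration):

#include <stdint.h>

static uint16_t up_count;	/* neither volatile nor read atomically */

/* With a plain load, the compiler may legally read up_count once,
 * keep it in a register, and spin forever even after another CPU
 * decrements the counter in memory. */
static void broken_wait(uint16_t expected)
{
	while (up_count != expected) {
		/* may compile as: if (up_count != expected) { for (;;) ; } */
	}
}

The second file adds the 16-bit variants of the atomic helpers used above.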


@@ -41,6 +41,7 @@ static inline type name(const volatile type *ptr) \
 			: "cc", "memory");	\
 	return ret;				\
 }
+build_atomic_load(atomic_load16, "w", uint16_t)
 build_atomic_load(atomic_load32, "l", uint32_t)
 build_atomic_load(atomic_load64, "q", uint64_t)
@@ -63,6 +64,7 @@ static inline void name(type *ptr) \
 			: "=m" (*ptr)		\
 			: "m" (*ptr));		\
 }
+build_atomic_inc(atomic_inc16, "w", uint16_t)
 build_atomic_inc(atomic_inc32, "l", uint32_t)
 build_atomic_inc(atomic_inc64, "q", uint64_t)
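
For reference, here is roughly what the two new invocations expand to.
The function signatures, operand constraints, and clobbers are taken
from the context lines above; the exact asm templates are not visible
in this diff, so the movw / lock incw bodies are an assumption inferred
from the "w" (16-bit operand) suffix argument:

#include <stdint.h>

/* build_atomic_load(atomic_load16, "w", uint16_t) expands to roughly: */
static inline uint16_t atomic_load16(const volatile uint16_t *ptr)
{
	uint16_t ret;

	asm volatile("movw %1, %0"	/* assumed template */
			: "=r" (ret)
			: "m" (*ptr)
			: "cc", "memory");
	return ret;
}

/* build_atomic_inc(atomic_inc16, "w", uint16_t) expands to roughly: */
static inline void atomic_inc16(uint16_t *ptr)
{
	asm volatile("lock incw %0"	/* assumed LOCK-prefixed increment */
			: "=m" (*ptr)
			: "m" (*ptr));
}

The lock prefix makes the increment a single atomic read-modify-write,
which is what allows cpu_set_current_state() to drop the spinlock
entirely.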