ARC: atomics: Add compiler barrier to atomic operations...

... to avoid unwanted gcc optimizations

SMP kernels fail to boot with commit 596ff4a09b
("cpumask: re-introduce constant-sized cpumask optimizations").

|
| percpu: BUG: failure at mm/percpu.c:2981/pcpu_build_alloc_info()!
|

The write operation performed by the SCOND instruction in the atomic
inline asm code is not made visible to the compiler. Assuming that
memory is unchanged across the asm, the compiler mis-optimizes a
nested loop that iterates over the cpumask in the
pcpu_build_alloc_info() function.
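
To illustrate the failure mode, consider this minimal sketch (the
function names and the final read are made up for illustration; only
the asm pattern mirrors the kernel code). Without "memory" in the
clobber list, gcc may assume *p is unchanged by the asm and fold the
two loads together:

/* Sketch only: no "memory" clobber, as in the broken atomics. */
static inline void set_mask_noclobber(unsigned long *p, unsigned long m)
{
	unsigned long val;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"	/* load-locked *p          */
	"	or      %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"	/* store-conditional to *p */
	"	bnz     1b		\n"
	: "=&r" (val)
	: "r" (p), "ir" (m)
	: "cc");	/* BUG: compiler not told that memory changed */
}

unsigned long observe(unsigned long *p, unsigned long m)
{
	unsigned long before = *p;

	set_mask_noclobber(p, m);

	/*
	 * gcc may compute this as 0 at compile time: nothing told it
	 * that the SCOND above wrote to *p, so the reload of *p looks
	 * redundant with 'before'. A "memory" clobber forces a reload.
	 */
	return *p ^ before;
}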

Fix this by adding a compiler barrier (a "memory" clobber in the
inline asm).

Apparently atomic ops used to get a memory clobber implicitly, via the
surrounding smp_mb(). However, commit b64be68369
("ARC: atomics: implement relaxed variants") removed the smp_mb() for
the relaxed variants, but failed to add an explicit compiler barrier.
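
As a sketch of why the fully ordered variants kept working: the
generic fallbacks bracket each relaxed op with smp_mb(), and smp_mb()
on ARCv2 is itself inline asm carrying a "memory" clobber (the
dmb-based definition below is an assumption, simplified from
arch/arc/include/asm/barrier.h), so the compiler barrier came along
for free:

/* Assumed/simplified ARCv2 definition: HW barrier + "memory" clobber */
#define smp_mb()	__asm__ __volatile__("dmb 3\n" : : : "memory")

/* Sketch of the generic fully ordered fallback around a relaxed op */
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	int ret;

	smp_mb();	/* also a compiler barrier */
	ret = arch_atomic_add_return_relaxed(i, v);
	smp_mb();	/* also a compiler barrier */

	return ret;
}

The bare _relaxed ops have no such bracket, so they need the clobber
in the asm itself, which is what the hunks below add.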

Link: https://github.com/foss-for-synopsys-dwc-arc-processors/linux/issues/135
Cc: <stable@vger.kernel.org> # v6.3+
Fixes: b64be68369 ("ARC: atomics: implement relaxed variants")
Signed-off-by: Pavel Kozlov <pavel.kozlov@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
[vgupta: tweaked the changelog and added Fixes tag]

Author:    Pavel Kozlov, 2023-08-15 19:11:36 +04:00
Committer: Vineet Gupta
Commit:    42f51fb24f (parent: 4d3696801b)
2 changed files with 6 additions and 6 deletions

--- a/arch/arc/include/asm/atomic-llsc.h
+++ b/arch/arc/include/asm/atomic-llsc.h

@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v)	\
 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
 	  [i]	"ir"	(i)	\
-	: "cc");	\
+	: "cc", "memory");	\
 }	\
 
 #define ATOMIC_OP_RETURN(op, asm_op)	\
@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 	: [val]	"=&r"	(val)	\
 	: [ctr]	"r"	(&v->counter),	\
 	  [i]	"ir"	(i)	\
-	: "cc");	\
+	: "cc", "memory");	\
 	\
 	return val;	\
 }
@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 	  [orig] "=&r"	(orig)	\
 	: [ctr]	"r"	(&v->counter),	\
 	  [i]	"ir"	(i)	\
-	: "cc");	\
+	: "cc", "memory");	\
 	\
 	return orig;	\
 }
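
For reference, with the "cc", "memory" clobber applied, the add
flavour of the ATOMIC_OP macro above expands to roughly the following
(simplified; whitespace and the exact instantiation are assumptions):

static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned int val;

	__asm__ __volatile__(
	"1:	llock   %[val], [%[ctr]]	\n"
	"	add     %[val], %[val], %[i]	\n"
	"	scond   %[val], [%[ctr]]	\n"
	"	bnz     1b			\n"
	: [val] "=&r" (val) /* Early clobber to prevent reg reuse */
	: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */
	  [i] "ir" (i)
	: "cc", "memory");	/* "memory": the asm writes v->counter */
}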

--- a/arch/arc/include/asm/atomic64-arcv2.h
+++ b/arch/arc/include/asm/atomic64-arcv2.h

@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v)	\
 	"	bnz     1b	\n"	\
 	: "=&r"(val)	\
 	: "r"(&v->counter), "ir"(a)	\
-	: "cc");	\
+	: "cc", "memory");	\
 }	\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)	\
@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)	\
 	"	bnz     1b	\n"	\
 	: [val] "=&r"(val)	\
 	: "r"(&v->counter), "ir"(a)	\
-	: "cc");	/* memory clobber comes from smp_mb() */	\
+	: "cc", "memory");	\
 	\
 	return val;	\
 }
@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)	\
 	"	bnz     1b	\n"	\
 	: "=&r"(orig), "=&r"(val)	\
 	: "r"(&v->counter), "ir"(a)	\
-	: "cc");	/* memory clobber comes from smp_mb() */	\
+	: "cc", "memory");	\
 	\
 	return orig;	\
 }