60 files changed, 7044 insertions, 2127 deletions
diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst
index 322851bada16..976e85adffe8 100644
--- a/Documentation/core-api/refcount-vs-atomic.rst
+++ b/Documentation/core-api/refcount-vs-atomic.rst
@@ -54,6 +54,13 @@ must propagate to all other CPUs before the release operation
 (A-cumulative property). This is implemented using
 :c:func:`smp_store_release`.
 
+An ACQUIRE memory ordering guarantees that all post loads and
+stores (all po-later instructions) on the same CPU are
+completed after the acquire operation. It also guarantees that all
+po-later stores on the same CPU must propagate to all other CPUs
+after the acquire operation executes. This is implemented using
+:c:func:`smp_acquire__after_ctrl_dep`.
+
 A control dependency (on success) for refcounters guarantees that
 if a reference for an object was successfully obtained (reference
 counter increment or addition happened, function returned true),
@@ -119,13 +126,24 @@ Memory ordering guarantees changes:
    result of obtaining pointer to the object!
 
 
-case 5) - decrement-based RMW ops that return a value
------------------------------------------------------
+case 5) - generic dec/sub decrement-based RMW ops that return a value
+---------------------------------------------------------------------
 
 Function changes:
 
  * :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test`
  * :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test`
+
+Memory ordering guarantees changes:
+
+ * fully ordered --> RELEASE ordering + ACQUIRE ordering on success
+
+
+case 6) other decrement-based RMW ops that return a value
+---------------------------------------------------------
+
+Function changes:
+
  * no atomic counterpart --> :c:func:`refcount_dec_if_one`
  * ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)``
 
@@ -136,7 +154,7 @@ Memory ordering guarantees changes:
 
 .. note:: :c:func:`atomic_add_unless` only provides full order on success.
 
-case 6) - lock-based RMW
+case 7) - lock-based RMW
 ------------------------
 
 Function changes:
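
The RELEASE + ACQUIRE-on-success ordering documented above is easiest to see
on an object-teardown path. A minimal sketch (hypothetical struct and put
helper, not part of this patch):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;
	void *payload;
};

static void obj_put(struct obj *o)
{
	/*
	 * RELEASE: stores to o->payload made while holding a reference
	 * are visible to whichever CPU drops the count to zero.
	 * ACQUIRE on success: the frees below cannot be reordered
	 * before the final decrement.
	 */
	if (refcount_dec_and_test(&o->ref)) {
		kfree(o->payload);
		kfree(o);
	}
}
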
diff --git a/Kbuild b/Kbuild
--- a/Kbuild
+++ b/Kbuild
@@ -6,7 +6,8 @@
 # 2) Generate timeconst.h
 # 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
 # 4) Check for missing system calls
-# 5) Generate constants.py (may need bounds.h)
+# 5) check atomics headers are up-to-date
+# 6) Generate constants.py (may need bounds.h)
 
 #####
 # 1) Generate bounds.h
@@ -59,7 +60,20 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
 	$(call cmd,syscalls)
 
 #####
-# 5) Generate constants for Python GDB integration
+# 5) Check atomic headers are up-to-date
+#
+
+always += old-atomics
+targets += old-atomics
+
+quiet_cmd_atomics = CALL    $<
+      cmd_atomics = $(CONFIG_SHELL) $<
+
+old-atomics: scripts/atomic/check-atomics.sh FORCE
+	$(call cmd,atomics)
+
+#####
+# 6) Generate constants for Python GDB integration
 #
 
 extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py
diff --git a/MAINTAINERS b/MAINTAINERS
index ea60bf137aaf..5e5529b9ffc8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2608,6 +2608,7 @@ L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	arch/*/include/asm/atomic*.h
 F:	include/*/atomic*.h
+F:	scripts/atomic/
 
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:	Bradley Grove <linuxdrivers@attotech.com>
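
The wholesale atomic_* -> arch_atomic_* rename below exists so that a generic
header can wrap every architecture-provided op with sanitizer checks. A
simplified sketch of the wrapper shape used by asm-generic/atomic-instrumented.h
(illustrative, not the literal generated header):

#include <linux/kasan-checks.h>

/* The arch supplies arch_atomic_*(); the generic wrapper layers
 * instrumentation on top under the familiar un-prefixed name. */
static __always_inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline void atomic_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}
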
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9bca54dda75c..1f4e9ee641c9 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -42,124 +42,131 @@
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#define atomic_read(v)			READ_ONCE((v)->counter)
-#define atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
-
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_add_return_acquire	atomic_add_return_acquire
-#define atomic_add_return_release	atomic_add_return_release
-#define atomic_add_return		atomic_add_return
-
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_sub_return_acquire	atomic_sub_return_acquire
-#define atomic_sub_return_release	atomic_sub_return_release
-#define atomic_sub_return		atomic_sub_return
-
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire	atomic_fetch_add_acquire
-#define atomic_fetch_add_release	atomic_fetch_add_release
-#define atomic_fetch_add		atomic_fetch_add
-
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire	atomic_fetch_sub_acquire
-#define atomic_fetch_sub_release	atomic_fetch_sub_release
-#define atomic_fetch_sub		atomic_fetch_sub
-
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire	atomic_fetch_and_acquire
-#define atomic_fetch_and_release	atomic_fetch_and_release
-#define atomic_fetch_and		atomic_fetch_and
-
-#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_acquire	atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_release	atomic_fetch_andnot_release
-#define atomic_fetch_andnot		atomic_fetch_andnot
-
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire		atomic_fetch_or_acquire
-#define atomic_fetch_or_release		atomic_fetch_or_release
-#define atomic_fetch_or			atomic_fetch_or
-
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire	atomic_fetch_xor_acquire
-#define atomic_fetch_xor_release	atomic_fetch_xor_release
-#define atomic_fetch_xor		atomic_fetch_xor
-
-#define atomic_xchg_relaxed(v, new)	xchg_relaxed(&((v)->counter), (new))
-#define atomic_xchg_acquire(v, new)	xchg_acquire(&((v)->counter), (new))
-#define atomic_xchg_release(v, new)	xchg_release(&((v)->counter), (new))
-#define atomic_xchg(v, new)		xchg(&((v)->counter), (new))
-
-#define atomic_cmpxchg_relaxed(v, old, new)	\
-	cmpxchg_relaxed(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_acquire(v, old, new)	\
-	cmpxchg_acquire(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_release(v, old, new)	\
-	cmpxchg_release(&((v)->counter), (old), (new))
-#define atomic_cmpxchg(v, old, new)	cmpxchg(&((v)->counter), (old), (new))
-
-#define atomic_andnot			atomic_andnot
+#define arch_atomic_read(v)			READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i)			WRITE_ONCE(((v)->counter), (i))
+
+#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire		arch_atomic_add_return_acquire
+#define arch_atomic_add_return_release		arch_atomic_add_return_release
+#define arch_atomic_add_return			arch_atomic_add_return
+
+#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire		arch_atomic_sub_return_acquire
+#define arch_atomic_sub_return_release		arch_atomic_sub_return_release
+#define arch_atomic_sub_return			arch_atomic_sub_return
+
+#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire		arch_atomic_fetch_add_acquire
+#define arch_atomic_fetch_add_release		arch_atomic_fetch_add_release
+#define arch_atomic_fetch_add			arch_atomic_fetch_add
+
+#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire		arch_atomic_fetch_sub_acquire
+#define arch_atomic_fetch_sub_release		arch_atomic_fetch_sub_release
+#define arch_atomic_fetch_sub			arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire		arch_atomic_fetch_and_acquire
+#define arch_atomic_fetch_and_release		arch_atomic_fetch_and_release
+#define arch_atomic_fetch_and			arch_atomic_fetch_and
+
+#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_andnot_acquire	arch_atomic_fetch_andnot_acquire
+#define arch_atomic_fetch_andnot_release	arch_atomic_fetch_andnot_release
+#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot
+
+#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire		arch_atomic_fetch_or_acquire
+#define arch_atomic_fetch_or_release		arch_atomic_fetch_or_release
+#define arch_atomic_fetch_or			arch_atomic_fetch_or
+
+#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire		arch_atomic_fetch_xor_acquire
+#define arch_atomic_fetch_xor_release		arch_atomic_fetch_xor_release
+#define arch_atomic_fetch_xor			arch_atomic_fetch_xor
+
+#define arch_atomic_xchg_relaxed(v, new) \
+	arch_xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg_acquire(v, new) \
+	arch_xchg_acquire(&((v)->counter), (new))
+#define arch_atomic_xchg_release(v, new) \
+	arch_xchg_release(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+	arch_xchg(&((v)->counter), (new))
+
+#define arch_atomic_cmpxchg_relaxed(v, old, new) \
+	arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_acquire(v, old, new) \
+	arch_cmpxchg_acquire(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_release(v, old, new) \
+	arch_cmpxchg_release(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg(v, old, new) \
+	arch_cmpxchg(&((v)->counter), (old), (new))
+
+#define arch_atomic_andnot			arch_atomic_andnot
 
 /*
- * 64-bit atomic operations.
+ * 64-bit arch_atomic operations.
  */
-#define ATOMIC64_INIT			ATOMIC_INIT
-#define atomic64_read			atomic_read
-#define atomic64_set			atomic_set
-
-#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-#define atomic64_add_return_acquire	atomic64_add_return_acquire
-#define atomic64_add_return_release	atomic64_add_return_release
-#define atomic64_add_return		atomic64_add_return
-
-#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire	atomic64_sub_return_acquire
-#define atomic64_sub_return_release	atomic64_sub_return_release
-#define atomic64_sub_return		atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire	atomic64_fetch_add_acquire
-#define atomic64_fetch_add_release	atomic64_fetch_add_release
-#define atomic64_fetch_add		atomic64_fetch_add
-
-#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire	atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_release	atomic64_fetch_sub_release
-#define atomic64_fetch_sub		atomic64_fetch_sub
-
-#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire	atomic64_fetch_and_acquire
-#define atomic64_fetch_and_release	atomic64_fetch_and_release
-#define atomic64_fetch_and		atomic64_fetch_and
-
-#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_acquire	atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_release	atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot		atomic64_fetch_andnot
-
-#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire	atomic64_fetch_or_acquire
-#define atomic64_fetch_or_release	atomic64_fetch_or_release
-#define atomic64_fetch_or		atomic64_fetch_or
-
-#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire	atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_release	atomic64_fetch_xor_release
-#define atomic64_fetch_xor		atomic64_fetch_xor
-
-#define atomic64_xchg_relaxed		atomic_xchg_relaxed
-#define atomic64_xchg_acquire		atomic_xchg_acquire
-#define atomic64_xchg_release		atomic_xchg_release
-#define atomic64_xchg			atomic_xchg
-
-#define atomic64_cmpxchg_relaxed	atomic_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire	atomic_cmpxchg_acquire
-#define atomic64_cmpxchg_release	atomic_cmpxchg_release
-#define atomic64_cmpxchg		atomic_cmpxchg
-
-#define atomic64_andnot			atomic64_andnot
-
-#define atomic64_dec_if_positive	atomic64_dec_if_positive
+#define ATOMIC64_INIT				ATOMIC_INIT
+#define arch_atomic64_read			arch_atomic_read
+#define arch_atomic64_set			arch_atomic_set
+
+#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire	arch_atomic64_add_return_acquire
+#define arch_atomic64_add_return_release	arch_atomic64_add_return_release
+#define arch_atomic64_add_return		arch_atomic64_add_return
+
+#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire	arch_atomic64_sub_return_acquire
+#define arch_atomic64_sub_return_release	arch_atomic64_sub_return_release
+#define arch_atomic64_sub_return		arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire		arch_atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_add_release		arch_atomic64_fetch_add_release
+#define arch_atomic64_fetch_add			arch_atomic64_fetch_add
+
+#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire		arch_atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_sub_release		arch_atomic64_fetch_sub_release
+#define arch_atomic64_fetch_sub			arch_atomic64_fetch_sub
+
+#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire		arch_atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_and_release		arch_atomic64_fetch_and_release
+#define arch_atomic64_fetch_and			arch_atomic64_fetch_and
+
+#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_andnot_acquire	arch_atomic64_fetch_andnot_acquire
+#define arch_atomic64_fetch_andnot_release	arch_atomic64_fetch_andnot_release
+#define arch_atomic64_fetch_andnot		arch_atomic64_fetch_andnot
+
+#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire		arch_atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_or_release		arch_atomic64_fetch_or_release
+#define arch_atomic64_fetch_or			arch_atomic64_fetch_or
+
+#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire		arch_atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_xor_release		arch_atomic64_fetch_xor_release
+#define arch_atomic64_fetch_xor			arch_atomic64_fetch_xor
+
+#define arch_atomic64_xchg_relaxed		arch_atomic_xchg_relaxed
+#define arch_atomic64_xchg_acquire		arch_atomic_xchg_acquire
+#define arch_atomic64_xchg_release		arch_atomic_xchg_release
+#define arch_atomic64_xchg			arch_atomic_xchg
+
+#define arch_atomic64_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire		arch_atomic_cmpxchg_acquire
+#define arch_atomic64_cmpxchg_release		arch_atomic_cmpxchg_release
+#define arch_atomic64_cmpxchg			arch_atomic_cmpxchg
+
+#define arch_atomic64_andnot			arch_atomic64_andnot
+
+#define arch_atomic64_dec_if_positive		arch_atomic64_dec_if_positive
+
+#include <asm-generic/atomic-instrumented.h>
 
 #endif
 #endif
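
Note the final hunk: with the arch_ prefix in place, arm64 can pull in
asm-generic/atomic-instrumented.h, and callers keep using the familiar
un-prefixed names, which now resolve to the instrumented wrappers. For
example (hypothetical caller):

static inline long grab_token(atomic64_t *slot)
{
	/* Fully ordered: returns the old value and stores 0. */
	return atomic64_xchg(slot, 0);
}
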
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index af7b99005453..e321293e0c89 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -39,7 +39,7 @@
 
 #define ATOMIC_OP(op, asm_op)						\
 __LL_SC_INLINE void							\
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
+__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v))			\
 {									\
 	unsigned long tmp;						\
 	int result;							\
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))		\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: "Ir" (i));							\
 }									\
-__LL_SC_EXPORT(atomic_##op);
+__LL_SC_EXPORT(arch_atomic_##op);
 
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
 __LL_SC_INLINE int							\
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))		\
+__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v))	\
 {									\
 	unsigned long tmp;						\
 	int result;							\
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
 									\
 	return result;							\
 }									\
-__LL_SC_EXPORT(atomic_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic_##op##_return##name);
 
 #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
 __LL_SC_INLINE int							\
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))		\
+__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v))	\
 {									\
 	unsigned long tmp;						\
 	int val, result;						\
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))	\
 									\
 	return result;							\
 }									\
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
 
 #define ATOMIC_OPS(...)							\
 	ATOMIC_OP(__VA_ARGS__)						\
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
 
 #define ATOMIC64_OP(op, asm_op)						\
 __LL_SC_INLINE void							\
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
+__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))		\
 {									\
 	long result;							\
 	unsigned long tmp;						\
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))	\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: "Ir" (i));							\
 }									\
-__LL_SC_EXPORT(atomic64_##op);
+__LL_SC_EXPORT(arch_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
 __LL_SC_INLINE long							\
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))	\
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
 {									\
 	long result;							\
 	unsigned long tmp;						\
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
 									\
 	return result;							\
 }									\
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
 __LL_SC_INLINE long							\
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))	\
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))	\
 {									\
 	long result, val;						\
 	unsigned long tmp;						\
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
 									\
 	return result;							\
 }									\
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
 
 #define ATOMIC64_OPS(...)						\
 	ATOMIC64_OP(__VA_ARGS__)					\
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
 #undef ATOMIC64_OP
 
 __LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
 {
 	long result;
 	unsigned long tmp;
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 
 	return result;
 }
-__LL_SC_EXPORT(atomic64_dec_if_positive);
+__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)		\
 __LL_SC_INLINE u##sz							\
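
For reference, the ATOMIC_OP() macro above generates a load-exclusive/
store-exclusive retry loop. Roughly what ATOMIC_OP(add, add) expands to
after the rename (illustrative; the prfm prefetch and the __LL_SC_EXPORT
annotation are omitted):

static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile(
	"1:	ldxr	%w0, %2\n"	/* load-exclusive v->counter */
	"	add	%w0, %w0, %w3\n"
	"	stxr	%w1, %w0, %2\n"	/* fails if exclusivity was lost */
	"	cbnz	%w1, 1b"	/* retry on contention */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}
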
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index a424355240c5..9256a3921e4b 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -25,9 +25,9 @@
 #error "please don't include this file directly"
 #endif
 
-#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
+#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(arch_atomic_##op)
 #define ATOMIC_OP(op, asm_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline int atomic_fetch_##op##name(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
-static inline int atomic_add_return##name(int i, atomic_t *v)		\
+static inline int arch_atomic_add_return##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC_OP_ADD_RETURN
 
-static inline void atomic_and(int i, atomic_t *v)
+static inline void arch_atomic_and(int i, atomic_t *v)
 {
 	register int w0 asm ("w0") = i;
 	register atomic_t *x1 asm ("x1") = v;
@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v)
 }
 
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
-static inline int atomic_fetch_and##name(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_and##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC_FETCH_OP_AND
 
-static inline void atomic_sub(int i, atomic_t *v)
+static inline void arch_atomic_sub(int i, atomic_t *v)
 {
 	register int w0 asm ("w0") = i;
 	register atomic_t *x1 asm ("x1") = v;
@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 }
 
 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
-static inline int atomic_sub_return##name(int i, atomic_t *v)		\
+static inline int arch_atomic_sub_return##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC_OP_SUB_RETURN
 
 #define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
-static inline int atomic_fetch_sub##name(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
 #undef ATOMIC_FETCH_OP_SUB
 #undef __LL_SC_ATOMIC
 
-#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
+#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(arch_atomic64_##op)
 #define ATOMIC64_OP(op, asm_op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)		\
+static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
 {									\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd)
 
 #undef ATOMIC64_OP
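
The LSE variants above replace the LL/SC retry loop with a single ARMv8.1
atomic instruction, patching in a branch to the out-of-line LL/SC
implementation when FEAT_LSE is absent. Roughly what ATOMIC_OP(add, stadd)
expands to on an LSE-capable CPU (illustrative; the ARM64_LSE_ATOMIC_INSN
alternative and its clobber list are omitted):

static inline void arch_atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	/* One atomic add in memory; no loop, no return value needed. */
	asm volatile(
	"	stadd	%w[i], %[v]\n"
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1));
}
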