author     Linus Torvalds <torvalds@linux-foundation.org>  2019-11-25 17:42:56 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-11-25 17:42:56 -0800
commit     2981dcf333b37e3753b5c1b5814418c4de1a8e34 (patch)
tree       7b082d99452fb90fd39dd619cb5c65bd66c979c6 /arch/mips/include
parent     5ef30d74232ed204a461a5dbd0309718d63abd01 (diff)
parent     a8d0f11ee50ddbd9f243c7a8b1a393a4f23ba093 (diff)
Merge tag 'mips_5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS updates from Paul Burton:
 "The main MIPS changes for 5.5:

   - Atomics-related code sees some rework & cleanup, most notably allowing
     Loongson LL/SC errata workarounds to be more bulletproof & their
     correctness to be checked at build time.

   - Command line setup code is simplified somewhat, resolving various
     corner cases.

   - MIPS kernels can now be built with kcov code coverage support.

   - We can now build with CONFIG_FORTIFY_SOURCE=y.

   - Miscellaneous cleanups.

  And some platform specific changes:

   - We now disable some broken TLB functionality on certain Ingenic
     systems, and JZ4780 systems gain some devicetree nodes to support
     more devices.

   - Loongson support sees a number of cleanups, and we gain initial
     support for Loongson 3A R4 systems.

   - We gain support for MediaTek MT7688-based GARDENA Smart Gateway
     systems.

   - SGI IP27 (Origin 2*) sees a number of fixes, cleanups &
     simplifications.

   - SGI IP30 (Octane) systems are now supported"

* tag 'mips_5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (107 commits)
  MIPS: SGI-IP27: Enable ethernet phy on second Origin 200 module
  MIPS: PCI: Fix fake subdevice ID for IOC3
  MIPS: Ingenic: Disable abandoned HPTLB function.
  MIPS: PCI: remember nasid changed by set interrupt affinity
  MIPS: SGI-IP27: Fix crash, when CPUs are disabled via nr_cpus parameter
  mips: add support for folded p4d page tables
  mips: drop __pXd_offset() macros that duplicate pXd_index() ones
  mips: fix build when "48 bits virtual memory" is enabled
  MIPS: math-emu: Reuse name array in debugfs_fpuemu()
  MIPS: allow building with kcov coverage
  MIPS: Loongson64: Drop setup_pcimap
  MIPS: Loongson2ef: Convert to early_printk_8250
  MIPS: Drop CPU_SUPPORTS_UNCACHED_ACCELERATED
  MIPS: Loongson{2ef, 32, 64} convert to generic fw cmdline
  MIPS: Drop pmon.h
  MIPS: Loongson: Unify LOONGSON3/LOONGSON64 Kconfig usage
  MIPS: Loongson: Rename LOONGSON1 to LOONGSON32
  MIPS: Loongson: Fix return value of loongson_hwmon_init
  MIPS: add support for SGI Octane (IP30)
  MIPS: PCI: make phys_to_dma/dma_to_phys for pci-xtalk-bridge common
  ...
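As context for the atomics rework noted above, the sketch below is a rough hand-expansion of what the new shared ATOMIC_OP() macro in asm/atomic.h (see the diff further down) generates for atomic_add(). It illustrates the LL/SC retry pattern and where the Loongson 3 workaround barrier lands; it is not a verbatim preprocessor expansion. __SYNC(), __SC_BEQZ and __LLSC_CLOBBER come from the asm/sync.h and asm/llsc.h headers added or extended by this series.

/* Illustrative hand-expansion of ATOMIC_OP(atomic, add, int, +=, addu, ll, sc). */
static inline void atomic_add(int i, atomic_t *v)
{
	int temp;

	if (!kernel_uses_llsc) {
		/* CPUs without LL/SC fall back to disabling interrupts. */
		unsigned long flags;

		raw_local_irq_save(flags);
		v->counter += i;
		raw_local_irq_restore(flags);
		return;
	}

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	" MIPS_ISA_LEVEL "			\n"
	/* Loongson 3 LL/SC errata barrier; expands to nothing elsewhere. */
	"	" __SYNC(full, loongson3_war) "			\n"
	"1:	ll	%0, %1		# atomic_add		\n"
	"	addu	%0, %2					\n"
	"	sc	%0, %1					\n"
	/* Retry from 1: if the store-conditional lost the reservation. */
	"\t" __SC_BEQZ "%0, 1b					\n"
	"	.set	pop					\n"
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)
	: "Ir" (i) : __LLSC_CLOBBER);
}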
Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/atomic.h | 571
-rw-r--r--  arch/mips/include/asm/barrier.h | 228
-rw-r--r--  arch/mips/include/asm/bitops.h | 443
-rw-r--r--  arch/mips/include/asm/bootinfo.h | 4
-rw-r--r--  arch/mips/include/asm/bugs.h | 18
-rw-r--r--  arch/mips/include/asm/cmpxchg.h | 59
-rw-r--r--  arch/mips/include/asm/cop2.h | 2
-rw-r--r--  arch/mips/include/asm/cpu-type.h | 11
-rw-r--r--  arch/mips/include/asm/cpu.h | 10
-rw-r--r--  arch/mips/include/asm/fixmap.h | 2
-rw-r--r--  arch/mips/include/asm/futex.h | 15
-rw-r--r--  arch/mips/include/asm/hazards.h | 2
-rw-r--r--  arch/mips/include/asm/io.h | 2
-rw-r--r--  arch/mips/include/asm/irqflags.h | 2
-rw-r--r--  arch/mips/include/asm/llsc.h | 19
-rw-r--r--  arch/mips/include/asm/mach-ip22/spaces.h | 12
-rw-r--r--  arch/mips/include/asm/mach-ip27/mmzone.h | 2
-rw-r--r--  arch/mips/include/asm/mach-ip27/topology.h | 5
-rw-r--r--  arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h | 83
-rw-r--r--  arch/mips/include/asm/mach-ip30/irq.h | 87
-rw-r--r--  arch/mips/include/asm/mach-ip30/kernel-entry-init.h | 13
-rw-r--r--  arch/mips/include/asm/mach-ip30/mangle-port.h | 22
-rw-r--r--  arch/mips/include/asm/mach-ip30/spaces.h | 20
-rw-r--r--  arch/mips/include/asm/mach-ip30/war.h | 26
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h | 44
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536.h) | 0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_mfgpt.h) | 0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h) | 0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_vsm.h) | 0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/loongson.h | 326
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/machine.h (renamed from arch/mips/include/asm/mach-loongson64/machine.h) | 12
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h | 36
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/mem.h (renamed from arch/mips/include/asm/mach-loongson64/mem.h) | 6
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/pci.h | 46
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/spaces.h | 10
-rw-r--r--  arch/mips/include/asm/mach-loongson32/prom.h | 20
-rw-r--r--  arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h | 3
-rw-r--r--  arch/mips/include/asm/mach-loongson64/irq.h | 4
-rw-r--r--  arch/mips/include/asm/mach-loongson64/kernel-entry-init.h | 32
-rw-r--r--  arch/mips/include/asm/mach-loongson64/loongson.h | 115
-rw-r--r--  arch/mips/include/asm/mach-loongson64/loongson_regs.h | 227
-rw-r--r--  arch/mips/include/asm/mach-loongson64/mmzone.h | 29
-rw-r--r--  arch/mips/include/asm/mach-loongson64/pci.h | 31
-rw-r--r--  arch/mips/include/asm/mach-loongson64/topology.h | 4
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 6
-rw-r--r--  arch/mips/include/asm/module.h | 12
-rw-r--r--  arch/mips/include/asm/pci/bridge.h | 1
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 4
-rw-r--r--  arch/mips/include/asm/pgtable-32.h | 6
-rw-r--r--  arch/mips/include/asm/pgtable-64.h | 44
-rw-r--r--  arch/mips/include/asm/pgtable.h | 11
-rw-r--r--  arch/mips/include/asm/pmon.h | 46
-rw-r--r--  arch/mips/include/asm/processor.h | 2
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 362
-rw-r--r--  arch/mips/include/asm/sgi/heart.h | 272
-rw-r--r--  arch/mips/include/asm/sgi/sgi.h | 48
-rw-r--r--  arch/mips/include/asm/sgialib.h | 22
-rw-r--r--  arch/mips/include/asm/sgiarcs.h | 103
-rw-r--r--  arch/mips/include/asm/sn/agent.h | 2
-rw-r--r--  arch/mips/include/asm/sn/arch.h | 31
-rw-r--r--  arch/mips/include/asm/sn/gda.h | 4
-rw-r--r--  arch/mips/include/asm/sn/hub.h | 4
-rw-r--r--  arch/mips/include/asm/sn/ioc3.h | 9
-rw-r--r--  arch/mips/include/asm/sn/mapped_kernel.h | 4
-rw-r--r--  arch/mips/include/asm/sn/sn0/arch.h | 18
-rw-r--r--  arch/mips/include/asm/sn/sn_private.h | 5
-rw-r--r--  arch/mips/include/asm/sn/types.h | 4
-rw-r--r--  arch/mips/include/asm/string.h | 121
-rw-r--r--  arch/mips/include/asm/sync.h | 207
-rw-r--r--  arch/mips/include/asm/unroll.h | 77
70 files changed, 2090 insertions, 1938 deletions
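Most of the header churn above is in asm/atomic.h, whose diff follows. For readers unfamiliar with the primitive it reworks, here is a small, hedged userspace illustration of the semantics of atomic_sub_if_positive() (and hence atomic_dec_if_positive()) using GCC's __atomic builtins in place of LL/SC; the sub_if_positive() helper name and the builtin-based retry loop are illustrative only and are not how the kernel implements it.

#include <stdio.h>

/*
 * Illustration only: emulate the semantics of MIPS atomic_sub_if_positive()
 * with a compare-and-swap retry loop. The counter is updated only when the
 * result stays >= 0; the would-be result (old value minus i) is returned
 * either way, mirroring the bltz exit path in the LL/SC version.
 */
static int sub_if_positive(int *counter, int i)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);
	int new;

	do {
		new = old - i;
		if (new < 0)
			return new;	/* leave the counter untouched */
	} while (!__atomic_compare_exchange_n(counter, &old, new, 1,
					      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
	return new;
}

int main(void)
{
	int v = 2;

	printf("%d\n", sub_if_positive(&v, 1));	/* prints 1, v is now 1 */
	printf("%d\n", sub_if_positive(&v, 1));	/* prints 0, v is now 0 */
	printf("%d\n", sub_if_positive(&v, 1));	/* prints -1, v stays 0 */
	return 0;
}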
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index bb8658cc7f12..e5ac88392d1f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -20,157 +20,176 @@
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
#include <asm/war.h>
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_OPS(pfx, type) \
+static __always_inline type pfx##_read(const pfx##_t *v) \
+{ \
+ return READ_ONCE(v->counter); \
+} \
+ \
+static __always_inline void pfx##_set(pfx##_t *v, type i) \
+{ \
+ WRITE_ONCE(v->counter, i); \
+} \
+ \
+static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+{ \
+ return cmpxchg(&v->counter, o, n); \
+} \
+ \
+static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+{ \
+ return xchg(&v->counter, n); \
+}
-/*
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define ATOMIC_INIT(i) { (i) }
+ATOMIC_OPS(atomic, int)
-/*
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
+#ifdef CONFIG_64BIT
+# define ATOMIC64_INIT(i) { (i) }
+ATOMIC_OPS(atomic64, s64)
+#endif
-#define ATOMIC_OP(op, c_op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
-{ \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %0, %1 # atomic_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
+#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+{ \
+ type temp; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ return; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " " #sc " %0, %1 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
}
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
-{ \
- int result; \
- \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %1, %2 # atomic_" #op "_return \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- result c_op i; \
- v->counter = result; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
+#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+{ \
+ type temp, result; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " " #sc " %0, %2 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+ return result; \
}
-#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
-{ \
- int result; \
- \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %1, %2 # atomic_fetch_" #op " \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- " move %0, %1 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
+#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+{ \
+ int temp, result; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " " #sc " %0, %2 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ " move %0, %1 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+ return result; \
}
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
-ATOMIC_OPS(add, +=, addu)
-ATOMIC_OPS(sub, -=, subu)
+ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
+ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
+ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
+# define atomic64_add_return_relaxed atomic64_add_return_relaxed
+# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif /* CONFIG_64BIT */
+
#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
+ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
+ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
+ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
+ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
+# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -184,258 +203,66 @@ ATOMIC_OPS(xor, ^=, xor)
* Atomically test @v and subtract @i if @v is greater or equal than @i.
* The function returns the old value of @v minus @i.
*/
-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
-{
- int result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc) {
- int temp;
-
- loongson_llsc_mb();
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_LEVEL" \n"
- "1: ll %1, %2 # atomic_sub_if_positive\n"
- " .set pop \n"
- " subu %0, %1, %3 \n"
- " move %1, %0 \n"
- " bltz %0, 2f \n"
- " .set push \n"
- " .set "MIPS_ISA_LEVEL" \n"
- " sc %1, %2 \n"
- "\t" __scbeqz " %1, 1b \n"
- "2: \n"
- " .set pop \n"
- : "=&r" (result), "=&r" (temp),
- "+" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i) : __LLSC_CLOBBER);
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- if (result >= 0)
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
+#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
+static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+{ \
+ type temp, result; \
+ \
+ smp_mb__before_atomic(); \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result -= i; \
+ if (result >= 0) \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ smp_mb__after_atomic(); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # atomic_sub_if_positive\n" \
+ " .set pop \n" \
+ " " #op " %0, %1, %3 \n" \
+ " move %1, %0 \n" \
+ " bltz %0, 2f \n" \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " #sc " %1, %2 \n" \
+ " " __SC_BEQZ "%1, 1b \n" \
+ "2: " __SYNC(full, loongson3_war) " \n" \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) \
+ : __LLSC_CLOBBER); \
+ \
+ /* \
+ * In the Loongson3 workaround case we already have a \
+ * completion barrier at 2: above, which is needed due to the \
+ * bltz that can branch to code outside of the LL/SC loop. As \
+ * such, we don't need to emit another barrier here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_mb__after_atomic(); \
+ \
+ return result; \
}
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- */
+ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT
-
-#define ATOMIC64_INIT(i) { (i) }
-
-/*
- * atomic64_read - read atomic variable
- * @v: pointer of type atomic64_t
- *
- */
-#define atomic64_read(v) READ_ONCE((v)->counter)
-
-/*
- * atomic64_set - set atomic variable
- * @v: pointer of type atomic64_t
- * @i: required value
- */
-#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-
-#define ATOMIC64_OP(op, c_op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
-{ \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %0, %1 # atomic64_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
-}
-
-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
-{ \
- s64 result; \
- \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i) : __LLSC_CLOBBER); \
- } else { \