author     Ingo Molnar <mingo@kernel.org>  2020-08-06 10:16:38 +0200
committer  Ingo Molnar <mingo@kernel.org>  2020-08-06 10:16:38 +0200
commit     a703f3633ff1d982bc4adfe7e0921bedb1701216
tree       eb85b29a0bbcb29045e197ab77e18ffc8649a722
parent     a7ef9b28aa8d72a1656fa6f0a01bbd1493886317
parent     b5e6a027bd327daa679ca55182a920659e2cbb90

Merge branch 'WIP.locking/seqlocks' into locking/urgent

Pick up the full seqlock series PeterZ is working on.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  Documentation/locking/index.rst | 1
-rw-r--r--  Documentation/locking/mutex-design.rst | 2
-rw-r--r--  Documentation/locking/seqlock.rst | 222
-rw-r--r--  arch/alpha/include/asm/atomic.h | 1
-rw-r--r--  arch/arc/include/asm/atomic.h | 2
-rw-r--r--  arch/arm/include/asm/atomic.h | 2
-rw-r--r--  arch/arm/include/asm/percpu.h | 2
-rw-r--r--  arch/arm/include/asm/thread_info.h | 5
-rw-r--r--  arch/arm64/include/asm/atomic.h | 2
-rw-r--r--  arch/h8300/include/asm/atomic.h | 2
-rw-r--r--  arch/hexagon/include/asm/atomic.h | 2
-rw-r--r--  arch/ia64/include/asm/atomic.h | 1
-rw-r--r--  arch/m68k/include/asm/atomic.h | 2
-rw-r--r--  arch/mips/include/asm/atomic.h | 1
-rw-r--r--  arch/parisc/include/asm/atomic.h | 2
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 2
-rw-r--r--  arch/powerpc/include/asm/dtl.h | 52
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 44
-rw-r--r--  arch/powerpc/include/asm/paca.h | 2
-rw-r--r--  arch/powerpc/kernel/time.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/svm.c | 1
-rw-r--r--  arch/riscv/include/asm/atomic.h | 2
-rw-r--r--  arch/s390/include/asm/atomic.h | 2
-rw-r--r--  arch/s390/include/asm/smp.h | 1
-rw-r--r--  arch/s390/include/asm/thread_info.h | 1
-rw-r--r--  arch/sh/include/asm/atomic.h | 2
-rw-r--r--  arch/sparc/include/asm/atomic_32.h | 2
-rw-r--r--  arch/sparc/include/asm/atomic_64.h | 1
-rw-r--r--  arch/sparc/include/asm/percpu_64.h | 2
-rw-r--r--  arch/sparc/include/asm/trap_block.h | 2
-rw-r--r--  arch/x86/entry/common.c | 88
-rw-r--r--  arch/x86/include/asm/atomic.h | 2
-rw-r--r--  arch/x86/include/asm/idtentry.h | 31
-rw-r--r--  arch/x86/kernel/kvm.c | 6
-rw-r--r--  arch/x86/kernel/nmi.c | 9
-rw-r--r--  arch/x86/kernel/traps.c | 23
-rw-r--r--  arch/x86/mm/fault.c | 6
-rw-r--r--  arch/xtensa/include/asm/atomic.h | 2
-rw-r--r--  block/blk-iocost.c | 5
-rw-r--r--  drivers/dma-buf/dma-resv.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/md/raid5.h | 2
-rw-r--r--  fs/dcache.c | 2
-rw-r--r--  fs/fs_struct.c | 4
-rw-r--r--  fs/nfs/nfs4_fs.h | 2
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/userfaultfd.c | 4
-rw-r--r--  include/asm-generic/atomic.h | 2
-rw-r--r--  include/asm-generic/qspinlock.h | 1
-rw-r--r--  include/asm-generic/qspinlock_types.h | 8
-rw-r--r--  include/linux/dcache.h | 2
-rw-r--r--  include/linux/dma-resv.h | 4
-rw-r--r--  include/linux/fs_struct.h | 2
-rw-r--r--  include/linux/hardirq.h | 28
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/irqflags.h | 23
-rw-r--r--  include/linux/kvm_irqfd.h | 2
-rw-r--r--  include/linux/lockdep.h | 230
-rw-r--r--  include/linux/lockdep_types.h | 194
-rw-r--r--  include/linux/rwsem.h | 20
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/seqlock.h | 1015
-rw-r--r--  include/linux/spinlock.h | 1
-rw-r--r--  include/linux/spinlock_types.h | 2
-rw-r--r--  include/linux/types.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 2
-rw-r--r--  init/init_task.c | 3
-rw-r--r--  kernel/fork.c | 6
-rw-r--r--  kernel/futex.c | 114
-rw-r--r--  kernel/kcsan/core.c | 5
-rw-r--r--  kernel/kcsan/report.c | 9
-rw-r--r--  kernel/locking/lockdep.c | 80
-rw-r--r--  kernel/softirq.c | 8
-rw-r--r--  kernel/time/hrtimer.c | 13
-rw-r--r--  kernel/time/timekeeping.c | 19
-rw-r--r--  lib/Kconfig.debug | 1
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 5
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 10
-rw-r--r--  tools/include/linux/irqflags.h | 4
-rw-r--r--  virt/kvm/eventfd.c | 2
86 files changed, 1583 insertions(+), 822 deletions(-)
diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst
index d785878cad65..7003bd5aeff4 100644
--- a/Documentation/locking/index.rst
+++ b/Documentation/locking/index.rst
@@ -14,6 +14,7 @@ locking
mutex-design
rt-mutex-design
rt-mutex
+ seqlock
spinlocks
ww-mutex-design
preempt-locking
diff --git a/Documentation/locking/mutex-design.rst b/Documentation/locking/mutex-design.rst
index 4d8236b81fa5..8f3e9a5141f9 100644
--- a/Documentation/locking/mutex-design.rst
+++ b/Documentation/locking/mutex-design.rst
@@ -18,7 +18,7 @@ as an alternative to these. This new data structure provided a number
of advantages, including simpler interfaces, and at that time smaller
code (see Disadvantages).
-[1] http://lwn.net/Articles/164802/
+[1] https://lwn.net/Articles/164802/
Implementation
--------------
diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst
new file mode 100644
index 000000000000..62c5ad98c11c
--- /dev/null
+++ b/Documentation/locking/seqlock.rst
@@ -0,0 +1,222 @@
+======================================
+Sequence counters and sequential locks
+======================================
+
+Introduction
+============
+
+Sequence counters are a reader-writer consistency mechanism with
+lockless readers (read-only retry loops), and no writer starvation. They
+are used for data that's rarely written to (e.g. system time), where the
+reader wants a consistent set of information and is willing to retry if
+that information changes.
+
+A data set is consistent when the sequence count at the beginning of the
+read side critical section is even and the same sequence count value is
+read again at the end of the critical section. The data in the set must
+be copied out inside the read side critical section. If the sequence
+count has changed between the start and the end of the critical section,
+the reader must retry.
+
+Writers increment the sequence count at the start and the end of their
+critical section. After starting the critical section the sequence count
+is odd and indicates to the readers that an update is in progress. At
+the end of the write side critical section the sequence count becomes
+even again which lets readers make progress.
+
+A sequence counter write side critical section must never be preempted
+or interrupted by read side sections. Otherwise the reader will spin for
+the entire scheduler tick due to the odd sequence count value and the
+interrupted writer. If that reader belongs to a real-time scheduling
+class, it can spin forever and the kernel will livelock.
+
+This mechanism cannot be used if the protected data contains pointers,
+as the writer can invalidate a pointer that the reader is following.
+
+
+.. _seqcount_t:
+
+Sequence counters (``seqcount_t``)
+==================================
+
+This is the raw counting mechanism, which does not protect against
+multiple writers. Write side critical sections must thus be serialized
+by an external lock.
+
+If the write serialization primitive is not implicitly disabling
+preemption, preemption must be explicitly disabled before entering the
+write side section. If the read section can be invoked from hardirq or
+softirq contexts, interrupts or bottom halves must also be respectively
+disabled before entering the write section.
+
+If it's desired to automatically handle the sequence counter
+requirements of writer serialization and non-preemptibility, use
+:ref:`seqlock_t` instead.
+
+Initialization::
+
+ /* dynamic */
+ seqcount_t foo_seqcount;
+ seqcount_init(&foo_seqcount);
+
+ /* static */
+ static seqcount_t foo_seqcount = SEQCNT_ZERO(foo_seqcount);
+
+ /* C99 struct init */
+ struct {
+ .seq = SEQCNT_ZERO(foo.seq),
+ } foo;
+
+Write path::
+
+ /* Serialized context with disabled preemption */
+
+ write_seqcount_begin(&foo_seqcount);
+
+ /* ... [[write-side critical section]] ... */
+
+ write_seqcount_end(&foo_seqcount);
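+
+If the external serializing primitive does not disable preemption (a
+mutex, for example), preemption must be disabled explicitly around the
+write side section. A minimal sketch, assuming a hypothetical
+``foo_mutex`` as the serializing lock::
+
+	mutex_lock(&foo_mutex);
+	preempt_disable();
+
+	write_seqcount_begin(&foo_seqcount);
+
+	/* ... [[write-side critical section]] ... */
+
+	write_seqcount_end(&foo_seqcount);
+
+	preempt_enable();
+	mutex_unlock(&foo_mutex);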
+
+Read path::
+
+ do {
+ seq = read_seqcount_begin(&foo_seqcount);
+
+ /* ... [[read-side critical section]] ... */
+
+ } while (read_seqcount_retry(&foo_seqcount, seq));
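+
+Putting the two paths together, a minimal sketch that protects two
+fields and copies them out in the reader. The names ``foo_lock``,
+``foo_a``/``foo_b`` and ``new_a``/``new_b`` are hypothetical; the
+spinlock serializes writers and also disables preemption::
+
+	static DEFINE_SPINLOCK(foo_lock);
+	static seqcount_t foo_seqcount = SEQCNT_ZERO(foo_seqcount);
+	static u64 foo_a, foo_b;
+
+	/* writer */
+	spin_lock(&foo_lock);
+	write_seqcount_begin(&foo_seqcount);
+	foo_a = new_a;
+	foo_b = new_b;
+	write_seqcount_end(&foo_seqcount);
+	spin_unlock(&foo_lock);
+
+	/* reader: copy the data out inside the critical section */
+	unsigned int seq;
+	u64 a, b;
+
+	do {
+		seq = read_seqcount_begin(&foo_seqcount);
+		a = foo_a;
+		b = foo_b;
+	} while (read_seqcount_retry(&foo_seqcount, seq));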
+
+
+.. _seqcount_locktype_t:
+
+Sequence counters with associated locks (``seqcount_LOCKTYPE_t``)
+-----------------------------------------------------------------
+
+As discussed at :ref:`seqcount_t`, sequence count write side critical
+sections must be serialized and non-preemptible. This variant of
+sequence counters associates the lock used for writer serialization at
+initialization time, which enables lockdep to validate that the write
+side critical sections are properly serialized.
+
+This lock association is a NOOP if lockdep is disabled and has neither
+storage nor runtime overhead. If lockdep is enabled, the lock pointer is
+stored in struct seqcount and lockdep's "lock is held" assertions are
+injected at the beginning of the write side critical section to validate
+that it is properly protected.
+
+For lock types which do not implicitly disable preemption, preemption
+protection is enforced in the write side function.
+
+The following sequence counters with associated locks are defined:
+
+ - ``seqcount_spinlock_t``
+ - ``seqcount_raw_spinlock_t``
+ - ``seqcount_rwlock_t``
+ - ``seqcount_mutex_t``
+ - ``seqcount_ww_mutex_t``
+
+The plain seqcount read and write APIs branch out to the specific
+seqcount_LOCKTYPE_t implementation at compile-time. This avoids a kernel
+API explosion for each new seqcount LOCKTYPE.
+
+Initialization (replace "LOCKTYPE" with one of the supported locks)::
+
+ /* dynamic */
+ seqcount_LOCKTYPE_t foo_seqcount;
+ seqcount_LOCKTYPE_init(&foo_seqcount, &lock);
+
+ /* static */
+ static seqcount_LOCKTYPE_t foo_seqcount =
+ SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock);
+
+ /* C99 struct init */
+ struct {
+ .seq = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock),
+ } foo;
+
+Write path: same as in :ref:`seqcount_t`, while running from a context
+with the associated LOCKTYPE lock acquired.
+
+Read path: same as in :ref:`seqcount_t`.
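+
+As a concrete sketch, assuming a hypothetical ``foo_lock`` spinlock is
+the associated lock (i.e. a ``seqcount_spinlock_t``), following the
+LOCKTYPE patterns above::
+
+	static DEFINE_SPINLOCK(foo_lock);
+	static seqcount_spinlock_t foo_seqcount =
+		SEQCNT_SPINLOCK_ZERO(foo_seqcount, &foo_lock);
+
+	/* write path: lockdep can now assert that foo_lock is held */
+	spin_lock(&foo_lock);
+	write_seqcount_begin(&foo_seqcount);
+
+	/* ... [[write-side critical section]] ... */
+
+	write_seqcount_end(&foo_seqcount);
+	spin_unlock(&foo_lock);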
+
+.. _seqlock_t:
+
+Sequential locks (``seqlock_t``)
+================================
+
+This contains the :ref:`seqcount_t` mechanism earlier discussed, plus an
+embedded spinlock for writer serialization and non-preemptibility.
+
+If the read side section can be invoked from hardirq or softirq context,
+use the write side function variants which disable interrupts or bottom
+halves respectively.
+
+Initialization::
+
+ /* dynamic */
+ seqlock_t foo_seqlock;
+ seqlock_init(&foo_seqlock);
+
+ /* static */
+ static DEFINE_SEQLOCK(foo_seqlock);
+
+ /* C99 struct init */
+ struct {
+ .seql = __SEQLOCK_UNLOCKED(foo.seql)
+ } foo;
+
+Write path::
+
+ write_seqlock(&foo_seqlock);
+
+ /* ... [[write-side critical section]] ... */
+
+ write_sequnlock(&foo_seqlock);
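+
+If the read side can be entered from hardirq context, a sketch of the
+interrupt-disabling write path variant::
+
+	unsigned long flags;
+
+	write_seqlock_irqsave(&foo_seqlock, flags);
+
+	/* ... [[write-side critical section]] ... */
+
+	write_sequnlock_irqrestore(&foo_seqlock, flags);
+
+Analogous ``write_seqlock_bh()`` / ``write_sequnlock_bh()`` variants
+cover softirq (bottom half) read sides.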
+
+Read path, three categories:
+
+1. Normal sequence readers which never block a writer, but must retry
+   if a writer is in progress, detected via a change in the sequence
+   number. Writers do not wait for a sequence reader::
+
+ do {
+ seq = read_seqbegin(&foo_seqlock);
+
+ /* ... [[read-side critical section]] ... */
+
+ } while (read_seqretry(&foo_seqlock, seq));
+
+2. Locking readers which wait if a writer or another locking reader is
+   in progress. A locking reader in progress also blocks a writer from
+   entering its critical section. This read lock is exclusive: unlike
+   rwlock_t, only one locking reader can acquire it (a hardirq-safe
+   variant is sketched after this list)::
+
+ read_seqlock_excl(&foo_seqlock);
+
+ /* ... [[read-side critical section]] ... */
+
+ read_sequnlock_excl(&foo_seqlock);
+
+3. Conditional lockless reader (as in 1), or locking reader (as in 2),
+   according to a passed marker. This is used to avoid starving lockless
+   readers (too many retry loops) in case of a sharp spike in write
+   activity. First, a lockless read is tried (even marker passed). If
+   that trial fails (an odd sequence counter is returned, which is used
+   as the next iteration marker), the lockless read is transformed into
+   a full locking read and no retry loop is necessary::
+
+ /* marker; even initialization */
+ int seq = 0;
+ do {
+ read_seqbegin_or_lock(&foo_seqlock, &seq);
+
+ /* ... [[read-side critical section]] ... */
+
+ } while (need_seqretry(&foo_seqlock, seq));
+ done_seqretry(&foo_seqlock, seq);
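+
+Hardirq-safe variants of the exclusive locking reader (category 2
+above) also exist; a minimal sketch::
+
+	unsigned long flags;
+
+	read_seqlock_excl_irqsave(&foo_seqlock, flags);
+
+	/* ... [[read-side critical section]] ... */
+
+	read_sequnlock_excl_irqrestore(&foo_seqlock, flags);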
+
+
+API documentation
+=================
+
+.. kernel-doc:: include/linux/seqlock.h
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 2144530d1428..e2093994fd0d 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -24,7 +24,6 @@
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()
-#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 7298ce84762e..c614857eb209 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,8 +14,6 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define ATOMIC_INIT(i) { (i) }
-
#ifndef CONFIG_ARC_PLAT_EZNPS
#define atomic_read(v) READ_ONCE((v)->counter)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 75bb2c543e59..455eb19a5ac1 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -15,8 +15,6 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-#define ATOMIC_INIT(i) { (i) }
-
#ifdef __KERNEL__
/*
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index f44f448537f2..e2fcb3cfd3de 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -5,6 +5,8 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+register unsigned long current_stack_pointer asm ("sp");
+
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 3609a6980c34..536b6b979f63 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -76,11 +76,6 @@ struct thread_info {
}
/*
- * how to get the current stack pointer in C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
* how to get the thread information struct from C
*/
static inline struct thread_info *current_thread_info(void) __attribute_const__;
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index a08890da696c..015ddffaf6ca 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -99,8 +99,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
return __lse_ll_sc_body(atomic64_dec_if_positive, v);
}
-#define ATOMIC_INIT(i) { (i) }
-
#define arch_atomic_read(v) __READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index c6b6a06231b2..a990d151f163 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -12,8 +12,6 @@
* resource counting etc..
*/
-#define ATOMIC_INIT(i) { (i) }
-
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 0231d69c8bf2..4ab895d7111f 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -12,8 +12,6 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
-
/* Normal writes in our arch don't clear lock reservations */
static inline void atomic_set(atomic_t *v, int new)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 50440f3ddc43..f267d956458f 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -19,7 +19,6 @@
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 47228b0d4163..756c5cc58f94 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -16,8 +16,6 @@
* We do not have SMP m68k systems, so we don't have to deal with that.
*/
-#define ATOMIC_INIT(i) { (i) }
-
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index e5ac88392d1f..f904084fcb1f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -45,7 +45,6 @@ static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
return xchg(&v->counter, n); \
}
-#define ATOMIC_INIT(i) { (i) }
ATOMIC_OPS(atomic, int)
#ifdef CONFIG_64BIT
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 118953d41763..f960e2f32b1b 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -136,8 +136,6 @@ ATOMIC_OPS(xor, ^=)
#undef ATOMIC_OP_RETURN