72 files changed, 641 insertions(+), 234 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 503f80a43bec..d357695ee4fe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2322,6 +2322,15 @@ F:	Documentation/devicetree/bindings/input/atmel,maxtouch.txt
 F:	drivers/input/touchscreen/atmel_mxt_ts.c
 F:	include/linux/platform_data/atmel_mxt_ts.h
 
+ATOMIC INFRASTRUCTURE
+M:	Will Deacon <will.deacon@arm.com>
+M:	Peter Zijlstra <peterz@infradead.org>
+R:	Boqun Feng <boqun.feng@gmail.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	arch/*/include/asm/atomic*.h
+F:	include/*/atomic*.h
+
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:	Bradley Grove <linuxdrivers@attotech.com>
 L:	linux-scsi@vger.kernel.org
@@ -7555,7 +7564,7 @@ S:	Maintained
 F:	drivers/ata/sata_promise.*
 
 LIBLOCKDEP
-M:	Sasha Levin <sasha.levin@oracle.com>
+M:	Sasha Levin <alexander.levin@verizon.com>
 S:	Maintained
 F:	tools/lib/lockdep/
 
diff --git a/arch/Kconfig b/arch/Kconfig
index 6c00e5b00f8b..f76b214cf7ad 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -867,4 +867,13 @@ config STRICT_MODULE_RWX
 config ARCH_WANT_RELAX_ORDER
 	bool
 
+config REFCOUNT_FULL
+	bool "Perform full reference count validation at the expense of speed"
+	help
+	  Enabling this switches the refcounting infrastructure from a fast
+	  unchecked atomic_t implementation to a fully state checked
+	  implementation, which can be (slightly) slower but provides protections
+	  against various use-after-free conditions that can be used in
+	  security flaw exploits.
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1a70e6c0f259..94709ab41ed8 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -24,8 +24,7 @@
  * has an opportunity to return -EFAULT to the user if needed.
  * The 64-bit routines just return a "long long" with the value,
  * since they are only used from kernel space and don't expect to fault.
- * Support for 16-bit ops is included in the framework but we don't provide
- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
+ * Support for 16-bit ops is included in the framework but we don't provide any.
  *
  * Note that the caller is advised to issue a suitable L1 or L2
  * prefetch on the address being manipulated to avoid extra stalls.
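As context for the REFCOUNT_FULL option introduced above: what the full implementation validates is the canonical get/put lifecycle. A minimal sketch, assuming a hypothetical struct obj that is not part of this commit, of the pattern refcount_t polices; with REFCOUNT_FULL=n the same calls collapse to the plain atomic_t wrappers added to refcount.h further below.

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct obj {
		refcount_t	refs;
		/* ... payload ... */
	};

	/* Lookup path: refuse to hand out a reference to an object whose
	 * count has already dropped to zero (the use-after-free window). */
	static struct obj *obj_get(struct obj *o)
	{
		return refcount_inc_not_zero(&o->refs) ? o : NULL;
	}

	/* Release path: only the thread that drops the final reference
	 * frees. The full implementation additionally catches increments
	 * on a freed object and overflow back to zero instead of silently
	 * wrapping like a bare atomic_t would. */
	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->refs))
			kfree(o);
	}
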
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index caa5798c92f4..33380b871463 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -246,19 +246,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-/**
- * atomic_inc_short - increment of a short integer
- * @v: pointer to type int
- *
- * Atomically adds 1 to @v
- * Returns the new value of @u
- */
-static __always_inline short int atomic_inc_short(short int *v)
-{
-	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
-	return *v;
-}
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index b34aa649d204..591792c8e5b0 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -41,6 +41,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
 	return atomic_read(&r->refs);
 }
 
+#ifdef CONFIG_REFCOUNT_FULL
 extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
 extern void refcount_add(unsigned int i, refcount_t *r);
 
@@ -48,10 +49,45 @@ extern __must_check bool refcount_inc_not_zero(refcount_t *r);
 extern void refcount_inc(refcount_t *r);
 
 extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
-extern void refcount_sub(unsigned int i, refcount_t *r);
 
 extern __must_check bool refcount_dec_and_test(refcount_t *r);
 extern void refcount_dec(refcount_t *r);
+#else
+static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+	return atomic_add_unless(&r->refs, i, 0);
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+	atomic_add(i, &r->refs);
+}
+
+static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
+{
+	return atomic_add_unless(&r->refs, 1, 0);
+}
+
+static inline void refcount_inc(refcount_t *r)
+{
+	atomic_inc(&r->refs);
+}
+
+static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	return atomic_sub_and_test(i, &r->refs);
+}
+
+static inline __must_check bool refcount_dec_and_test(refcount_t *r)
+{
+	return atomic_dec_and_test(&r->refs);
+}
+
+static inline void refcount_dec(refcount_t *r)
+{
+	atomic_dec(&r->refs);
+}
+#endif /* CONFIG_REFCOUNT_FULL */
 
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1abba5ce2a2f..44fd002f7cd5 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -37,6 +37,9 @@ struct rt_mutex {
 	int			line;
 	void			*magic;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
 };
 
 struct rt_mutex_waiter;
@@ -58,19 +61,33 @@ struct hrtimer_sleeper;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
 	, .name = #mutexname, .file = __FILE__, .line = __LINE__
-# define rt_mutex_init(mutex)		__rt_mutex_init(mutex, __func__)
+
+# define rt_mutex_init(mutex) \
+do { \
+	static struct lock_class_key __key; \
+	__rt_mutex_init(mutex, __func__, &__key); \
+} while (0)
+
 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
 #else
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex)		__rt_mutex_init(mutex, NULL)
+# define rt_mutex_init(mutex)		__rt_mutex_init(mutex, NULL, NULL)
 # define rt_mutex_debug_task_free(t)	do { } while (0)
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+	, .dep_map = { .name = #mutexname }
+#else
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
+#endif
+
 #define __RT_MUTEX_INITIALIZER(mutexname) \
 	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
 	, .waiters = RB_ROOT \
 	, .owner = NULL \
-	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+	__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+	__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
 
 #define DEFINE_RT_MUTEX(mutexname) \
 	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@@ -86,7 +103,7 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 	return lock->owner != NULL;
 }
 
-extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
 extern void rt_mutex_lock(struct rt_mutex *lock);
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 58e366ad36f4..ac35e648b0e5 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -166,12 +166,16 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 	memset(waiter, 0x22, sizeof(*waiter));
 }
 
-void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key)
 {
 	/*
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lock->name = name;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
 }
diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
index b585af9a1b50..5078c6ddf4a5 100644
--- a/kernel/locking/rtmutex-debug.h
+++ b/kernel/locking/rtmutex-debug.h
@@ -11,7 +11,7 @@
 
 extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
 extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
-extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 extern void debug_rt_mutex_lock(struct rt_mutex *lock);
 extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 28cd09e635ed..78069895032a 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1481,6 +1481,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
 	might_sleep();
 
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
@@ -1496,9 +1497,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
 */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
+	int ret;
+
 	might_sleep();
 
-	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1526,11 +1534,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
 int
 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
+	int ret;
+
 	might_sleep();
 
-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
 				       RT_MUTEX_MIN_CHAINWALK,
 				       rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
@@ -1547,10 +1562,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
+	int ret;
+
 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
 		return 0;
 
-	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	if (ret)
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
@@ -1561,6 +1582,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -1620,7 +1642,6 @@ void rt_mutex_destroy(struct rt_mutex *lock)
 	lock->magic = NULL;
 #endif
 }
-
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 
 /**
@@ -1632,14 +1653,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 *
 * Initializing of a locked rt lock is not allowed
 */
-void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+void __rt_mutex_init(struct rt_mutex *lock, const char *name,
+		     struct lock_class_key *key)
 {
 	lock->owner = NULL;
 	raw_spin_lock_init(&lock->wait_lock);
 	lock->waiters = RB_ROOT;
 	lock->waiters_leftmost = NULL;
 
-	debug_rt_mutex_init(lock, name);
+	if (name && key)
+		debug_rt_mutex_init(lock, name, key);
 }
 EXPORT_SYMBOL_GPL(__rt_mutex_init);
 
@@ -1660,7 +1683,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				struct task_struct *proxy_owner)
 {
-	__rt_mutex_init(lock, NULL);
+	__rt_mutex_init(lock, NULL, NULL);
 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner);
 }
diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
index 6607802efa8b..5c253caffe91 100644
--- a/kernel/locking/rtmutex.h
+++ b/kernel/locking/rtmutex.h
@@ -17,7 +17,7 @@
 #define debug_rt_mutex_proxy_lock(l,p)		do { } while (0)
 #define debug_rt_mutex_proxy_unlock(l)		do { } while (0)
 #define debug_rt_mutex_unlock(l)		do { } while (0)
-#define debug_rt_mutex_init(m, n)		do { } while (0)
+#define debug_rt_mutex_init(m, n, k)		do { } while (0)
 #define debug_rt_mutex_deadlock(d, a ,l)	do { } while (0)
 #define debug_rt_mutex_print_deadlock(w)	do { } while (0)
 #define debug_rt_mutex_reset_waiter(w)		do { } while (0)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a7a751a75cfd..9c5d40a50930 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1052,6 +1052,7 @@ config DEBUG_LOCK_ALLOC
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select LOCKDEP
 	help
 	  This feature will check whether any held lock (spinlock, rwlock,
@@ -1067,6 +1068,7 @@ config PROVE_LOCKING
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	select TRACE_IRQFLAGS
 	default n
@@ -1121,6 +1123,7 @@ config LOCK_STAT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	default n
 	help
diff --git a/lib/locking-selftest-rtmutex.h b/lib/locking-selftest-rtmutex.h
new file mode 100644
index 000000000000..e3cb83989d16
--- /dev/null
+++ b/lib/locking-selftest-rtmutex.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK		RTL
+
+#undef UNLOCK
+#define UNLOCK		RTU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT		RTI
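With the dep_map wired into rt_mutex_lock()/rt_mutex_unlock() above, PROVE_LOCKING can build a dependency graph for rt_mutexes just as it does for ordinary mutexes, which is what the new selftests below exercise. A minimal sketch, with hypothetical lock and function names not taken from this series, of the classic ABBA inversion the annotations make detectable:

	#include <linux/rtmutex.h>

	static DEFINE_RT_MUTEX(rtm_a);
	static DEFINE_RT_MUTEX(rtm_b);

	static void path_one(void)
	{
		rt_mutex_lock(&rtm_a);	/* lockdep records the a -> b ordering */
		rt_mutex_lock(&rtm_b);
		rt_mutex_unlock(&rtm_b);
		rt_mutex_unlock(&rtm_a);
	}

	static void path_two(void)
	{
		rt_mutex_lock(&rtm_b);	/* b -> a closes a cycle: with the new
					 * annotations lockdep warns here, where
					 * rt_mutexes were previously invisible */
		rt_mutex_lock(&rtm_a);
		rt_mutex_unlock(&rtm_a);
		rt_mutex_unlock(&rtm_b);
	}

Note that the warning fires on the lock order alone; the two paths never need to race at runtime for lockdep to report the potential deadlock.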
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index f3a217ea0388..6f2b135dc5e8 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
+#include <linux/rtmutex.h>
 
 /*
 * Change this to 1 if you want to see the failure printouts:
@@ -46,6 +47,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
 #define LOCKTYPE_MUTEX	0x4
 #define LOCKTYPE_RWSEM	0x8
 #define LOCKTYPE_WW	0x10
+#define LOCKTYPE_RTMUTEX 0x20
 
 static struct ww_acquire_ctx t, t2;
 static struct ww_mutex o, o2, o3;
@@ -74,6 +76,15 @@ static DECLARE_RWSEM(rwsem_B);
 static DECLARE_RWSEM(rwsem_C);
 static DECLARE_RWSEM(rwsem_D);
 
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_A);
+static DEFINE_RT_MUTEX(rtmutex_B);
+static DEFINE_RT_MUTEX(rtmutex_C);
+static DEFINE_RT_MUTEX(rtmutex_D);
+
+#endif
+
 /*
 * Locks that we initialize dynamically as well so that
 * e.g. X1 and X2 becomes two instances of the same class,
@@ -108,6 +119,17 @@ static DECLARE_RWSEM(rwsem_Y2);
 static DECLARE_RWSEM(rwsem_Z1);
 static DECLARE_RWSEM(rwsem_Z2);
 
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_X1);
+static DEFINE_RT_MUTEX(rtmutex_X2);
+static DEFINE_RT_MUTEX(rtmutex_Y1);
+static DEFINE_RT_MUTEX(rtmutex_Y2);
+static DEFINE_RT_MUTEX(rtmutex_Z1);
+static DEFINE_RT_MUTEX(rtmutex_Z2);
+
+#endif
+
 /*
 * non-inlined runtime initializers, to let separate locks share
 * the same lock-class:
@@ -129,6 +151,17 @@ INIT_CLASS_FUNC(Z)
 
 static void init_shared_classes(void)
 {
+#ifdef CONFIG_RT_MUTEXES
+	static struct lock_class_key rt_X, rt_Y, rt_Z;
+
+	__rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
+	__rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
+	__rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
+	__rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
+	__rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
+	__rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
+#endif
+
 	init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
 	init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
 
@@ -193,6 +226,10 @@ static void init_shared_classes(void)
 #define MU(x)			mutex_unlock(&mutex_##x)
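The diff is truncated by the page at this point. By analogy with the MU(x) mutex helper visible at the truncation point, the selftest presumably also gains per-type shorthands that locking-selftest-rtmutex.h then binds to the generic LOCK/UNLOCK/INIT template hooks. The RTL/RTU/RTI bodies below are an assumed sketch, not quoted from the commit:

	/* Assumed shape, by analogy with MU(x) above; the page truncates
	 * before these definitions appear. */
	#define RTL(x)		rt_mutex_lock(&rtmutex_##x)
	#define RTU(x)		rt_mutex_unlock(&rtmutex_##x)
	#define RTI(x)		rt_mutex_init(&rtmutex_##x)

Each shared deadlock scenario in the selftest is written once against LOCK/UNLOCK/INIT and re-included per lock type, so redirecting those hooks at RTL/RTU/RTI is all it takes to replay the whole suite against rt_mutexes.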