From bd54211b8e199ffb701ec98bf4f301e4a6f38a92 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 4 Feb 2019 23:12:08 -0500 Subject: XArray tests: RCU lock prohibits GFP_KERNEL Drop and reacquire the RCU read lock while using GFP_KERNEL. Reported-by: Li RongQing Signed-off-by: Matthew Wilcox --- lib/test_xarray.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index c596a957f764..671a93ee09e6 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -107,8 +107,11 @@ static noinline void check_xas_retry(struct xarray *xa) XA_BUG_ON(xa, xas.xa_node != XAS_RESTART); XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); XA_BUG_ON(xa, xas.xa_node != NULL); + rcu_read_unlock(); XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); + + rcu_read_lock(); XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas))); xas.xa_node = XAS_RESTART; XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); -- cgit v1.2.3 From 809ab9371ca0a96b44d9866ad82849410759a45b Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Sat, 26 Jan 2019 00:52:26 -0500 Subject: XArray: Update xa_erase family descriptions xa_erase does not allocate memory and doesn't have a gfp parameter. Update the descriptions of all four variants to be more useful. Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 12 ++++++------ lib/xarray.c | 17 ++++++++--------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 5d9d318bcf7a..e11841537631 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -526,9 +526,9 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. @@ -550,9 +550,9 @@ static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. diff --git a/lib/xarray.c b/lib/xarray.c index 81c3171ddde9..fb783bf2a441 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1294,13 +1294,12 @@ static void *xas_result(struct xa_state *xas, void *curr) * @xa: XArray. * @index: Index into array. * - * If the entry at this index is a multi-index entry then all indices will - * be erased, and the entry will no longer be a multi-index entry. - * This function expects the xa_lock to be held on entry. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. 
* - * Context: Any context. Expects xa_lock to be held on entry. May - * release and reacquire xa_lock if @gfp flags permit. - * Return: The old entry at this index. + * Context: Any context. Expects xa_lock to be held on entry. + * Return: The entry which used to be at this index. */ void *__xa_erase(struct xarray *xa, unsigned long index) { @@ -1314,9 +1313,9 @@ EXPORT_SYMBOL(__xa_erase); * @xa: XArray. * @index: Index of entry. * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. * * Context: Any context. Takes and releases the xa_lock. * Return: The entry which used to be at this index. -- cgit v1.2.3 From fd9dc93e36231fb6d520e0edd467058fad4fd12d Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 6 Feb 2019 13:07:11 -0500 Subject: XArray: Change xa_insert to return -EBUSY Userspace translates EEXIST to "File exists" which isn't a very good error message for the problem. "Device or resource busy" is a better indication of what went wrong. Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 2 +- fs/nilfs2/btnode.c | 2 +- include/linux/xarray.h | 6 +++--- lib/test_xarray.c | 4 ++-- lib/xarray.c | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 5d54b27c6eba..42bb1a62650f 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -85,7 +85,7 @@ which was at that index; if it returns the same entry which was passed as If you want to only store a new entry to an index if the current entry at that index is ``NULL``, you can use :c:func:`xa_insert` which -returns ``-EEXIST`` if the entry is not empty. +returns ``-EBUSY`` if the entry is not empty. You can enquire whether a mark is set on an entry by using :c:func:`xa_get_mark`. If the entry is not ``NULL``, you can set a mark diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index f2129a5d9f23..4391fd3abd8f 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -189,7 +189,7 @@ retry: */ if (!err) return 0; - else if (err != -EEXIST) + else if (err != -EBUSY) goto failed_unlock; err = invalidate_inode_pages2_range(btnc, newkey, newkey); diff --git a/include/linux/xarray.h b/include/linux/xarray.h index e11841537631..57cf35c4d094 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -664,7 +664,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, * * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ static inline int xa_insert(struct xarray *xa, unsigned long index, @@ -693,7 +693,7 @@ static inline int xa_insert(struct xarray *xa, unsigned long index, * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. 
*/ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, @@ -722,7 +722,7 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ static inline int xa_insert_irq(struct xarray *xa, unsigned long index, diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 671a93ee09e6..9d894e93456c 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -346,7 +346,7 @@ static noinline void check_cmpxchg(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL); - XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST); + XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE); @@ -388,7 +388,7 @@ static noinline void check_reserve(struct xarray *xa) /* But xa_insert does not */ xa_reserve(xa, 12345678, GFP_KERNEL); XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != - -EEXIST); + -EBUSY); XA_BUG_ON(xa, xa_empty(xa)); XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); XA_BUG_ON(xa, !xa_empty(xa)); diff --git a/lib/xarray.c b/lib/xarray.c index fb783bf2a441..1b97ca58bd15 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1451,7 +1451,7 @@ EXPORT_SYMBOL(__xa_cmpxchg); * * Context: Any context. Expects xa_lock to be held on entry. May * release and reacquire xa_lock if @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) @@ -1471,7 +1471,7 @@ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) if (xa_track_free(xa)) xas_clear_mark(&xas, XA_FREE_MARK); } else { - xas_set_err(&xas, -EEXIST); + xas_set_err(&xas, -EBUSY); } } while (__xas_nomem(&xas, gfp)); -- cgit v1.2.3 From 3ccaf57a6a63ad171a951dcaddffc453b2414c7b Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 26 Oct 2018 14:43:22 -0400 Subject: XArray: Add support for 1s-based allocation A lot of places want to allocate IDs starting at 1 instead of 0. While the xa_alloc() API supports this, it's not very efficient if lots of IDs are allocated, due to having to walk down to the bottom of the tree to see if ID 1 is available, then all the way over to the next non-allocated ID. This method marks ID 0 as being occupied which wastes one slot in the XArray, but preserves xa_empty() as working. 
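A minimal sketch of the intended usage, using the xa_alloc() calling convention as it stands at this point in the series (a later patch in this series replaces the max argument with an xa_limit); struct foo and foo_register() are invented for illustration:

	static DEFINE_XARRAY_ALLOC1(foo_instances);

	static int foo_register(struct foo *foo)
	{
		u32 id = 0;
		int err;

		/* With XA_FLAGS_ALLOC1 the first ID handed out is 1, never 0. */
		err = xa_alloc(&foo_instances, &id, UINT_MAX, foo, GFP_KERNEL);
		if (err < 0)
			return err;

		foo->id = id;
		return 0;
	}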
Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 10 +++-- include/linux/xarray.h | 14 ++++++- lib/test_xarray.c | 88 ++++++++++++++++++++++++--------------- lib/xarray.c | 11 +++++ 4 files changed, 86 insertions(+), 37 deletions(-) diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 42bb1a62650f..e90c4925cd37 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -131,17 +131,21 @@ If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`, the XArray changes to track whether entries are in use or not. -You can call :c:func:`xa_alloc` to store the entry at any unused index +You can call :c:func:`xa_alloc` to store the entry at an unused index in the XArray. If you need to modify the array from interrupt context, you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable interrupts while allocating the ID. -Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert` -will mark the entry as being allocated. Unlike a normal XArray, storing +Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert` will +also mark the entry as being allocated. Unlike a normal XArray, storing ``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`. To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if you only want to free the entry if it's ``NULL``). +By default, the lowest free entry is allocated starting from 0. If you +want to allocate entries starting at 1, it is more efficient to use +:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. + You cannot use ``XA_MARK_0`` with an allocating XArray as this mark is used to track whether an entry is free or not. The other marks are available for your use. diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 57cf35c4d094..99dd0838b4ba 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -220,10 +220,13 @@ enum xa_lock_type { #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ) #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) +#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ (__force unsigned)(mark))) +/* ALLOC is for a normal 0-based alloc. ALLOC1 is for an 1-based alloc */ #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) +#define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY) /** * struct xarray - The anchor of the XArray. @@ -279,7 +282,7 @@ struct xarray { #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) /** - * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs. + * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0. * @name: A string that names your XArray. * * This is intended for file scope definitions of allocating XArrays. @@ -287,6 +290,15 @@ struct xarray { */ #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) +/** + * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of allocating XArrays. + * See also DEFINE_XARRAY(). 
+ */ +#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1) + void *xa_load(struct xarray *, unsigned long index); void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *xa_erase(struct xarray *, unsigned long index); diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 9d894e93456c..cd74f8f32abe 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -589,64 +589,86 @@ static noinline void check_multi_store(struct xarray *xa) #endif } -static DEFINE_XARRAY_ALLOC(xa0); - -static noinline void check_xa_alloc(void) +static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) { int i; u32 id; - /* An empty array should assign 0 to the first alloc */ - xa_alloc_index(&xa0, 0, GFP_KERNEL); + XA_BUG_ON(xa, !xa_empty(xa)); + /* An empty array should assign %base to the first alloc */ + xa_alloc_index(xa, base, GFP_KERNEL); /* Erasing it should make the array empty again */ - xa_erase_index(&xa0, 0); - XA_BUG_ON(&xa0, !xa_empty(&xa0)); + xa_erase_index(xa, base); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* And it should assign %base again */ + xa_alloc_index(xa, base, GFP_KERNEL); + + /* Allocating and then erasing a lot should not lose base */ + for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) + xa_alloc_index(xa, i, GFP_KERNEL); + for (i = base; i < 2 * XA_CHUNK_SIZE; i++) + xa_erase_index(xa, i); + xa_alloc_index(xa, base, GFP_KERNEL); + + /* Destroying the array should do the same as erasing */ + xa_destroy(xa); - /* And it should assign 0 again */ - xa_alloc_index(&xa0, 0, GFP_KERNEL); + /* And it should assign %base again */ + xa_alloc_index(xa, base, GFP_KERNEL); - /* The next assigned ID should be 1 */ - xa_alloc_index(&xa0, 1, GFP_KERNEL); - xa_erase_index(&xa0, 1); + /* The next assigned ID should be base+1 */ + xa_alloc_index(xa, base + 1, GFP_KERNEL); + xa_erase_index(xa, base + 1); /* Storing a value should mark it used */ - xa_store_index(&xa0, 1, GFP_KERNEL); - xa_alloc_index(&xa0, 2, GFP_KERNEL); + xa_store_index(xa, base + 1, GFP_KERNEL); + xa_alloc_index(xa, base + 2, GFP_KERNEL); - /* If we then erase 0, it should be free */ - xa_erase_index(&xa0, 0); - xa_alloc_index(&xa0, 0, GFP_KERNEL); + /* If we then erase base, it should be free */ + xa_erase_index(xa, base); + xa_alloc_index(xa, base, GFP_KERNEL); - xa_erase_index(&xa0, 1); - xa_erase_index(&xa0, 2); + xa_erase_index(xa, base + 1); + xa_erase_index(xa, base + 2); for (i = 1; i < 5000; i++) { - xa_alloc_index(&xa0, i, GFP_KERNEL); + xa_alloc_index(xa, base + i, GFP_KERNEL); } - xa_destroy(&xa0); + xa_destroy(xa); + /* Check that we fail properly at the limit of allocation */ id = 0xfffffffeU; - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != 0); - XA_BUG_ON(&xa0, id != 0xfffffffeU); - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, id != 0xfffffffeU); + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != 0); - XA_BUG_ON(&xa0, id != 0xffffffffU); - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, id != 0xffffffffU); + XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != -ENOSPC); - XA_BUG_ON(&xa0, id != 0xffffffffU); - xa_destroy(&xa0); + XA_BUG_ON(xa, id != 0xffffffffU); + xa_destroy(xa); id = 10; - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), + XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id), GFP_KERNEL) != -ENOSPC); - XA_BUG_ON(&xa0, 
xa_store_index(&xa0, 3, GFP_KERNEL) != 0); - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), + XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); + XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id), GFP_KERNEL) != -ENOSPC); - xa_erase_index(&xa0, 3); - XA_BUG_ON(&xa0, !xa_empty(&xa0)); + xa_erase_index(xa, 3); + XA_BUG_ON(xa, !xa_empty(xa)); +} + +static DEFINE_XARRAY_ALLOC(xa0); +static DEFINE_XARRAY_ALLOC1(xa1); + +static noinline void check_xa_alloc(void) +{ + check_xa_alloc_1(&xa0, 0); + check_xa_alloc_1(&xa1, 1); } static noinline void __check_store_iter(struct xarray *xa, unsigned long start, diff --git a/lib/xarray.c b/lib/xarray.c index 1b97ca58bd15..468fb7b7963f 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -57,6 +57,11 @@ static inline bool xa_track_free(const struct xarray *xa) return xa->xa_flags & XA_FLAGS_TRACK_FREE; } +static inline bool xa_zero_busy(const struct xarray *xa) +{ + return xa->xa_flags & XA_FLAGS_ZERO_BUSY; +} + static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark) { if (!(xa->xa_flags & XA_FLAGS_MARK(mark))) @@ -432,6 +437,8 @@ static void xas_shrink(struct xa_state *xas) break; if (!xa_is_node(entry) && node->shift) break; + if (xa_is_zero(entry) && xa_zero_busy(xa)) + entry = NULL; xas->xa_node = XAS_BOUNDS; RCU_INIT_POINTER(xa->xa_head, entry); @@ -628,6 +635,8 @@ static void *xas_create(struct xa_state *xas, bool allow_root) if (xas_top(node)) { entry = xa_head_locked(xa); xas->xa_node = NULL; + if (!entry && xa_zero_busy(xa)) + entry = XA_ZERO_ENTRY; shift = xas_expand(xas, entry); if (shift < 0) return NULL; @@ -1942,6 +1951,8 @@ void xa_destroy(struct xarray *xa) entry = xa_head_locked(xa); RCU_INIT_POINTER(xa->xa_head, NULL); xas_init_marks(&xas); + if (xa_zero_busy(xa)) + xa_mark_clear(xa, XA_FREE_MARK); /* lockdep checks we're still holding the lock in xas_free_nodes() */ if (xa_is_node(entry)) xas_free_nodes(&xas, xa_to_node(entry)); -- cgit v1.2.3 From a3e4d3f97ec844de005a679585c04c5c03dfbdb6 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 31 Dec 2018 10:41:01 -0500 Subject: XArray: Redesign xa_alloc API It was too easy to forget to initialise the start index. Add an xa_limit data structure which can be used to pass min & max, and define a couple of special values for common cases. Also add some more tests cribbed from the IDR test suite. Change the return value from -ENOSPC to -EBUSY to match xa_insert(). Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 80 +++++++++++++++++++++++++++++----------------- lib/test_xarray.c | 86 ++++++++++++++++++++++++++++++++++++++++---------- lib/xarray.c | 29 ++++++++--------- 3 files changed, 135 insertions(+), 60 deletions(-) diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 99dd0838b4ba..883bb958e462 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -200,6 +200,27 @@ static inline int xa_err(void *entry) return 0; } +/** + * struct xa_limit - Represents a range of IDs. + * @min: The lowest ID to allocate (inclusive). + * @max: The maximum ID to allocate (inclusive). + * + * This structure is used either directly or via the XA_LIMIT() macro + * to communicate the range of IDs that are valid for allocation. 
+ * Two common ranges are predefined for you: + * * xa_limit_32b - [0 - UINT_MAX] + * * xa_limit_31b - [0 - INT_MAX] + */ +struct xa_limit { + u32 max; + u32 min; +}; + +#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max } + +#define xa_limit_32b XA_LIMIT(0, UINT_MAX) +#define xa_limit_31b XA_LIMIT(0, INT_MAX) + typedef unsigned __bitwise xa_mark_t; #define XA_MARK_0 ((__force xa_mark_t)0U) #define XA_MARK_1 ((__force xa_mark_t)1U) @@ -476,7 +497,8 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); -int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); +int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, + struct xa_limit, gfp_t); int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -753,26 +775,26 @@ static inline int xa_insert_irq(struct xarray *xa, unsigned long index, * xa_alloc() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * - * Context: Process context. Takes and releases the xa_lock. May sleep if + * Context: Any context. Takes and releases the xa_lock. May sleep if * the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock(xa); return err; @@ -782,26 +804,26 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, * xa_alloc_bh() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. 
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock_bh(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_bh(xa); return err; @@ -811,26 +833,26 @@ static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, * xa_alloc_irq() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). * @entry: New entry. + * @limit: Range of ID to allocate. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. May sleep if the @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, - gfp_t gfp) +static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) { int err; xa_lock_irq(xa); - err = __xa_alloc(xa, id, max, entry, gfp); + err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_irq(xa); return err; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index cd74f8f32abe..b5a6b981454d 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -40,9 +40,9 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) { - u32 id = 0; + u32 id; - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index), + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b, gfp) != 0); XA_BUG_ON(xa, id != index); } @@ -640,28 +640,81 @@ static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) xa_destroy(xa); /* Check that we fail properly at the limit of allocation */ - id = 0xfffffffeU; - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1), + XA_LIMIT(UINT_MAX - 1, UINT_MAX), GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0xfffffffeU); - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX), + XA_LIMIT(UINT_MAX - 1, UINT_MAX), GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0xffffffffU); - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id), - GFP_KERNEL) != -ENOSPC); - XA_BUG_ON(xa, id != 0xffffffffU); + id = 3; + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0), + XA_LIMIT(UINT_MAX - 1, UINT_MAX), + GFP_KERNEL) != -EBUSY); + XA_BUG_ON(xa, id != 3); xa_destroy(xa); - id = 10; - XA_BUG_ON(xa, 
xa_alloc(xa, &id, 5, xa_mk_index(id), - GFP_KERNEL) != -ENOSPC); + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), + GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); - XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id), - GFP_KERNEL) != -ENOSPC); + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), + GFP_KERNEL) != -EBUSY); xa_erase_index(xa, 3); XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) +{ + unsigned int i, id; + unsigned long index; + void *entry; + + /* Allocate and free a NULL and check xa_empty() behaves */ + XA_BUG_ON(xa, !xa_empty(xa)); + XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != base); + XA_BUG_ON(xa, xa_empty(xa)); + XA_BUG_ON(xa, xa_erase(xa, id) != NULL); + XA_BUG_ON(xa, !xa_empty(xa)); + + /* Ditto, but check destroy instead of erase */ + XA_BUG_ON(xa, !xa_empty(xa)); + XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != base); + XA_BUG_ON(xa, xa_empty(xa)); + xa_destroy(xa); + XA_BUG_ON(xa, !xa_empty(xa)); + + for (i = base; i < base + 10; i++) { + XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, + GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != i); + } + + XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4)); + XA_BUG_ON(xa, xa_erase(xa, 5) != NULL); + XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 5); + + xa_for_each(xa, index, entry) { + xa_erase_index(xa, index); + } + + for (i = base; i < base + 9; i++) { + XA_BUG_ON(xa, xa_erase(xa, i) != NULL); + XA_BUG_ON(xa, xa_empty(xa)); + } + XA_BUG_ON(xa, xa_erase(xa, 8) != NULL); + XA_BUG_ON(xa, xa_empty(xa)); + XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL); + XA_BUG_ON(xa, !xa_empty(xa)); + + xa_destroy(xa); +} + static DEFINE_XARRAY_ALLOC(xa0); static DEFINE_XARRAY_ALLOC1(xa1); @@ -669,6 +722,8 @@ static noinline void check_xa_alloc(void) { check_xa_alloc_1(&xa0, 0); check_xa_alloc_1(&xa1, 1); + check_xa_alloc_2(&xa0, 0); + check_xa_alloc_2(&xa1, 1); } static noinline void __check_store_iter(struct xarray *xa, unsigned long start, @@ -1219,9 +1274,8 @@ static void check_align_1(struct xarray *xa, char *name) void *entry; for (i = 0; i < 8; i++) { - id = 0; - XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL) - != 0); + XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b, + GFP_KERNEL) != 0); XA_BUG_ON(xa, id != i); } xa_for_each(xa, index, entry) diff --git a/lib/xarray.c b/lib/xarray.c index 468fb7b7963f..c707388fb05e 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1615,23 +1615,23 @@ EXPORT_SYMBOL(xa_store_range); * __xa_alloc() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. - * @max: Maximum ID to allocate (inclusive). + * @limit: Range for allocated ID. * @entry: New entry. * @gfp: Memory allocation flags. * - * Allocates an unused ID in the range specified by @id and @max. - * Updates the @id pointer with the index, then stores the entry at that - * index. A concurrent lookup will not see an uninitialised @id. + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. * * Context: Any context. 
Expects xa_lock to be held on entry. May * release and reacquire xa_lock if @gfp flags permit. - * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if - * there is no more space in the XArray. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. */ -int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) +int __xa_alloc(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, gfp_t gfp) { XA_STATE(xas, xa, 0); - int err; if (WARN_ON_ONCE(xa_is_advanced(entry))) return -EINVAL; @@ -1642,18 +1642,17 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) entry = XA_ZERO_ENTRY; do { - xas.xa_index = *id; - xas_find_marked(&xas, max, XA_FREE_MARK); + xas.xa_index = limit.min; + xas_find_marked(&xas, limit.max, XA_FREE_MARK); if (xas.xa_node == XAS_RESTART) - xas_set_err(&xas, -ENOSPC); + xas_set_err(&xas, -EBUSY); + else + *id = xas.xa_index; xas_store(&xas, entry); xas_clear_mark(&xas, XA_FREE_MARK); } while (__xas_nomem(&xas, gfp)); - err = xas_error(&xas); - if (!err) - *id = xas.xa_index; - return err; + return xas_error(&xas); } EXPORT_SYMBOL(__xa_alloc); -- cgit v1.2.3 From 2fa044e51a1f35d7b04cbde07ec513b0ba195e38 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 6 Nov 2018 14:13:35 -0500 Subject: XArray: Add cyclic allocation This differs slightly from the IDR equivalent in five ways. 1. It can allocate up to UINT_MAX instead of being limited to INT_MAX, like xa_alloc(). Also like xa_alloc(), it will write to the 'id' pointer before placing the entry in the XArray. 2. The 'next' cursor is allocated separately from the XArray instead of being part of the IDR. This saves memory for all the users which do not use the cyclic allocation API and suits some users better. 3. It returns -EBUSY instead of -ENOSPC. 4. It will attempt to wrap back to the minimum value on memory allocation failure as well as on an -EBUSY error, assuming that a user would rather allocate a small ID than suffer an ID allocation failure. 5. It reports whether it has wrapped, which is important to some users. Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 4 +- include/linux/xarray.h | 102 ++++++++++++++++++++++++++++++++++++++ lib/test_xarray.c | 53 ++++++++++++++++++++ lib/xarray.c | 50 +++++++++++++++++++ 4 files changed, 208 insertions(+), 1 deletion(-) diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index e90c4925cd37..c7436da5c4ad 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -144,7 +144,9 @@ you only want to free the entry if it's ``NULL``). By default, the lowest free entry is allocated starting from 0. If you want to allocate entries starting at 1, it is more efficient to use -:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. +:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. If you want to +allocate IDs up to a maximum, then wrap back around to the lowest free +ID, you can use :c:func:`xa_alloc_cyclic`. You cannot use ``XA_MARK_0`` with an allocating XArray as this mark is used to track whether an entry is free or not. 
The other marks are diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 883bb958e462..5ed6b462e754 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -242,6 +242,7 @@ enum xa_lock_type { #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) +#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U) #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ (__force unsigned)(mark))) @@ -499,6 +500,8 @@ void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, struct xa_limit, gfp_t); +int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, + struct xa_limit, u32 *next, gfp_t); int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -858,6 +861,105 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, return err; } +/** + * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. 
+ */ +static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock_irq(xa); + + return err; +} + /** * xa_reserve() - Reserve this index in the XArray. * @xa: XArray. diff --git a/lib/test_xarray.c b/lib/test_xarray.c index b5a6b981454d..eaf53f742c72 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -715,6 +715,57 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) xa_destroy(xa); } +static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) +{ + struct xa_limit limit = XA_LIMIT(1, 0x3fff); + u32 next = 0; + unsigned int i, id; + unsigned long index; + void *entry; + + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit, + &next, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 1); + + next = 0x3ffd; + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, + &next, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 0x3ffd); + xa_erase_index(xa, 0x3ffd); + xa_erase_index(xa, 1); + XA_BUG_ON(xa, !xa_empty(xa)); + + for (i = 0x3ffe; i < 0x4003; i++) { + if (i < 0x4000) + entry = xa_mk_index(i); + else + entry = xa_mk_index(i - 0x3fff); + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit, + &next, GFP_KERNEL) != (id == 1)); + XA_BUG_ON(xa, xa_mk_index(id) != entry); + } + + /* Check wrap-around is handled correctly */ + if (base != 0) + xa_erase_index(xa, base); + xa_erase_index(xa, base + 1); + next = UINT_MAX; + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), + xa_limit_32b, &next, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != UINT_MAX); + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base), + xa_limit_32b, &next, GFP_KERNEL) != 1); + XA_BUG_ON(xa, id != base); + XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1), + xa_limit_32b, &next, GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != base + 1); + + xa_for_each(xa, index, entry) + xa_erase_index(xa, index); + + XA_BUG_ON(xa, !xa_empty(xa)); +} + static DEFINE_XARRAY_ALLOC(xa0); static DEFINE_XARRAY_ALLOC1(xa1); @@ -724,6 +775,8 @@ static noinline void check_xa_alloc(void) check_xa_alloc_1(&xa1, 1); check_xa_alloc_2(&xa0, 0); check_xa_alloc_2(&xa1, 1); + check_xa_alloc_3(&xa0, 0); + check_xa_alloc_3(&xa1, 1); } static noinline void __check_store_iter(struct 
xarray *xa, unsigned long start, diff --git a/lib/xarray.c b/lib/xarray.c index c707388fb05e..89e37ac50850 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1656,6 +1656,56 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry, } EXPORT_SYMBOL(__xa_alloc); +/** + * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + u32 min = limit.min; + int ret; + + limit.min = max(min, *next); + ret = __xa_alloc(xa, id, entry, limit, gfp); + if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) { + xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED; + ret = 1; + } + + if (ret < 0 && limit.min > min) { + limit.min = min; + ret = __xa_alloc(xa, id, entry, limit, gfp); + if (ret == 0) + ret = 1; + } + + if (ret >= 0) { + *next = *id + 1; + if (*next == 0) + xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED; + } + return ret; +} +EXPORT_SYMBOL(__xa_alloc_cyclic); + /** * __xa_set_mark() - Set this mark on this entry while locked. * @xa: XArray. -- cgit v1.2.3 From f818b82b80164014d7ee3df89bb110808778c796 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 8 Feb 2019 14:02:45 -0500 Subject: XArray: Mark xa_insert and xa_reserve as must_check If the user doesn't care about the return value from xa_insert(), then they should be using xa_store() instead. The point of xa_reserve() is to get the return value early before taking another lock, so this should also be __must_check. 
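A minimal sketch of the pattern the annotation is meant to enforce, with the reservation error taken before the other lock; struct foo, its dead flag, foo_publish() and the spinlock are invented for illustration:

	static int foo_publish(struct xarray *xa, unsigned long index,
			       struct foo *foo, spinlock_t *lock)
	{
		int err;

		/* Take the possible -ENOMEM here, before committing to @lock. */
		err = xa_reserve(xa, index, GFP_KERNEL);
		if (err)
			return err;

		spin_lock(lock);
		if (foo->dead) {
			/* Hand the reserved slot back. */
			xa_release(xa, index);
			err = -ENODEV;
		} else {
			/* Storing into a reserved slot needs no further allocation. */
			xa_store(xa, index, foo, 0);
		}
		spin_unlock(lock);

		return err;
	}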
Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 23 ++++++++++++----------- lib/test_xarray.c | 10 +++++----- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 5ed6b462e754..687c150071a5 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -497,12 +497,13 @@ void *__xa_erase(struct xarray *, unsigned long index); void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); -int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t); +int __must_check __xa_insert(struct xarray *, unsigned long index, + void *entry, gfp_t); int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, struct xa_limit, gfp_t); int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, struct xa_limit, u32 *next, gfp_t); -int __xa_reserve(struct xarray *, unsigned long index, gfp_t); +int __must_check __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -704,8 +705,8 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -733,8 +734,8 @@ static inline int xa_insert(struct xarray *xa, unsigned long index, * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert_bh(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert_bh(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -762,8 +763,8 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index, * Return: 0 if the store succeeded. -EBUSY if another entry was present. * -ENOMEM if memory could not be allocated. */ -static inline int xa_insert_irq(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) +static inline int __must_check xa_insert_irq(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) { int err; @@ -978,7 +979,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, * May sleep if the @gfp flags permit. * Return: 0 if the reservation succeeded or -ENOMEM if it failed. */ -static inline +static inline __must_check int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) { int ret; @@ -1002,7 +1003,7 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) * disabling softirqs. * Return: 0 if the reservation succeeded or -ENOMEM if it failed. */ -static inline +static inline __must_check int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) { int ret; @@ -1026,7 +1027,7 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) * disabling interrupts. * Return: 0 if the reservation succeeded or -ENOMEM if it failed. 
*/ -static inline +static inline __must_check int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) { int ret; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index eaf53f742c72..3eaa40ddc390 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -364,21 +364,21 @@ static noinline void check_reserve(struct xarray *xa) /* An array with a reserved entry is not empty */ XA_BUG_ON(xa, !xa_empty(xa)); - xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_empty(xa)); XA_BUG_ON(xa, xa_load(xa, 12345678)); xa_release(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* Releasing a used entry does nothing */ - xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); xa_release(xa, 12345678); xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* cmpxchg sees a reserved entry as NULL */ - xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); @@ -386,7 +386,7 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); /* But xa_insert does not */ - xa_reserve(xa, 12345678, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != -EBUSY); XA_BUG_ON(xa, xa_empty(xa)); @@ -395,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa) /* Can iterate through a reserved entry */ xa_store_index(xa, 5, GFP_KERNEL); - xa_reserve(xa, 6, GFP_KERNEL); + XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0); xa_store_index(xa, 7, GFP_KERNEL); xa_for_each(xa, index, entry) { -- cgit v1.2.3 From b38f6c50270683abf35a388f82cafecce971a003 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 20 Feb 2019 11:30:49 -0500 Subject: XArray: Fix xa_release in allocating arrays xa_cmpxchg() was a little too magic in turning ZERO entries into NULL, and would leave the entry set to the ZERO entry instead of releasing it for future use. After careful review of existing users of xa_cmpxchg(), change the semantics so that it does not translate either incoming argument from NULL into ZERO entries. Add several tests to the test-suite to make sure this problem doesn't come back. Reported-by: Jason Gunthorpe Signed-off-by: Matthew Wilcox --- include/linux/xarray.h | 36 +++++++++++++++++++++++------------- lib/test_xarray.c | 28 ++++++++++++++++++++++++---- lib/xarray.c | 6 +----- 3 files changed, 48 insertions(+), 22 deletions(-) diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 687c150071a5..588733abd19d 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -131,6 +131,12 @@ static inline unsigned int xa_pointer_tag(void *entry) * xa_mk_internal() - Create an internal entry. * @v: Value to turn into an internal entry. * + * Internal entries are used for a number of purposes. Entries 0-255 are + * used for sibling entries (only 0-62 are used by the current code). 256 + * is used for the retry entry. 257 is used for the reserved / zero entry. + * Negative internal entries are used to represent errnos. Node pointers + * are also tagged as internal entries in some situations. + * * Context: Any context. * Return: An XArray internal entry corresponding to this value. 
*/ @@ -163,6 +169,22 @@ static inline bool xa_is_internal(const void *entry) return ((unsigned long)entry & 3) == 2; } +#define XA_ZERO_ENTRY xa_mk_internal(257) + +/** + * xa_is_zero() - Is the entry a zero entry? + * @entry: Entry retrieved from the XArray + * + * The normal API will return NULL as the contents of a slot containing + * a zero entry. You can only see zero entries by using the advanced API. + * + * Return: %true if the entry is a zero entry. + */ +static inline bool xa_is_zero(const void *entry) +{ + return unlikely(entry == XA_ZERO_ENTRY); +} + /** * xa_is_err() - Report whether an XArray operation returned an error * @entry: Result from calling an XArray function @@ -1050,7 +1072,7 @@ int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) */ static inline void xa_release(struct xarray *xa, unsigned long index) { - xa_cmpxchg(xa, index, NULL, NULL, 0); + xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0); } /* Everything below here is the Advanced API. Proceed with caution. */ @@ -1210,18 +1232,6 @@ static inline bool xa_is_sibling(const void *entry) } #define XA_RETRY_ENTRY xa_mk_internal(256) -#define XA_ZERO_ENTRY xa_mk_internal(257) - -/** - * xa_is_zero() - Is the entry a zero entry? - * @entry: Entry retrieved from the XArray - * - * Return: %true if the entry is a zero entry. - */ -static inline bool xa_is_zero(const void *entry) -{ - return unlikely(entry == XA_ZERO_ENTRY); -} /** * xa_is_retry() - Is the entry a retry entry? diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 3eaa40ddc390..52f8ecff8c0c 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -361,6 +361,7 @@ static noinline void check_reserve(struct xarray *xa) { void *entry; unsigned long index; + int count; /* An array with a reserved entry is not empty */ XA_BUG_ON(xa, !xa_empty(xa)); @@ -377,15 +378,15 @@ static noinline void check_reserve(struct xarray *xa) xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); - /* cmpxchg sees a reserved entry as NULL */ + /* cmpxchg sees a reserved entry as ZERO */ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); - XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678), - GFP_NOWAIT) != NULL); + XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, + xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); - /* But xa_insert does not */ + /* xa_insert treats it as busy */ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != -EBUSY); @@ -398,9 +399,27 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0); xa_store_index(xa, 7, GFP_KERNEL); + count = 0; xa_for_each(xa, index, entry) { XA_BUG_ON(xa, index != 5 && index != 7); + count++; + } + XA_BUG_ON(xa, count != 2); + + /* If we free a reserved entry, we should be able to allocate it */ + if (xa->xa_flags & XA_FLAGS_ALLOC) { + u32 id; + + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8), + XA_LIMIT(5, 10), GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 8); + + xa_release(xa, 6); + XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6), + XA_LIMIT(5, 10), GFP_KERNEL) != 0); + XA_BUG_ON(xa, id != 6); } + xa_destroy(xa); } @@ -1486,6 +1505,7 @@ static int xarray_checks(void) check_xas_erase(&array); check_cmpxchg(&array); check_reserve(&array); + check_reserve(&xa0); check_multi_store(&array); check_xa_alloc(); check_find(&array); diff --git a/lib/xarray.c b/lib/xarray.c index 
89e37ac50850..b9a6cf42feee 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1429,16 +1429,12 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, if (WARN_ON_ONCE(xa_is_advanced(entry))) return XA_ERROR(-EINVAL); - if (xa_track_free(xa) && !entry) - entry = XA_ZERO_ENTRY; do { curr = xas_load(&xas); - if (curr == XA_ZERO_ENTRY) - curr = NULL; if (curr == old) { xas_store(&xas, entry); - if (xa_track_free(xa)) + if (xa_track_free(xa) && entry && !curr) xas_clear_mark(&xas, XA_FREE_MARK); } } while (__xas_nomem(&xas, gfp)); -- cgit v1.2.3 From 962033d55d0761e0716a01a715c6659c8c8dfc41 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 20 Feb 2019 11:51:22 -0500 Subject: XArray: Use xa_cmpxchg to implement xa_reserve Jason feels this is clearer, and it saves a function and an exported symbol. Suggested-by: Jason Gunthorpe Signed-off-by: Matthew Wilcox --- Documentation/core-api/xarray.rst | 1 - include/linux/xarray.h | 25 +++---------------------- lib/xarray.c | 36 ------------------------------------ 3 files changed, 3 insertions(+), 59 deletions(-) diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index c7436da5c4ad..ef6f9f98f595 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -215,7 +215,6 @@ Assumes xa_lock held on entry: * :c:func:`__xa_erase` * :c:func:`__xa_cmpxchg` * :c:func:`__xa_alloc` - * :c:func:`__xa_reserve` * :c:func:`__xa_set_mark` * :c:func:`__xa_clear_mark` diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 588733abd19d..0e01e6129145 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -525,7 +525,6 @@ int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, struct xa_limit, gfp_t); int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, struct xa_limit, u32 *next, gfp_t); -int __must_check __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -1004,13 +1003,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, static inline __must_check int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) { - int ret; - - xa_lock(xa); - ret = __xa_reserve(xa, index, gfp); - xa_unlock(xa); - - return ret; + return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp)); } /** @@ -1028,13 +1021,7 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) static inline __must_check int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) { - int ret; - - xa_lock_bh(xa); - ret = __xa_reserve(xa, index, gfp); - xa_unlock_bh(xa); - - return ret; + return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp)); } /** @@ -1052,13 +1039,7 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) static inline __must_check int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) { - int ret; - - xa_lock_irq(xa); - ret = __xa_reserve(xa, index, gfp); - xa_unlock_irq(xa); - - return ret; + return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp)); } /** diff --git a/lib/xarray.c b/lib/xarray.c index b9a6cf42feee..3f10198f00b7 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1484,42 +1484,6 @@ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) } EXPORT_SYMBOL(__xa_insert); -/** - * __xa_reserve() - Reserve this index in the XArray. - * @xa: XArray. - * @index: Index into array. 
- * @gfp: Memory allocation flags. - * - * Ensures there is somewhere to store an entry at @index in the array. - * If there is already something stored at @index, this function does - * nothing. If there was nothing there, the entry is marked as reserved. - * Loading from a reserved entry returns a %NULL pointer. - * - * If you do not use the entry that you have reserved, call xa_release() - * or xa_erase() to free any unnecessary memory. - * - * Context: Any context. Expects the xa_lock to be held on entry. May - * release the lock, sleep and reacquire the lock if the @gfp flags permit. - * Return: 0 if the reservation succeeded or -ENOMEM if it failed. - */ -int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) -{ - XA_STATE(xas, xa, index); - void *curr; - - do { - curr = xas_load(&xas); - if (!curr) { - xas_store(&xas, XA_ZERO_ENTRY); - if (xa_track_free(xa)) - xas_clear_mark(&xas, XA_FREE_MARK); - } - } while (__xas_nomem(&xas, gfp)); - - return xas_error(&xas); -} -EXPORT_SYMBOL(__xa_reserve); - #ifdef CONFIG_XARRAY_MULTI static void xas_set_range(struct xa_state *xas, unsigned long first, unsigned long last) -- cgit v1.2.3 From 2fbe967b3eb7466f679307b38564b8271c093241 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Thu, 21 Feb 2019 17:36:45 -0500 Subject: XArray: Fix xa_erase of 2-byte aligned entries xas_store() was interpreting the entry it found in the array as a node entry if the bottom two bits had value 2. That's only true if either the entry is in the root node or in a non-leaf node. Signed-off-by: Matthew Wilcox --- lib/test_xarray.c | 16 +++++++++++++++- lib/xarray.c | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 52f8ecff8c0c..bc202d468a6b 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1355,6 +1355,20 @@ static void check_align_1(struct xarray *xa, char *name) xa_destroy(xa); } +static void check_align_2(struct xarray *xa, char *name) +{ + int i; + + XA_BUG_ON(xa, !xa_empty(xa)); + + for (i = 0; i < 8; i++) { + XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL); + xa_erase(xa, 0); + } + + XA_BUG_ON(xa, !xa_empty(xa)); +} + static noinline void check_align(struct xarray *xa) { char name[] = "Motorola 68000"; @@ -1363,7 +1377,7 @@ static noinline void check_align(struct xarray *xa) check_align_1(xa, name + 1); check_align_1(xa, name + 2); check_align_1(xa, name + 3); -// check_align_2(xa, name); + check_align_2(xa, name); } static LIST_HEAD(shadow_nodes); diff --git a/lib/xarray.c b/lib/xarray.c index 3f10198f00b7..2cc3798672f7 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -800,7 +800,7 @@ void *xas_store(struct xa_state *xas, void *entry) * entry is set to NULL. */ rcu_assign_pointer(*slot, entry); - if (xa_is_node(next)) + if (xa_is_node(next) && (!node || node->shift)) xas_free_nodes(xas, xa_to_node(next)); if (!node) break; -- cgit v1.2.3 From 4a5c8d898948d1ac876522cdd62f07a78104bfe9 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Thu, 21 Feb 2019 17:54:44 -0500 Subject: XArray: Fix xa_reserve for 2-byte aligned entries If we reserve index 0, the next entry to be stored there might be 2-byte aligned. That means we have to create the root xa_node at the time of reserving the initial entry. 
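Concretely, a sequence like the following (condensed from the check_align_2() additions below, where entry is a pointer whose bottom two bits are 10 and so cannot live in the root slot) must now succeed without allocating at store time:

	XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
	/* gfp of 0: the store may not allocate, so the node must already exist */
	XA_BUG_ON(xa, xa_store(xa, 0, entry, 0) != NULL);
	xa_erase(xa, 0);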
Signed-off-by: Matthew Wilcox --- lib/test_xarray.c | 10 ++++++++++ lib/xarray.c | 8 +++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index bc202d468a6b..5d4bad8bd96a 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1355,6 +1355,10 @@ static void check_align_1(struct xarray *xa, char *name) xa_destroy(xa); } +/* + * We should always be able to store without allocating memory after + * reserving a slot. + */ static void check_align_2(struct xarray *xa, char *name) { int i; @@ -1366,6 +1370,12 @@ static void check_align_2(struct xarray *xa, char *name) xa_erase(xa, 0); } + for (i = 0; i < 8; i++) { + XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0); + XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL); + xa_erase(xa, 0); + } + XA_BUG_ON(xa, !xa_empty(xa)); } diff --git a/lib/xarray.c b/lib/xarray.c index 2cc3798672f7..6be3acbb861f 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -767,10 +767,12 @@ void *xas_store(struct xa_state *xas, void *entry) void *first, *next; bool value = xa_is_value(entry); - if (entry) - first = xas_create(xas, !xa_is_node(entry)); - else + if (entry) { + bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry); + first = xas_create(xas, allow_root); + } else { first = xas_load(xas); + } if (xas_invalid(xas)) return first; -- cgit v1.2.3
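For reference, a minimal sketch of the allocating API as it stands at the end of this series; the xarray.h names (DEFINE_XARRAY_ALLOC, xa_limit_31b, xa_limit_32b, xa_alloc, xa_alloc_cyclic) are the ones introduced above, while struct foo, foo_ids and the two helpers are invented for illustration:

	#include <linux/xarray.h>

	struct foo {
		u32 id;
	};

	static DEFINE_XARRAY_ALLOC(foo_ids);
	static u32 foo_id_next;	/* cyclic cursor lives with the caller, not in the XArray */

	/* Lowest free ID in [0, INT_MAX]; -ENOMEM on allocation failure, -EBUSY when full. */
	static int foo_new_id(struct foo *foo)
	{
		return xa_alloc(&foo_ids, &foo->id, foo, xa_limit_31b, GFP_KERNEL);
	}

	/*
	 * xa_alloc_cyclic() starts searching at foo_id_next and wraps around,
	 * returning 1 (not an error) if it wrapped; this helper folds that into 0.
	 */
	static int foo_new_cyclic_id(struct foo *foo)
	{
		int err = xa_alloc_cyclic(&foo_ids, &foo->id, foo, xa_limit_32b,
					  &foo_id_next, GFP_KERNEL);

		return err < 0 ? err : 0;
	}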