From 8d24c0b43125ec26cc80e04588477a9a2afc025c Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:14 +0100 Subject: rhashtable: Do hashing inside of rhashtable_lookup_compare() Hash the key inside of rhashtable_lookup_compare() like rhashtable_lookup() does. This allows to simplify the hashing functions and keep them private. Signed-off-by: Thomas Graf Cc: netfilter-devel@vger.kernel.org Signed-off-by: David S. Miller --- lib/rhashtable.c | 91 +++++++++++++++++++------------------------------------- 1 file changed, 30 insertions(+), 61 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6c3c723e902b..1ee0eb636ca3 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -42,69 +42,39 @@ static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) return (void *) he - ht->p.head_offset; } -static u32 __hashfn(const struct rhashtable *ht, const void *key, - u32 len, u32 hsize) +static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash) { - u32 h; - - h = ht->p.hashfn(key, len, ht->p.hash_rnd); - - return h & (hsize - 1); -} - -/** - * rhashtable_hashfn - compute hash for key of given length - * @ht: hash table to compute for - * @key: pointer to key - * @len: length of key - * - * Computes the hash value using the hash function provided in the 'hashfn' - * of struct rhashtable_params. The returned value is guaranteed to be - * smaller than the number of buckets in the hash table. - */ -u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len) -{ - struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); - - return __hashfn(ht, key, len, tbl->size); + return hash & (tbl->size - 1); } -EXPORT_SYMBOL_GPL(rhashtable_hashfn); -static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize) +static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) { - if (unlikely(!ht->p.key_len)) { - u32 h; - - h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); + u32 hash; - return h & (hsize - 1); - } + if (unlikely(!ht->p.key_len)) + hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); + else + hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, + ht->p.hash_rnd); - return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize); + return hash; } -/** - * rhashtable_obj_hashfn - compute hash for hashed object - * @ht: hash table to compute for - * @ptr: pointer to hashed object - * - * Computes the hash value using the hash function `hashfn` respectively - * 'obj_hashfn' depending on whether the hash table is set up to work with - * a fixed length key. The returned value is guaranteed to be smaller than - * the number of buckets in the hash table. 
- */ -u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr) +static u32 key_hashfn(const struct rhashtable *ht, const void *key, u32 len) { struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + u32 hash; + + hash = ht->p.hashfn(key, len, ht->p.hash_rnd); - return obj_hashfn(ht, ptr, tbl->size); + return rht_bucket_index(tbl, hash); } -EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn); static u32 head_hashfn(const struct rhashtable *ht, - const struct rhash_head *he, u32 hsize) + const struct bucket_table *tbl, + const struct rhash_head *he) { - return obj_hashfn(ht, rht_obj(ht, he), hsize); + return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he))); } static struct bucket_table *bucket_table_alloc(size_t nbuckets) @@ -170,9 +140,9 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * reaches a node that doesn't hash to the same bucket as the * previous node p. Call the previous node p; */ - h = head_hashfn(ht, p, new_tbl->size); + h = head_hashfn(ht, new_tbl, p); rht_for_each(he, p->next, ht) { - if (head_hashfn(ht, he, new_tbl->size) != h) + if (head_hashfn(ht, new_tbl, he) != h) break; p = he; } @@ -184,7 +154,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, next = NULL; if (he) { rht_for_each(he, he->next, ht) { - if (head_hashfn(ht, he, new_tbl->size) == h) { + if (head_hashfn(ht, new_tbl, he) == h) { next = he; break; } @@ -237,9 +207,9 @@ int rhashtable_expand(struct rhashtable *ht) * single imprecise chain. */ for (i = 0; i < new_tbl->size; i++) { - h = i & (old_tbl->size - 1); + h = rht_bucket_index(old_tbl, i); rht_for_each(he, old_tbl->buckets[h], ht) { - if (head_hashfn(ht, he, new_tbl->size) == i) { + if (head_hashfn(ht, new_tbl, he) == i) { RCU_INIT_POINTER(new_tbl->buckets[i], he); break; } @@ -353,7 +323,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) ASSERT_RHT_MUTEX(ht); - hash = head_hashfn(ht, obj, tbl->size); + hash = head_hashfn(ht, tbl, obj); RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); rcu_assign_pointer(tbl->buckets[hash], obj); ht->nelems++; @@ -413,7 +383,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) ASSERT_RHT_MUTEX(ht); - h = head_hashfn(ht, obj, tbl->size); + h = head_hashfn(ht, tbl, obj); pprev = &tbl->buckets[h]; rht_for_each(he, tbl->buckets[h], ht) { @@ -452,7 +422,7 @@ void *rhashtable_lookup(const struct rhashtable *ht, const void *key) BUG_ON(!ht->p.key_len); - h = __hashfn(ht, key, ht->p.key_len, tbl->size); + h = key_hashfn(ht, key, ht->p.key_len); rht_for_each_rcu(he, tbl->buckets[h], ht) { if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, ht->p.key_len)) @@ -467,7 +437,7 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup); /** * rhashtable_lookup_compare - search hash table with compare function * @ht: hash table - * @hash: hash value of desired entry + * @key: the pointer to the key * @compare: compare function, must return true on match * @arg: argument passed on to compare function * @@ -479,15 +449,14 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup); * * Returns the first entry on which the compare function returned true. 
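 *
 * Example (editor's sketch, not part of this patch): the caller now passes
 * the key itself and the hash is computed internally. The object type and
 * compare callback below are hypothetical.
 *
 *	static bool my_obj_cmp(void *ptr, void *arg)
 *	{
 *		return ((struct my_obj *)ptr)->id == *(u32 *)arg;
 *	}
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_compare(ht, &id, my_obj_cmp, &id);
 *	rcu_read_unlock();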
*/ -void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, +void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg) { const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); struct rhash_head *he; + u32 hash; - if (unlikely(hash >= tbl->size)) - return NULL; - + hash = key_hashfn(ht, key, ht->p.key_len); rht_for_each_rcu(he, tbl->buckets[hash], ht) { if (!compare(rht_obj(ht, he), arg)) continue; -- cgit v1.2.3 From a4b18cda4c2676a4b4b59622b2e0394dc153e00b Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:15 +0100 Subject: rhashtable: Use rht_obj() instead of manual offset calculation Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- lib/rhashtable.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 1ee0eb636ca3..b658245826a1 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -427,7 +427,7 @@ void *rhashtable_lookup(const struct rhashtable *ht, const void *key) if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, ht->p.key_len)) continue; - return (void *) he - ht->p.head_offset; + return rht_obj(ht, he); } return NULL; @@ -460,7 +460,7 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, rht_for_each_rcu(he, tbl->buckets[hash], ht) { if (!compare(rht_obj(ht, he), arg)) continue; - return (void *) he - ht->p.head_offset; + return rht_obj(ht, he); } return NULL; -- cgit v1.2.3 From 88d6ed15acff1cb44b1d1f3c0a393b7f7744957a Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:16 +0100 Subject: rhashtable: Convert bucket iterators to take table and index This patch is in preparation to introduce per bucket spinlocks. It extends all iterator macros to take the bucket table and bucket index. It also introduces a new rht_dereference_bucket() to handle protected accesses to buckets. It introduces a barrier() to the RCU iterators to the prevent the compiler from caching the first element. The lockdep verifier is introduced as stub which always succeeds and properly implement in the next patch when the locks are introduced. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- lib/rhashtable.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index b658245826a1..ce450d095fdf 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -35,6 +35,12 @@ int lockdep_rht_mutex_is_held(const struct rhashtable *ht) return ht->p.mutex_is_held(ht->p.parent); } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); + +int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) +{ + return 1; +} +EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #endif static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) @@ -141,7 +147,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * previous node p. 
Call the previous node p; */ h = head_hashfn(ht, new_tbl, p); - rht_for_each(he, p->next, ht) { + rht_for_each_continue(he, p->next, old_tbl, n) { if (head_hashfn(ht, new_tbl, he) != h) break; p = he; @@ -153,7 +159,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, */ next = NULL; if (he) { - rht_for_each(he, he->next, ht) { + rht_for_each_continue(he, he->next, old_tbl, n) { if (head_hashfn(ht, new_tbl, he) == h) { next = he; break; @@ -208,7 +214,7 @@ int rhashtable_expand(struct rhashtable *ht) */ for (i = 0; i < new_tbl->size; i++) { h = rht_bucket_index(old_tbl, i); - rht_for_each(he, old_tbl->buckets[h], ht) { + rht_for_each(he, old_tbl, h) { if (head_hashfn(ht, new_tbl, he) == i) { RCU_INIT_POINTER(new_tbl->buckets[i], he); break; @@ -286,7 +292,7 @@ int rhashtable_shrink(struct rhashtable *ht) * to the new bucket. */ for (pprev = &ntbl->buckets[i]; *pprev != NULL; - pprev = &rht_dereference(*pprev, ht)->next) + pprev = &rht_dereference_bucket(*pprev, ntbl, i)->next) ; RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]); } @@ -386,7 +392,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) h = head_hashfn(ht, tbl, obj); pprev = &tbl->buckets[h]; - rht_for_each(he, tbl->buckets[h], ht) { + rht_for_each(he, tbl, h) { if (he != obj) { pprev = &he->next; continue; @@ -423,7 +429,7 @@ void *rhashtable_lookup(const struct rhashtable *ht, const void *key) BUG_ON(!ht->p.key_len); h = key_hashfn(ht, key, ht->p.key_len); - rht_for_each_rcu(he, tbl->buckets[h], ht) { + rht_for_each_rcu(he, tbl, h) { if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, ht->p.key_len)) continue; @@ -457,7 +463,7 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, u32 hash; hash = key_hashfn(ht, key, ht->p.key_len); - rht_for_each_rcu(he, tbl->buckets[hash], ht) { + rht_for_each_rcu(he, tbl, hash) { if (!compare(rht_obj(ht, he), arg)) continue; return rht_obj(ht, he); @@ -625,6 +631,7 @@ static int __init test_rht_lookup(struct rhashtable *ht) static void test_bucket_stats(struct rhashtable *ht, bool quiet) { unsigned int cnt, rcu_cnt, i, total = 0; + struct rhash_head *pos; struct test_obj *obj; struct bucket_table *tbl; @@ -635,14 +642,14 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet) if (!quiet) pr_info(" [%#4x/%zu]", i, tbl->size); - rht_for_each_entry_rcu(obj, tbl->buckets[i], node) { + rht_for_each_entry_rcu(obj, pos, tbl, i, node) { cnt++; total++; if (!quiet) pr_cont(" [%p],", obj); } - rht_for_each_entry_rcu(obj, tbl->buckets[i], node) + rht_for_each_entry_rcu(obj, pos, tbl, i, node) rcu_cnt++; if (rcu_cnt != cnt) @@ -664,7 +671,8 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet) static int __init test_rhashtable(struct rhashtable *ht) { struct bucket_table *tbl; - struct test_obj *obj, *next; + struct test_obj *obj; + struct rhash_head *pos, *next; int err; unsigned int i; @@ -733,7 +741,7 @@ static int __init test_rhashtable(struct rhashtable *ht) error: tbl = rht_dereference_rcu(ht->tbl, ht); for (i = 0; i < tbl->size; i++) - rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node) + rht_for_each_entry_safe(obj, pos, next, tbl, i, node) kfree(obj); return err; -- cgit v1.2.3 From b8e1943e9f754219bcfb40bac4a605b5348acb25 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:17 +0100 Subject: rhashtable: Factor out bucket_tail() function Subsequent patches will require access to the bucket tail. 
Access to the tail is relatively cheap as the automatic resizing of the table should keep the number of entries per bucket to no more than 0.75 on average. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- lib/rhashtable.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index ce450d095fdf..0bd29c178910 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -83,6 +83,18 @@ static u32 head_hashfn(const struct rhashtable *ht, return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he))); } +static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) +{ + struct rhash_head __rcu **pprev; + + for (pprev = &tbl->buckets[n]; + rht_dereference_bucket(*pprev, tbl, n); + pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) + ; + + return pprev; +} + static struct bucket_table *bucket_table_alloc(size_t nbuckets) { struct bucket_table *tbl; @@ -266,7 +278,6 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); int rhashtable_shrink(struct rhashtable *ht) { struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); - struct rhash_head __rcu **pprev; unsigned int i; ASSERT_RHT_MUTEX(ht); @@ -286,15 +297,9 @@ int rhashtable_shrink(struct rhashtable *ht) */ for (i = 0; i < ntbl->size; i++) { ntbl->buckets[i] = tbl->buckets[i]; + RCU_INIT_POINTER(*bucket_tail(ntbl, i), + tbl->buckets[i + ntbl->size]); - /* Link each bucket in the new table to the first bucket - * in the old table that contains entries which will hash - * to the new bucket. - */ - for (pprev = &ntbl->buckets[i]; *pprev != NULL; - pprev = &rht_dereference_bucket(*pprev, ntbl, i)->next) - ; - RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]); } /* Publish the new, valid hash table */ -- cgit v1.2.3 From 897362e446436d245972e72c6bc5b33bd7a5c659 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:18 +0100 Subject: nft_hash: Remove rhashtable_remove_pprev() The removal function of nft_hash currently stores a reference to the previous element during lookup which is used to optimize removal later on. This was possible because a lock is held throughout calling rhashtable_lookup() and rhashtable_remove(). With the introdution of deferred table resizing in parallel to lookups and insertions, the nftables lock will no longer synchronize all table mutations and the stored pprev may become invalid. Removing this optimization makes removal slightly more expensive on average but allows taking the resize cost out of the insert and remove path. Signed-off-by: Thomas Graf Cc: netfilter-devel@vger.kernel.org Signed-off-by: David S. Miller --- lib/rhashtable.c | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 0bd29c178910..e6b85c4a5828 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -344,32 +344,6 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) } EXPORT_SYMBOL_GPL(rhashtable_insert); -/** - * rhashtable_remove_pprev - remove object from hash table given previous element - * @ht: hash table - * @obj: pointer to hash head inside object - * @pprev: pointer to previous element - * - * Identical to rhashtable_remove() but caller is alreayd aware of the element - * in front of the element to be deleted. This is in particular useful for - * deletion when combined with walking or lookup. 
- */ -void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev) -{ - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); - - ASSERT_RHT_MUTEX(ht); - - RCU_INIT_POINTER(*pprev, obj->next); - ht->nelems--; - - if (ht->p.shrink_decision && - ht->p.shrink_decision(ht, tbl->size)) - rhashtable_shrink(ht); -} -EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); - /** * rhashtable_remove - remove object from hash table * @ht: hash table @@ -403,7 +377,13 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) continue; } - rhashtable_remove_pprev(ht, he, pprev); + RCU_INIT_POINTER(*pprev, he->next); + ht->nelems--; + + if (ht->p.shrink_decision && + ht->p.shrink_decision(ht, tbl->size)) + rhashtable_shrink(ht); + return true; } -- cgit v1.2.3 From 97defe1ecf868b8127f8e62395499d6a06e4c4b1 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:20 +0100 Subject: rhashtable: Per bucket locks & deferred expansion/shrinking Introduces an array of spinlocks to protect bucket mutations. The number of spinlocks per CPU is configurable and selected based on the hash of the bucket. This allows for parallel insertions and removals of entries which do not share a lock. The patch also defers expansion and shrinking to a worker queue which allows insertion and removal from atomic context. Insertions and deletions may occur in parallel to it and are only held up briefly while the particular bucket is linked or unzipped. Mutations of the bucket table pointer is protected by a new mutex, read access is RCU protected. In the event of an expansion or shrinking, the new bucket table allocated is exposed as a so called future table as soon as the resize process starts. Lookups, deletions, and insertions will briefly use both tables. The future table becomes the main table after an RCU grace period and initial linking of the old to the new table was performed. Optimization of the chains to make use of the new number of buckets follows only the new table is in use. The side effect of this is that during that RCU grace period, a bucket traversal using any rht_for_each() variant on the main table will not see any insertions performed during the RCU grace period which would at that point land in the future table. The lookup will see them as it searches both tables if needed. Having multiple insertions and removals occur in parallel requires nelems to become an atomic counter. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- lib/rhashtable.c | 458 +++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 344 insertions(+), 114 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index e6b85c4a5828..312e3437c7bc 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -26,19 +26,42 @@ #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4UL +#define BUCKET_LOCKS_PER_CPU 128UL + +enum { + RHT_LOCK_NORMAL, + RHT_LOCK_NESTED, + RHT_LOCK_NESTED2, +}; + +/* The bucket lock is selected based on the hash and protects mutations + * on a group of hash buckets. + * + * IMPORTANT: When holding the bucket lock of both the old and new table + * during expansions and shrinking, the old bucket lock must always be + * acquired first. 
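 *
 * Illustrative lock ordering (editor's sketch mirroring the pattern the
 * expansion code in this patch uses; old_tbl, new_tbl, old_hash and
 * new_hash are placeholders):
 *
 *	spin_lock_bh(bucket_lock(old_tbl, old_hash));
 *	spin_lock_bh_nested(bucket_lock(new_tbl, new_hash), RHT_LOCK_NESTED);
 *	...
 *	spin_unlock_bh(bucket_lock(new_tbl, new_hash));
 *	spin_unlock_bh(bucket_lock(old_tbl, old_hash));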
+ */ +static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash) +{ + return &tbl->locks[hash & tbl->locks_mask]; +} #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) +#define ASSERT_BUCKET_LOCK(TBL, HASH) \ + BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH)) #ifdef CONFIG_PROVE_LOCKING -int lockdep_rht_mutex_is_held(const struct rhashtable *ht) +int lockdep_rht_mutex_is_held(struct rhashtable *ht) { - return ht->p.mutex_is_held(ht->p.parent); + return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { - return 1; + spinlock_t *lock = bucket_lock(tbl, hash); + + return (debug_locks) ? lockdep_is_held(lock) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #endif @@ -66,7 +89,7 @@ static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) return hash; } -static u32 key_hashfn(const struct rhashtable *ht, const void *key, u32 len) +static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) { struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); u32 hash; @@ -95,7 +118,49 @@ static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) return pprev; } -static struct bucket_table *bucket_table_alloc(size_t nbuckets) +static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl) +{ + unsigned int i, size; +#if defined(CONFIG_PROVE_LOCKING) + unsigned int nr_pcpus = 2; +#else + unsigned int nr_pcpus = num_possible_cpus(); +#endif + + nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); + size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); + + /* Never allocate more than one lock per bucket */ + size = min_t(unsigned int, size, tbl->size); + + if (sizeof(spinlock_t) != 0) { +#ifdef CONFIG_NUMA + if (size * sizeof(spinlock_t) > PAGE_SIZE) + tbl->locks = vmalloc(size * sizeof(spinlock_t)); + else +#endif + tbl->locks = kmalloc_array(size, sizeof(spinlock_t), + GFP_KERNEL); + if (!tbl->locks) + return -ENOMEM; + for (i = 0; i < size; i++) + spin_lock_init(&tbl->locks[i]); + } + tbl->locks_mask = size - 1; + + return 0; +} + +static void bucket_table_free(const struct bucket_table *tbl) +{ + if (tbl) + kvfree(tbl->locks); + + kvfree(tbl); +} + +static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, + size_t nbuckets) { struct bucket_table *tbl; size_t size; @@ -110,12 +175,12 @@ static struct bucket_table *bucket_table_alloc(size_t nbuckets) tbl->size = nbuckets; - return tbl; -} + if (alloc_bucket_locks(ht, tbl) < 0) { + bucket_table_free(tbl); + return NULL; + } -static void bucket_table_free(const struct bucket_table *tbl) -{ - kvfree(tbl); + return tbl; } /** @@ -126,7 +191,7 @@ static void bucket_table_free(const struct bucket_table *tbl) bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) { /* Expand table when exceeding 75% load */ - return ht->nelems > (new_size / 4 * 3); + return atomic_read(&ht->nelems) > (new_size / 4 * 3); } EXPORT_SYMBOL_GPL(rht_grow_above_75); @@ -138,41 +203,59 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) { /* Shrink table beneath 30% load */ - return ht->nelems < (new_size * 3 / 10); + return atomic_read(&ht->nelems) < (new_size * 3 / 10); } EXPORT_SYMBOL_GPL(rht_shrink_below_30); static void hashtable_chain_unzip(const struct rhashtable *ht, const struct bucket_table *new_tbl, - struct bucket_table *old_tbl, size_t n) + struct bucket_table *old_tbl, + 
size_t old_hash) { struct rhash_head *he, *p, *next; - unsigned int h; + spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL; + unsigned int new_hash, new_hash2; + + ASSERT_BUCKET_LOCK(old_tbl, old_hash); /* Old bucket empty, no work needed. */ - p = rht_dereference(old_tbl->buckets[n], ht); + p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, + old_hash); if (!p) return; + new_hash = new_hash2 = head_hashfn(ht, new_tbl, p); + new_bucket_lock = bucket_lock(new_tbl, new_hash); + /* Advance the old bucket pointer one or more times until it * reaches a node that doesn't hash to the same bucket as the * previous node p. Call the previous node p; */ - h = head_hashfn(ht, new_tbl, p); - rht_for_each_continue(he, p->next, old_tbl, n) { - if (head_hashfn(ht, new_tbl, he) != h) + rht_for_each_continue(he, p->next, old_tbl, old_hash) { + new_hash2 = head_hashfn(ht, new_tbl, he); + if (new_hash != new_hash2) break; p = he; } - RCU_INIT_POINTER(old_tbl->buckets[n], p->next); + rcu_assign_pointer(old_tbl->buckets[old_hash], p->next); + + spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); + + /* If we have encountered an entry that maps to a different bucket in + * the new table, lock down that bucket as well as we might cut off + * the end of the chain. + */ + new_bucket_lock2 = bucket_lock(new_tbl, new_hash); + if (new_bucket_lock != new_bucket_lock2) + spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2); /* Find the subsequent node which does hash to the same * bucket as node P, or NULL if no such node exists. */ next = NULL; if (he) { - rht_for_each_continue(he, he->next, old_tbl, n) { - if (head_hashfn(ht, new_tbl, he) == h) { + rht_for_each_continue(he, he->next, old_tbl, old_hash) { + if (head_hashfn(ht, new_tbl, he) == new_hash) { next = he; break; } @@ -182,7 +265,23 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /* Set p's next pointer to that subsequent node pointer, * bypassing the nodes which do not hash to p's bucket */ - RCU_INIT_POINTER(p->next, next); + rcu_assign_pointer(p->next, next); + + if (new_bucket_lock != new_bucket_lock2) + spin_unlock_bh(new_bucket_lock2); + spin_unlock_bh(new_bucket_lock); +} + +static void link_old_to_new(struct bucket_table *new_tbl, + unsigned int new_hash, struct rhash_head *entry) +{ + spinlock_t *new_bucket_lock; + + new_bucket_lock = bucket_lock(new_tbl, new_hash); + + spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry); + spin_unlock_bh(new_bucket_lock); } /** @@ -195,43 +294,59 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. * - * The caller must ensure that no concurrent table mutations take place. - * It is however valid to have concurrent lookups if they are RCU protected. + * The caller must ensure that no concurrent resizing occurs by holding + * ht->mutex. + * + * It is valid to have concurrent insertions and deletions protected by per + * bucket locks or concurrent RCU protected lookups and traversals. 
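 *
 * Example (editor's sketch; mirrors how the self-test at the bottom of this
 * file drives a manual expansion under the new ht->mutex):
 *
 *	mutex_lock(&ht->mutex);
 *	rhashtable_expand(ht);
 *	mutex_unlock(&ht->mutex);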
*/ int rhashtable_expand(struct rhashtable *ht) { struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_head *he; - unsigned int i, h; - bool complete; + spinlock_t *old_bucket_lock; + unsigned int new_hash, old_hash; + bool complete = false; ASSERT_RHT_MUTEX(ht); if (ht->p.max_shift && ht->shift >= ht->p.max_shift) return 0; - new_tbl = bucket_table_alloc(old_tbl->size * 2); + new_tbl = bucket_table_alloc(ht, old_tbl->size * 2); if (new_tbl == NULL) return -ENOMEM; ht->shift++; - /* For each new bucket, search the corresponding old bucket - * for the first entry that hashes to the new bucket, and - * link the new bucket to that entry. Since all the entries - * which will end up in the new bucket appear in the same - * old bucket, this constructs an entirely valid new hash - * table, but with multiple buckets "zipped" together into a - * single imprecise chain. + /* Make insertions go into the new, empty table right away. Deletions + * and lookups will be attempted in both tables until we synchronize. + * The synchronize_rcu() guarantees for the new table to be picked up + * so no new additions go into the old table while we relink. + */ + rcu_assign_pointer(ht->future_tbl, new_tbl); + synchronize_rcu(); + + /* For each new bucket, search the corresponding old bucket for the + * first entry that hashes to the new bucket, and link the end of + * newly formed bucket chain (containing entries added to future + * table) to that entry. Since all the entries which will end up in + * the new bucket appear in the same old bucket, this constructs an + * entirely valid new hash table, but with multiple buckets + * "zipped" together into a single imprecise chain. */ - for (i = 0; i < new_tbl->size; i++) { - h = rht_bucket_index(old_tbl, i); - rht_for_each(he, old_tbl, h) { - if (head_hashfn(ht, new_tbl, he) == i) { - RCU_INIT_POINTER(new_tbl->buckets[i], he); + for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { + old_hash = rht_bucket_index(old_tbl, new_hash); + old_bucket_lock = bucket_lock(old_tbl, old_hash); + + spin_lock_bh(old_bucket_lock); + rht_for_each(he, old_tbl, old_hash) { + if (head_hashfn(ht, new_tbl, he) == new_hash) { + link_old_to_new(new_tbl, new_hash, he); break; } } + spin_unlock_bh(old_bucket_lock); } /* Publish the new table pointer. Lookups may now traverse @@ -241,7 +356,7 @@ int rhashtable_expand(struct rhashtable *ht) rcu_assign_pointer(ht->tbl, new_tbl); /* Unzip interleaved hash chains */ - do { + while (!complete && !ht->being_destroyed) { /* Wait for readers. All new readers will see the new * table, and thus no references to the old table will * remain. @@ -253,12 +368,17 @@ int rhashtable_expand(struct rhashtable *ht) * table): ... */ complete = true; - for (i = 0; i < old_tbl->size; i++) { - hashtable_chain_unzip(ht, new_tbl, old_tbl, i); - if (old_tbl->buckets[i] != NULL) + for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { + old_bucket_lock = bucket_lock(old_tbl, old_hash); + spin_lock_bh(old_bucket_lock); + + hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash); + if (old_tbl->buckets[old_hash] != NULL) complete = false; + + spin_unlock_bh(old_bucket_lock); } - } while (!complete); + } bucket_table_free(old_tbl); return 0; @@ -272,38 +392,65 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. * + * The caller must ensure that no concurrent resizing occurs by holding + * ht->mutex. 
+ * * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. + * + * It is valid to have concurrent insertions and deletions protected by per + * bucket locks or concurrent RCU protected lookups and traversals. */ int rhashtable_shrink(struct rhashtable *ht) { - struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); - unsigned int i; + struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht); + spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2; + unsigned int new_hash; ASSERT_RHT_MUTEX(ht); if (ht->shift <= ht->p.min_shift) return 0; - ntbl = bucket_table_alloc(tbl->size / 2); - if (ntbl == NULL) + new_tbl = bucket_table_alloc(ht, tbl->size / 2); + if (new_tbl == NULL) return -ENOMEM; - ht->shift--; + rcu_assign_pointer(ht->future_tbl, new_tbl); + synchronize_rcu(); - /* Link each bucket in the new table to the first bucket - * in the old table that contains entries which will hash - * to the new bucket. + /* Link the first entry in the old bucket to the end of the + * bucket in the new table. As entries are concurrently being + * added to the new table, lock down the new bucket. As we + * always divide the size in half when shrinking, each bucket + * in the new table maps to exactly two buckets in the old + * table. + * + * As removals can occur concurrently on the old table, we need + * to lock down both matching buckets in the old table. */ - for (i = 0; i < ntbl->size; i++) { - ntbl->buckets[i] = tbl->buckets[i]; - RCU_INIT_POINTER(*bucket_tail(ntbl, i), - tbl->buckets[i + ntbl->size]); - + for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { + old_bucket_lock1 = bucket_lock(tbl, new_hash); + old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size); + new_bucket_lock = bucket_lock(new_tbl, new_hash); + + spin_lock_bh(old_bucket_lock1); + spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED); + spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2); + + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), + tbl->buckets[new_hash]); + rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), + tbl->buckets[new_hash + new_tbl->size]); + + spin_unlock_bh(new_bucket_lock); + spin_unlock_bh(old_bucket_lock2); + spin_unlock_bh(old_bucket_lock1); } /* Publish the new, valid hash table */ - rcu_assign_pointer(ht->tbl, ntbl); + rcu_assign_pointer(ht->tbl, new_tbl); + ht->shift--; /* Wait for readers. No new readers will have references to the * old hash table. @@ -316,31 +463,63 @@ int rhashtable_shrink(struct rhashtable *ht) } EXPORT_SYMBOL_GPL(rhashtable_shrink); +static void rht_deferred_worker(struct work_struct *work) +{ + struct rhashtable *ht; + struct bucket_table *tbl; + + ht = container_of(work, struct rhashtable, run_work.work); + mutex_lock(&ht->mutex); + tbl = rht_dereference(ht->tbl, ht); + + if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) + rhashtable_expand(ht); + else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) + rhashtable_shrink(ht); + + mutex_unlock(&ht->mutex); +} + /** * rhashtable_insert - insert object into hash hash table * @ht: hash table * @obj: pointer to hash head inside object * - * Will automatically grow the table via rhashtable_expand() if the the - * grow_decision function specified at rhashtable_init() returns true. + * Will take a per bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. 
* - * The caller must ensure that no concurrent table mutations occur. It is - * however valid to have concurrent lookups if they are RCU protected. + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). */ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); - u32 hash; + struct bucket_table *tbl; + spinlock_t *lock; + unsigned hash; - ASSERT_RHT_MUTEX(ht); + rcu_read_lock(); + tbl = rht_dereference_rcu(ht->future_tbl, ht); hash = head_hashfn(ht, tbl, obj); + lock = bucket_lock(tbl, hash); + + spin_lock_bh(lock); RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); rcu_assign_pointer(tbl->buckets[hash], obj); - ht->nelems++; + spin_unlock_bh(lock); - if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) - rhashtable_expand(ht); + atomic_inc(&ht->nelems); + + /* Only grow the table if no resizing is currently in progress. */ + if (ht->tbl != ht->future_tbl && + ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) + schedule_delayed_work(&ht->run_work, 0); + + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rhashtable_insert); @@ -361,32 +540,56 @@ EXPORT_SYMBOL_GPL(rhashtable_insert); */ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) { - struct bucket_table *tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *tbl; struct rhash_head __rcu **pprev; struct rhash_head *he; - u32 h; + spinlock_t *lock; + unsigned int hash; - ASSERT_RHT_MUTEX(ht); + rcu_read_lock(); + tbl = rht_dereference_rcu(ht->tbl, ht); + hash = head_hashfn(ht, tbl, obj); - h = head_hashfn(ht, tbl, obj); + lock = bucket_lock(tbl, hash); + spin_lock_bh(lock); - pprev = &tbl->buckets[h]; - rht_for_each(he, tbl, h) { +restart: + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { if (he != obj) { pprev = &he->next; continue; } - RCU_INIT_POINTER(*pprev, he->next); - ht->nelems--; + rcu_assign_pointer(*pprev, obj->next); + atomic_dec(&ht->nelems); - if (ht->p.shrink_decision && + spin_unlock_bh(lock); + + if (ht->tbl != ht->future_tbl && + ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) - rhashtable_shrink(ht); + schedule_delayed_work(&ht->run_work, 0); + + rcu_read_unlock(); return true; } + if (tbl != rht_dereference_rcu(ht->tbl, ht)) { + spin_unlock_bh(lock); + + tbl = rht_dereference_rcu(ht->tbl, ht); + hash = head_hashfn(ht, tbl, obj); + + lock = bucket_lock(tbl, hash); + spin_lock_bh(lock); + goto restart; + } + + spin_unlock_bh(lock); + rcu_read_unlock(); + return false; } EXPORT_SYMBOL_GPL(rhashtable_remove); @@ -402,25 +605,35 @@ EXPORT_SYMBOL_GPL(rhashtable_remove); * This lookup function may only be used for fixed key hash table (key_len * paramter set). It will BUG() if used inappropriately. * - * Lookups may occur in parallel with hash mutations as long as the lookup is - * guarded by rcu_read_lock(). The caller must take care of this. + * Lookups may occur in parallel with hashtable mutations and resizing. 
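 *
 * Example (editor's sketch; obj, key and use_object() are hypothetical, and
 * the returned object is only guaranteed to remain valid inside the RCU
 * read-side section):
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj)
 *		use_object(obj);
 *	rcu_read_unlock();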
*/ -void *rhashtable_lookup(const struct rhashtable *ht, const void *key) +void *rhashtable_lookup(struct rhashtable *ht, const void *key) { - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + const struct bucket_table *tbl, *old_tbl; struct rhash_head *he; - u32 h; + u32 hash; BUG_ON(!ht->p.key_len); - h = key_hashfn(ht, key, ht->p.key_len); - rht_for_each_rcu(he, tbl, h) { + rcu_read_lock(); + old_tbl = rht_dereference_rcu(ht->tbl, ht); + tbl = rht_dereference_rcu(ht->future_tbl, ht); + hash = key_hashfn(ht, key, ht->p.key_len); +restart: + rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, ht->p.key_len)) continue; + rcu_read_unlock(); return rht_obj(ht, he); } + if (unlikely(tbl != old_tbl)) { + tbl = old_tbl; + goto restart; + } + + rcu_read_unlock(); return NULL; } EXPORT_SYMBOL_GPL(rhashtable_lookup); @@ -435,25 +648,36 @@ EXPORT_SYMBOL_GPL(rhashtable_lookup); * Traverses the bucket chain behind the provided hash value and calls the * specified compare function for each entry. * - * Lookups may occur in parallel with hash mutations as long as the lookup is - * guarded by rcu_read_lock(). The caller must take care of this. + * Lookups may occur in parallel with hashtable mutations and resizing. * * Returns the first entry on which the compare function returned true. */ -void *rhashtable_lookup_compare(const struct rhashtable *ht, const void *key, +void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, bool (*compare)(void *, void *), void *arg) { - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + const struct bucket_table *tbl, *old_tbl; struct rhash_head *he; u32 hash; + rcu_read_lock(); + + old_tbl = rht_dereference_rcu(ht->tbl, ht); + tbl = rht_dereference_rcu(ht->future_tbl, ht); hash = key_hashfn(ht, key, ht->p.key_len); - rht_for_each_rcu(he, tbl, hash) { +restart: + rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { if (!compare(rht_obj(ht, he), arg)) continue; + rcu_read_unlock(); return rht_obj(ht, he); } + if (unlikely(tbl != old_tbl)) { + tbl = old_tbl; + goto restart; + } + rcu_read_unlock(); + return NULL; } EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); @@ -485,9 +709,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, - * #ifdef CONFIG_PROVE_LOCKING - * .mutex_is_held = &my_mutex_is_held, - * #endif * }; * * Configuration Example 2: Variable length keys @@ -507,9 +728,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .head_offset = offsetof(struct test_obj, node), * .hashfn = jhash, * .obj_hashfn = my_hash_fn, - * #ifdef CONFIG_PROVE_LOCKING - * .mutex_is_held = &my_mutex_is_held, - * #endif * }; */ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) @@ -529,18 +747,29 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) if (params->nelem_hint) size = rounded_hashtable_size(params); - tbl = bucket_table_alloc(size); + memset(ht, 0, sizeof(*ht)); + mutex_init(&ht->mutex); + memcpy(&ht->p, params, sizeof(*params)); + + if (params->locks_mul) + ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); + else + ht->p.locks_mul = BUCKET_LOCKS_PER_CPU; + + tbl = bucket_table_alloc(ht, size); if (tbl == NULL) return -ENOMEM; - memset(ht, 0, sizeof(*ht)); ht->shift = ilog2(tbl->size); - memcpy(&ht->p, params, sizeof(*params)); RCU_INIT_POINTER(ht->tbl, tbl); + 
RCU_INIT_POINTER(ht->future_tbl, tbl); if (!ht->p.hash_rnd) get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); + if (ht->p.grow_decision || ht->p.shrink_decision) + INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker); + return 0; } EXPORT_SYMBOL_GPL(rhashtable_init); @@ -553,9 +782,16 @@ EXPORT_SYMBOL_GPL(rhashtable_init); * has to make sure that no resizing may happen by unpublishing the hashtable * and waiting for the quiescent cycle before releasing the bucket array. */ -void rhashtable_destroy(const struct rhashtable *ht) +void rhashtable_destroy(struct rhashtable *ht) { - bucket_table_free(ht->tbl); + ht->being_destroyed = true; + + mutex_lock(&ht->mutex); + + cancel_delayed_work(&ht->run_work); + bucket_table_free(rht_dereference(ht->tbl, ht)); + + mutex_unlock(&ht->mutex); } EXPORT_SYMBOL_GPL(rhashtable_destroy); @@ -570,13 +806,6 @@ EXPORT_SYMBOL_GPL(rhashtable_destroy); #define TEST_PTR ((void *) 0xdeadbeef) #define TEST_NEXPANDS 4 -#ifdef CONFIG_PROVE_LOCKING -static int test_mutex_is_held(void *parent) -{ - return 1; -} -#endif - struct test_obj { void *ptr; int value; @@ -646,10 +875,10 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet) i, tbl->buckets[i], cnt); } - pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n", - total, ht->nelems, TEST_ENTRIES); + pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n", + total, atomic_read(&ht->nelems), TEST_ENTRIES); - if (total != ht->nelems || total != TEST_ENTRIES) + if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES) pr_warn("Test failed: Total count mismatch ^^^"); } @@ -688,7 +917,9 @@ static int __init test_rhashtable(struct rhashtable *ht) for (i = 0; i < TEST_NEXPANDS; i++) { pr_info(" Table expansion iteration %u...\n", i); + mutex_lock(&ht->mutex); rhashtable_expand(ht); + mutex_unlock(&ht->mutex); rcu_read_lock(); pr_info(" Verifying lookups...\n"); @@ -698,7 +929,9 @@ static int __init test_rhashtable(struct rhashtable *ht) for (i = 0; i < TEST_NEXPANDS; i++) { pr_info(" Table shrinkage iteration %u...\n", i); + mutex_lock(&ht->mutex); rhashtable_shrink(ht); + mutex_unlock(&ht->mutex); rcu_read_lock(); pr_info(" Verifying lookups...\n"); @@ -741,9 +974,6 @@ static int __init test_rht_init(void) .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), .hashfn = jhash, -#ifdef CONFIG_PROVE_LOCKING - .mutex_is_held = &test_mutex_is_held, -#endif .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, }; -- cgit v1.2.3 From f89bd6f87a53ce5a7d60662429591ebac2745c10 Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 2 Jan 2015 23:00:21 +0100 Subject: rhashtable: Supports for nulls marker In order to allow for wider usage of rhashtable, use a special nulls marker to terminate each chain. The reason for not using the existing nulls_list is that the prev pointer usage would not be valid as entries can be linked in two different buckets at the same time. The 4 nulls base bits can be set through the rhashtable_params structure like this: struct rhashtable_params params = { [...] .nulls_base = (1U << RHT_BASE_SHIFT), }; This reduces the hash length from 32 bits to 27 bits. Signed-off-by: Thomas Graf Signed-off-by: David S. 
Miller --- lib/rhashtable.c | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 312e3437c7bc..cbad192d3b3d 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -28,6 +28,9 @@ #define HASH_MIN_SIZE 4UL #define BUCKET_LOCKS_PER_CPU 128UL +/* Base bits plus 1 bit for nulls marker */ +#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) + enum { RHT_LOCK_NORMAL, RHT_LOCK_NESTED, @@ -86,7 +89,7 @@ static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, ht->p.hash_rnd); - return hash; + return hash >> HASH_RESERVED_SPACE; } static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) @@ -95,6 +98,7 @@ static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) u32 hash; hash = ht->p.hashfn(key, len, ht->p.hash_rnd); + hash >>= HASH_RESERVED_SPACE; return rht_bucket_index(tbl, hash); } @@ -111,7 +115,7 @@ static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) struct rhash_head __rcu **pprev; for (pprev = &tbl->buckets[n]; - rht_dereference_bucket(*pprev, tbl, n); + !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n)); pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) ; @@ -164,6 +168,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, { struct bucket_table *tbl; size_t size; + int i; size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); @@ -180,6 +185,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, return NULL; } + for (i = 0; i < nbuckets; i++) + INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); + return tbl; } @@ -221,7 +229,7 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /* Old bucket empty, no work needed. */ p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, old_hash); - if (!p) + if (rht_is_a_nulls(p)) return; new_hash = new_hash2 = head_hashfn(ht, new_tbl, p); @@ -252,8 +260,8 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, /* Find the subsequent node which does hash to the same * bucket as node P, or NULL if no such node exists. 
*/ - next = NULL; - if (he) { + INIT_RHT_NULLS_HEAD(next, ht, old_hash); + if (!rht_is_a_nulls(he)) { rht_for_each_continue(he, he->next, old_tbl, old_hash) { if (head_hashfn(ht, new_tbl, he) == new_hash) { next = he; @@ -369,11 +377,15 @@ int rhashtable_expand(struct rhashtable *ht) */ complete = true; for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { + struct rhash_head *head; + old_bucket_lock = bucket_lock(old_tbl, old_hash); spin_lock_bh(old_bucket_lock); hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash); - if (old_tbl->buckets[old_hash] != NULL) + head = rht_dereference_bucket(old_tbl->buckets[old_hash], + old_tbl, old_hash); + if (!rht_is_a_nulls(head)) complete = false; spin_unlock_bh(old_bucket_lock); @@ -498,6 +510,7 @@ static void rht_deferred_worker(struct work_struct *work) void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { struct bucket_table *tbl; + struct rhash_head *head; spinlock_t *lock; unsigned hash; @@ -508,7 +521,12 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) lock = bucket_lock(tbl, hash); spin_lock_bh(lock); - RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + if (rht_is_a_nulls(head)) + INIT_RHT_NULLS_HEAD(obj->next, ht, hash); + else + RCU_INIT_POINTER(obj->next, head); + rcu_assign_pointer(tbl->buckets[hash], obj); spin_unlock_bh(lock); @@ -709,6 +727,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, + * .nulls_base = (1U << RHT_BASE_SHIFT), * }; * * Configuration Example 2: Variable length keys @@ -741,6 +760,9 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) (!params->key_len && !params->obj_hashfn)) return -EINVAL; + if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) + return -EINVAL; + params->min_shift = max_t(size_t, params->min_shift, ilog2(HASH_MIN_SIZE)); @@ -974,6 +996,7 @@ static int __init test_rht_init(void) .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), .hashfn = jhash, + .nulls_base = (3U << RHT_BASE_SHIFT), .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, }; -- cgit v1.2.3 From efb975a67ea7846b966080f999589de570686aa0 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Wed, 7 Jan 2015 13:41:52 +0800 Subject: rhashtable: optimize rhashtable_lookup routine Define an internal compare function and relevant compare argument, and then make use of rhashtable_lookup_compare() to lookup key in hash table, reducing duplicated code between rhashtable_lookup() and rhashtable_lookup_compare(). Signed-off-by: Ying Xue Cc: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. 
Miller --- lib/rhashtable.c | 41 ++++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 23 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index cbad192d3b3d..f2fdd7a7cb16 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -612,6 +612,19 @@ restart: } EXPORT_SYMBOL_GPL(rhashtable_remove); +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +static bool rhashtable_compare(void *ptr, void *arg) +{ + struct rhashtable_compare_arg *x = arg; + struct rhashtable *ht = x->ht; + + return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len); +} + /** * rhashtable_lookup - lookup key in hash table * @ht: hash table @@ -627,32 +640,14 @@ EXPORT_SYMBOL_GPL(rhashtable_remove); */ void *rhashtable_lookup(struct rhashtable *ht, const void *key) { - const struct bucket_table *tbl, *old_tbl; - struct rhash_head *he; - u32 hash; + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; BUG_ON(!ht->p.key_len); - rcu_read_lock(); - old_tbl = rht_dereference_rcu(ht->tbl, ht); - tbl = rht_dereference_rcu(ht->future_tbl, ht); - hash = key_hashfn(ht, key, ht->p.key_len); -restart: - rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { - if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, - ht->p.key_len)) - continue; - rcu_read_unlock(); - return rht_obj(ht, he); - } - - if (unlikely(tbl != old_tbl)) { - tbl = old_tbl; - goto restart; - } - - rcu_read_unlock(); - return NULL; + return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg); } EXPORT_SYMBOL_GPL(rhashtable_lookup); -- cgit v1.2.3 From 54c5b7d311c8e1801f9dcce9f388a7420a25fa90 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Wed, 7 Jan 2015 13:41:53 +0800 Subject: rhashtable: introduce rhashtable_wakeup_worker helper function Introduce rhashtable_wakeup_worker() helper function to reduce duplicated code where to wake up worker. By the way, as long as the both "future_tbl" and "tbl" bucket table pointers point to the same bucket array, we should try to wake up the resizing worker thread, otherwise, it indicates the work of resizing hash table is not finished yet. However, currently we will wake up the worker thread only when the two pointers point to different bucket array. Obviously this is wrong. So, the issue is also fixed as well in the patch. Signed-off-by: Ying Xue Cc: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. Miller --- lib/rhashtable.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index f2fdd7a7cb16..20006854fce0 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -492,6 +492,19 @@ static void rht_deferred_worker(struct work_struct *work) mutex_unlock(&ht->mutex); } +static void rhashtable_wakeup_worker(struct rhashtable *ht) +{ + struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + size_t size = tbl->size; + + /* Only adjust the table if no resizing is currently in progress. */ + if (tbl == new_tbl && + ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) || + (ht->p.shrink_decision && ht->p.shrink_decision(ht, size)))) + schedule_delayed_work(&ht->run_work, 0); +} + /** * rhashtable_insert - insert object into hash hash table * @ht: hash table @@ -532,10 +545,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) atomic_inc(&ht->nelems); - /* Only grow the table if no resizing is currently in progress. 
*/ - if (ht->tbl != ht->future_tbl && - ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) - schedule_delayed_work(&ht->run_work, 0); + rhashtable_wakeup_worker(ht); rcu_read_unlock(); } @@ -584,10 +594,7 @@ restart: spin_unlock_bh(lock); - if (ht->tbl != ht->future_tbl && - ht->p.shrink_decision && - ht->p.shrink_decision(ht, tbl->size)) - schedule_delayed_work(&ht->run_work, 0); + rhashtable_wakeup_worker(ht); rcu_read_unlock(); -- cgit v1.2.3 From db30485408326a6f466a843b291b23535f63eda0 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Wed, 7 Jan 2015 13:41:54 +0800 Subject: rhashtable: involve rhashtable_lookup_insert routine Involve a new function called rhashtable_lookup_insert() which makes lookup and insertion atomic under bucket lock protection, helping us avoid to introduce an extra lock when we search and insert an object into hash table. Signed-off-by: Ying Xue Signed-off-by: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. Miller --- lib/rhashtable.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 82 insertions(+), 15 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 20006854fce0..4430233c4e11 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -505,8 +505,26 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht) schedule_delayed_work(&ht->run_work, 0); } +static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, + struct bucket_table *tbl, u32 hash) +{ + struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash], + tbl, hash); + + if (rht_is_a_nulls(head)) + INIT_RHT_NULLS_HEAD(obj->next, ht, hash); + else + RCU_INIT_POINTER(obj->next, head); + + rcu_assign_pointer(tbl->buckets[hash], obj); + + atomic_inc(&ht->nelems); + + rhashtable_wakeup_worker(ht); +} + /** - * rhashtable_insert - insert object into hash hash table + * rhashtable_insert - insert object into hash table * @ht: hash table * @obj: pointer to hash head inside object * @@ -523,7 +541,6 @@ static void rhashtable_wakeup_worker(struct rhashtable *ht) void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) { struct bucket_table *tbl; - struct rhash_head *head; spinlock_t *lock; unsigned hash; @@ -534,19 +551,9 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) lock = bucket_lock(tbl, hash); spin_lock_bh(lock); - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); - if (rht_is_a_nulls(head)) - INIT_RHT_NULLS_HEAD(obj->next, ht, hash); - else - RCU_INIT_POINTER(obj->next, head); - - rcu_assign_pointer(tbl->buckets[hash], obj); + __rhashtable_insert(ht, obj, tbl, hash); spin_unlock_bh(lock); - atomic_inc(&ht->nelems); - - rhashtable_wakeup_worker(ht); - rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rhashtable_insert); @@ -560,7 +567,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert); * walk the bucket chain upon removal. The removal operation is thus * considerable slow if the hash table is not correctly sized. * - * Will automatically shrink the table via rhashtable_expand() if the the + * Will automatically shrink the table via rhashtable_expand() if the * shrink_decision function specified at rhashtable_init() returns true. * * The caller must ensure that no concurrent table mutations occur. It is @@ -641,7 +648,7 @@ static bool rhashtable_compare(void *ptr, void *arg) * for a entry with an identical key. The first matching entry is returned. * * This lookup function may only be used for fixed key hash table (key_len - * paramter set). 
It will BUG() if used inappropriately. + * parameter set). It will BUG() if used inappropriately. * * Lookups may occur in parallel with hashtable mutations and resizing. */ @@ -702,6 +709,66 @@ restart: } EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); +/** + * rhashtable_lookup_insert - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * This lookup function may only be used for fixed key hash table (key_len + * parameter set). It will BUG() if used inappropriately. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the watermark indicated by grow_decision() which can be passed + * to rhashtable_init(). + */ +bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj) +{ + struct bucket_table *new_tbl, *old_tbl; + spinlock_t *new_bucket_lock, *old_bucket_lock; + u32 new_hash, old_hash; + bool success = true; + + BUG_ON(!ht->p.key_len); + + rcu_read_lock(); + + old_tbl = rht_dereference_rcu(ht->tbl, ht); + old_hash = head_hashfn(ht, old_tbl, obj); + old_bucket_lock = bucket_lock(old_tbl, old_hash); + spin_lock_bh(old_bucket_lock); + + new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + new_hash = head_hashfn(ht, new_tbl, obj); + new_bucket_lock = bucket_lock(new_tbl, new_hash); + if (unlikely(old_tbl != new_tbl)) + spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED); + + if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) { + success = false; + goto exit; + } + + __rhashtable_insert(ht, obj, new_tbl, new_hash); + +exit: + if (unlikely(old_tbl != new_tbl)) + spin_unlock_bh(new_bucket_lock); + spin_unlock_bh(old_bucket_lock); + + rcu_read_unlock(); + + return success; +} +EXPORT_SYMBOL_GPL(rhashtable_lookup_insert); + static size_t rounded_hashtable_size(struct rhashtable_params *params) { return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), -- cgit v1.2.3 From bd6d4db552ceb52fb19890a454836dcda59743ce Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Wed, 7 Jan 2015 13:41:55 +0800 Subject: rhashtable: future table needs to be traversed when remove an object When remove an object from hash table, we currently only traverse old bucket table to check whether the object exists. If the object is not found in it, we will try again. But in the second search loop, we still search the object from the old table instead of future table. As a result, the object may be not removed from hash table especially when resizing is currently in progress and the object is just saved in the future table. Signed-off-by: Ying Xue Cc: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. 
Miller --- lib/rhashtable.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 4430233c4e11..1aef942976fe 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -608,10 +608,10 @@ restart: return true; } - if (tbl != rht_dereference_rcu(ht->tbl, ht)) { + if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) { spin_unlock_bh(lock); - tbl = rht_dereference_rcu(ht->tbl, ht); + tbl = rht_dereference_rcu(ht->future_tbl, ht); hash = head_hashfn(ht, tbl, obj); lock = bucket_lock(tbl, hash); -- cgit v1.2.3 From c0c09bfdc4150b3918526660768585cd477adf35 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Wed, 7 Jan 2015 13:41:56 +0800 Subject: rhashtable: avoid unnecessary wakeup for worker queue Move condition statements of verifying whether hash table size exceeds its maximum threshold or reaches its minimum threshold from resizing functions to resizing decision functions, avoiding unnecessary wakeup for worker queue thread. Signed-off-by: Ying Xue Cc: Thomas Graf Acked-by: Thomas Graf Signed-off-by: David S. Miller --- lib/rhashtable.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 1aef942976fe..7fb474b18f1b 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -199,7 +199,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) { /* Expand table when exceeding 75% load */ - return atomic_read(&ht->nelems) > (new_size / 4 * 3); + return atomic_read(&ht->nelems) > (new_size / 4 * 3) && + (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift); } EXPORT_SYMBOL_GPL(rht_grow_above_75); @@ -211,7 +212,8 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) { /* Shrink table beneath 30% load */ - return atomic_read(&ht->nelems) < (new_size * 3 / 10); + return atomic_read(&ht->nelems) < (new_size * 3 / 10) && + (atomic_read(&ht->shift) > ht->p.min_shift); } EXPORT_SYMBOL_GPL(rht_shrink_below_30); @@ -318,14 +320,11 @@ int rhashtable_expand(struct rhashtable *ht) ASSERT_RHT_MUTEX(ht); - if (ht->p.max_shift && ht->shift >= ht->p.max_shift) - return 0; - new_tbl = bucket_table_alloc(ht, old_tbl->size * 2); if (new_tbl == NULL) return -ENOMEM; - ht->shift++; + atomic_inc(&ht->shift); /* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. @@ -421,9 +420,6 @@ int rhashtable_shrink(struct rhashtable *ht) ASSERT_RHT_MUTEX(ht); - if (ht->shift <= ht->p.min_shift) - return 0; - new_tbl = bucket_table_alloc(ht, tbl->size / 2); if (new_tbl == NULL) return -ENOMEM; @@ -462,7 +458,7 @@ int rhashtable_shrink(struct rhashtable *ht) /* Publish the new, valid hash table */ rcu_assign_pointer(ht->tbl, new_tbl); - ht->shift--; + atomic_dec(&ht->shift); /* Wait for readers. No new readers will have references to the * old hash table. 
@@ -851,7 +847,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (tbl == NULL)
 		return -ENOMEM;
 
-	ht->shift = ilog2(tbl->size);
+	atomic_set(&ht->shift, ilog2(tbl->size));
 	RCU_INIT_POINTER(ht->tbl, tbl);
 	RCU_INIT_POINTER(ht->future_tbl, tbl);
-- 
cgit v1.2.3


From 545a148e43bed67618cc90b66f9864fba0878890 Mon Sep 17 00:00:00 2001
From: Ying Xue
Date: Wed, 7 Jan 2015 13:41:57 +0800
Subject: rhashtable: initialize atomic nelems variable

Signed-off-by: Ying Xue
Cc: Thomas Graf
Acked-by: Thomas Graf
Signed-off-by: David S. Miller
---
 lib/rhashtable.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'lib')

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 7fb474b18f1b..8023b554905c 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -847,6 +847,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (tbl == NULL)
 		return -ENOMEM;
 
+	atomic_set(&ht->nelems, 0);
 	atomic_set(&ht->shift, ilog2(tbl->size));
 	RCU_INIT_POINTER(ht->tbl, tbl);
 	RCU_INIT_POIN