[PATCH 3/5] rhashtable: allow rht_bucket_var to return NULL.

From: NeilBrown
Date: Fri Jul 06 2018 - 03:23:30 EST


Rather than returning a pointer to a static nulls marker, rht_bucket_var()
now returns NULL if the bucket doesn't exist.
This will make the next patch, which stores a bitlock in the
bucket pointer, somewhat cleaner.
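
As an illustration of the new calling convention, here is a minimal
userspace sketch (not the kernel code; the toy_* names are invented for
this example) of a lazily allocated nested bucket table whose lookup
returns NULL for a bucket whose sub-table was never allocated, together
with the caller-side NULL check that the hunks below add:

#include <stdio.h>
#include <stdlib.h>

struct toy_head { struct toy_head *next; };

struct toy_table {
	unsigned int nest;	/* >0: buckets[] holds lazily allocated sub-tables */
	void **buckets;
};

/*
 * Like the patched rht_bucket_var(): return the address of the bucket
 * head, or NULL when the sub-table that would hold it was never
 * allocated, instead of a pointer to a shared static nulls marker.
 */
static struct toy_head **toy_bucket_var(struct toy_table *tbl,
					unsigned int hash)
{
	struct toy_head **sub;

	if (!tbl->nest)
		return (struct toy_head **)&tbl->buckets[hash];
	sub = tbl->buckets[hash & ((1u << tbl->nest) - 1)];
	if (!sub)		/* empty bucket: nothing to point at */
		return NULL;
	return &sub[hash >> tbl->nest];
}

int main(void)
{
	struct toy_table tbl = { .nest = 2 };
	struct toy_head **pprev, *he;

	tbl.buckets = calloc(1u << tbl.nest, sizeof(void *));

	pprev = toy_bucket_var(&tbl, 5);
	if (!pprev) {		/* the caller pattern this patch adds */
		printf("bucket 5: sub-table absent, treat as empty\n");
	} else {
		for (he = *pprev; he; he = he->next)
			;	/* walk as rht_for_each_continue() would */
	}
	free(tbl.buckets);
	return 0;
}

Note that rht_bucket_nested() itself keeps the old behaviour for
lockless readers: when __rht_bucket_nested() returns NULL it falls back
to the static nulls marker via the GCC "?:" extension, as the last
lib/rhashtable.c hunk shows.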

Signed-off-by: NeilBrown <neilb@xxxxxxxx>
---
 include/linux/rhashtable.h | 11 +++++++++--
 lib/rhashtable.c           | 29 ++++++++++++++++++++---------
2 files changed, 29 insertions(+), 11 deletions(-)

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 26ed3b299028..a4ff6ae524a0 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -284,6 +284,8 @@ void rhashtable_destroy(struct rhashtable *ht);
 
 struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 					    unsigned int hash);
+struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+					      unsigned int hash);
 struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 						   struct bucket_table *tbl,
 						   unsigned int hash);
@@ -313,7 +315,7 @@ static inline struct rhash_head __rcu *const *rht_bucket(
 static inline struct rhash_head __rcu **rht_bucket_var(
 	struct bucket_table *tbl, unsigned int hash)
 {
-	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
 				     &tbl->buckets[hash];
 }
 
@@ -916,6 +918,8 @@ static inline int __rhashtable_remove_fast_one(
 	spin_lock_bh(lock);
 
 	pprev = rht_bucket_var(tbl, hash);
+	if (!pprev)
+		goto out;
 	rht_for_each_continue(he, *pprev, tbl, hash) {
 		struct rhlist_head *list;
 
@@ -960,6 +964,7 @@ static inline int __rhashtable_remove_fast_one(
 		break;
 	}
 
+out:
 	spin_unlock_bh(lock);
 
 	if (err > 0) {
@@ -1068,6 +1073,8 @@ static inline int __rhashtable_replace_fast(
 	spin_lock_bh(lock);
 
 	pprev = rht_bucket_var(tbl, hash);
+	if (!pprev)
+		goto out;
 	rht_for_each_continue(he, *pprev, tbl, hash) {
 		if (he != obj_old) {
 			pprev = &he->next;
@@ -1079,7 +1086,7 @@ static inline int __rhashtable_replace_fast(
 		err = 0;
 		break;
 	}
-
+out:
 	spin_unlock_bh(lock);
 
 	return err;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 27a5ffa993d4..7a68c1f0b6d0 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -240,8 +240,10 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 		goto out;
 
 	err = -ENOENT;
+	if (!pprev)
+		goto out;
 
-	rht_for_each(entry, old_tbl, old_hash) {
+	rht_for_each_continue(entry, *pprev, old_tbl, old_hash) {
 		err = 0;
 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
 
@@ -498,6 +500,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 
 	elasticity = RHT_ELASTICITY;
 	pprev = rht_bucket_var(tbl, hash);
+	if (!pprev)
+		return ERR_PTR(-ENOENT);
 	rht_for_each_continue(head, *pprev, tbl, hash) {
 		struct rhlist_head *list;
 		struct rhlist_head *plist;
@@ -1160,11 +1164,10 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-					    unsigned int hash)
+struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+					      unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-	static struct rhash_head __rcu *rhnull;
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
 	unsigned int size = tbl->size >> tbl->nest;
 	unsigned int subhash = hash;
@@ -1182,15 +1185,23 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 		subhash >>= shift;
 	}
 
-	if (!ntbl) {
-		if (!rhnull)
-			INIT_RHT_NULLS_HEAD(rhnull);
-		return &rhnull;
-	}
+	if (!ntbl)
+		return NULL;
 
 	return &ntbl[subhash].bucket;
 
 }
+EXPORT_SYMBOL_GPL(__rht_bucket_nested);
+
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+					    unsigned int hash)
+{
+	static struct rhash_head __rcu *rhnull;
+
+	if (!rhnull)
+		INIT_RHT_NULLS_HEAD(rhnull);
+	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
+}
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
 struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,