[PATCH] rhashtable: Restore insecure_elasticity toggle
From: Herbert Xu
Date: Fri Apr 17 2026 - 21:38:50 EST
Some users of rhashtable cannot handle insertion failures, and
are happy to accept the consequences of a hash table that has
very long chains.
Restore the insecure_elasticity toggle for these users. In
addition to disabling the chain length checks, this also removes
the emergency resize that would otherwise occur when the hash
table occupancy hits 100% (an async resize is still scheduled
at 75%).
Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
index 015c8298bebc..72082428d6c6 100644
--- a/include/linux/rhashtable-types.h
+++ b/include/linux/rhashtable-types.h
@@ -49,6 +49,7 @@ typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
* @head_offset: Offset of rhash_head in struct to be hashed
* @max_size: Maximum size while expanding
* @min_size: Minimum size while shrinking
+ * @insecure_elasticity: Set to true to disable chain length checks
* @automatic_shrinking: Enable automatic shrinking of tables
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
@@ -61,6 +62,7 @@ struct rhashtable_params {
u16 head_offset;
unsigned int max_size;
u16 min_size;
+ bool insecure_elasticity;
bool automatic_shrinking;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 0480509a6339..c793849d3f61 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -821,14 +821,15 @@ static __always_inline void *__rhashtable_insert_fast(
goto out;
}
- if (elasticity <= 0)
+ if (elasticity <= 0 && !params->insecure_elasticity)
goto slow_path;
data = ERR_PTR(-E2BIG);
if (unlikely(rht_grow_above_max(ht, tbl)))
goto out_unlock;
- if (unlikely(rht_grow_above_100(ht, tbl)))
+ if (unlikely(rht_grow_above_100(ht, tbl)) &&
+ !params->insecure_elasticity)
goto slow_path;
/* Inserting at head of list makes unlocking free. */
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6074ed5f66f3..b60d55e5b19b 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -538,7 +538,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
return NULL;
}
- if (elasticity <= 0)
+ if (elasticity <= 0 && !ht->p->insecure_elasticity)
return ERR_PTR(-EAGAIN);
return ERR_PTR(-ENOENT);
@@ -568,7 +568,8 @@ static struct bucket_table *rhashtable_insert_one(
if (unlikely(rht_grow_above_max(ht, tbl)))
return ERR_PTR(-E2BIG);
- if (unlikely(rht_grow_above_100(ht, tbl)))
+ if (unlikely(rht_grow_above_100(ht, tbl)) &&
+ !ht->p->insecure_elasticity)
return ERR_PTR(-EAGAIN);
head = rht_ptr(bkt, tbl, hash);
--
Email: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt