[RFC PATCH 3/2] (Rant) Fix various hash abuses
From: George Spelvin
Date: Mon May 02 2016 - 00:00:22 EST
(This patch is not seriously meant to be applied as-is, but should
be divided up and sent to the various subsystems. I haven't even
compiled all of the code I touched.)
While checking the call sites of hash functions for the previous
patches, I found numerous stupidities and even outright bugs. This
patch fixes them.
Particularly common was calling hash_long() on values declared as
32 bits, where hash_32() would be just as good and faster. (Except
for the different rounding of the constant derived from phi, it would
compute the same hash value!)
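Purely for illustration (not part of the patch; example_bucket() is a
made-up name), the pattern being cleaned up is:

#include <linux/hash.h>

/*
 * The key is a u32, so hash_32() is enough.  hash_long() on a 64-bit
 * kernel is hash_64(), which zero-extends the argument and does a
 * wider multiply for no better distribution.
 */
static inline unsigned int example_bucket(u32 key)
{
	return hash_32(key, 8);		/* was: hash_long(key, 8) */
}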
The Mellanox mlx4 driver did this and a bit more: it XORed together IP
addresses into a 32-bit value and then apparently hoped that hash_long()
would fix the resultant collisions. Migrated it to jhash_3words().
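The shape of the replacement, as an illustrative standalone helper
(made-up name; the real change is in the en_netdev.c hunk below):

#include <linux/jhash.h>

/*
 * Pack both ports into one word and let jhash mix all three words.
 * With the old XOR scheme, swapping the two addresses always landed
 * in the same bucket; jhash_3words() is order-sensitive.  The
 * ">> (32 - 8)" stands in for the driver's hash-table shift.
 */
static inline u32 flow_bucket(__be32 saddr, __be32 daddr,
			      __be16 sport, __be16 dport)
{
	u32 ports = (__force u32)sport << 16 | (__force u32)dport;

	return jhash_3words(ports, (__force u32)saddr,
			    (__force u32)daddr, 0) >> (32 - 8);
}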
Lustre had several places where it did the opposite: used hash_long()
on 64-bit values. That would ignore the high 32 bits on a 32-bit kernel.
It's not all Lustre's fault; the same bug was in include/linux/hashtable.h.
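Again just an illustration (made-up name) of why that matters:

#include <linux/hash.h>

/*
 * On a 32-bit kernel hash_long() is hash_32(), so a u64 argument is
 * silently truncated to its low 32 bits at the call; any two keys
 * differing only in the high half always collide.  hash_64() mixes
 * all 64 bits on both word sizes.
 */
static inline unsigned int cookie_bucket(u64 cookie)
{
	return hash_64(cookie, 10);	/* was: hash_long(cookie, 10) */
}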
In several places, I changed hash_long() on a cast pointer to
hash_ptr(). It does the same thing, just with less clutter.
CIFS did some strange things with hash_64() to squeeze a 64-bit unique
file ID into a 32-bit ino_t; that is better done with modular reduction
(sketched below, and implemented in the cifsfs.h hunk).
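The arithmetic, as a standalone helper with a made-up name:

#include <linux/types.h>

/*
 * Since 2^32 == 1 (mod 2^32 - 1), adding the high 32-bit half into the
 * low half preserves the value modulo 2^32 - 1.  After the first fold
 * the sum is at most 0x1fffffffe, so the second fold cannot carry out
 * of 32 bits, and a nonzero input never folds to zero.
 */
static inline u32 fold_mod_2e32_minus_1(u64 v)
{
	v = (v & 0xffffffff) + (v >> 32);
	v = (v & 0xffffffff) + (v >> 32);
	return (u32)v;
}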
ima_hash_key() from security/integrity/ima/ima.h was too hard to figure
out, so I just added a rude comment to it; it doesn't inspire feelings
of security or integrity.
Signed-off-by: George Spelvin <linux@xxxxxxxxxxx>
Cc: kvm-ppc@xxxxxxxxxxxxxxx
Cc: Alexander Graf <agraf@xxxxxxxx>
Cc: linux-bcache@xxxxxxxxxxxxxxx
Cc: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Cc: Eugenia Emantayev <eugenia@xxxxxxxxxxxx>
Cc: lustre-devel@xxxxxxxxxxxxxxxx
Cc: Oleg Drokin <oleg.drokin@xxxxxxxxx>
Cc: Andreas Dilger <andreas.dilger@xxxxxxxxx>
Cc: Steve French <sfrench@xxxxxxxxx>
Cc: linux-cifs@xxxxxxxxxxxxxxx
Cc: Tyler Hicks <tyhicks@xxxxxxxxxxxxx>
Cc: ecryptfs@xxxxxxxxxxxxxxx
Cc: "J. Bruce Fields" <bfields@xxxxxxxxxxxx>
Cc: Jeff Layton <jlayton@xxxxxxxxxxxxxxx>
Cc: Sasha Levin <levinsasha928@xxxxxxxxx>
Cc: Peter Zijlstra <pzijlstr@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Eric W. Biederman <ebiederm@xxxxxxxxxxxx>
Cc: Mimi Zohar <zohar@xxxxxxxxxxxxxxxxxx>
Cc: Dmitry Kasatkin <dmitry.kasatkin@xxxxxxxxx>
Cc: linux-ima-devel@xxxxxxxxxxxxxxxxxxxxx
Cc: Kentaro Takeda <takedakn@xxxxxxxxxxxxx>
Cc: Tetsuo Handa <penguin-kernel@xxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
---
(Cc: list above is a reminder to self. I haven't spammed everyone on it
yet while I think about this.)
arch/powerpc/kvm/book3s_mmu_hpte.c | 6 +++---
drivers/md/bcache/request.c | 2 +-
drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 13 ++++---------
drivers/staging/lustre/include/linux/lnet/lib-lnet.h | 2 +-
drivers/staging/lustre/lnet/lnet/api-ni.c | 2 +-
drivers/staging/lustre/lnet/lnet/lib-ptl.c | 4 ++--
drivers/staging/lustre/lustre/include/lustre_fid.h | 2 +-
drivers/staging/lustre/lustre/ldlm/ldlm_resource.c | 4 ++--
drivers/staging/lustre/lustre/obdclass/lu_object.c | 4 ++--
fs/cifs/cifsfs.h | 16 ++++++++++------
fs/ecryptfs/messaging.c | 2 +-
fs/nfsd/nfs4idmap.c | 4 ++--
include/linux/hashtable.h | 2 +-
kernel/locking/lockdep.c | 2 +-
kernel/sched/wait.c | 3 +--
lib/debugobjects.c | 2 +-
net/sunrpc/auth.c | 2 +-
net/sunrpc/svcauth_unix.c | 2 +-
security/integrity/ima/ima.h | 11 ++++++++++-
security/tomoyo/memory.c | 2 +-
tools/perf/builtin-lock.c | 4 ++--
21 files changed, 49 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 5a1ab125..e0499ac9 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -41,7 +41,7 @@ static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
- return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
+ return hash_32((eaddr & 0x0ffff000) >> PTE_SIZE,
HPTEG_HASH_BITS_PTE_LONG);
}
@@ -52,14 +52,14 @@ static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
- return hash_64((vpage & 0xffffff000ULL) >> 12,
+ return hash_32((vpage & 0xffffff000ULL) >> 12,
HPTEG_HASH_BITS_VPTE_LONG);
}
#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
- return hash_64((vpage & 0xffffffff0ULL) >> 4,
+ return hash_32((vpage & 0xffffffff0ULL) >> 4,
HPTEG_HASH_BITS_VPTE_64K);
}
#endif
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 25fa8445..5137ab31 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.c = d->c;
s->iop.bio = NULL;
s->iop.inode = d->id;
- s->iop.write_point = hash_long((unsigned long) current, 16);
+ s->iop.write_point = hash_ptr(current, 16);
s->iop.write_prio = 0;
s->iop.error = 0;
s->iop.flags = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b4b258c8..180d0b7d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -36,7 +36,7 @@
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/hash.h>
+#include <linux/jhash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
@@ -194,14 +194,9 @@ static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
__be16 src_port, __be16 dst_port)
{
- unsigned long l;
- int bucket_idx;
-
- l = (__force unsigned long)src_port |
- ((__force unsigned long)dst_port << 2);
- l ^= (__force unsigned long)(src_ip ^ dst_ip);
-
- bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
+ u32 ports = (__force u32)src_port << 16 | (__force u32)dst_port;
+ int bucket_idx = jhash_3words(ports, (__force u32)src_ip,
+ (__force u32)dst_ip, 0) >> (32 - MLX4_EN_FILTER_HASH_SHIFT);
return &priv->filter_hash[bucket_idx];
}
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index dfc0208d..9b42fb55 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -428,7 +428,7 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist);
static inline int
lnet_nid2peerhash(lnet_nid_t nid)
{
- return hash_long(nid, LNET_PEER_HASH_BITS);
+ return hash_64(nid, LNET_PEER_HASH_BITS);
}
static inline struct list_head *
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 87647555..4de382a2 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -700,7 +700,7 @@ lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
if (number == 1)
return 0;
- val = hash_long(key, LNET_CPT_BITS);
+ val = hash_64(key, LNET_CPT_BITS);
/* NB: LNET_CP_NUMBER doesn't have to be PO2 */
if (val < number)
return val;
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 3947e8b7..dc600c3b 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -360,13 +360,13 @@ lnet_mt_match_head(struct lnet_match_table *mtable,
lnet_process_id_t id, __u64 mbits)
{
struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
- unsigned long hash = mbits;
+ u64 hash = mbits;
if (!lnet_ptl_is_wildcard(ptl)) {
hash += id.nid + id.pid;
LASSERT(lnet_ptl_is_unique(ptl));
- hash = hash_long(hash, LNET_MT_HASH_BITS);
+ hash = hash_64(hash, LNET_MT_HASH_BITS);
}
return &mtable->mt_mhash[hash & LNET_MT_HASH_MASK];
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index ab4a9239..5a19bd50 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -594,7 +594,7 @@ static inline __u32 fid_hash(const struct lu_fid *f, int bits)
/* all objects with same id and different versions will belong to same
* collisions list.
*/
- return hash_long(fid_flatten(f), bits);
+ return hash_64(fid_flatten(f), bits);
}
/**
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 9dede87a..d3b66496 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -475,9 +475,9 @@ static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
} else {
val = fid_oid(&fid);
}
- hash = hash_long(hash, hs->hs_bkt_bits);
+ hash = hash_32(hash, hs->hs_bkt_bits);
/* give me another random factor */
- hash -= hash_long((unsigned long)hs, val % 11 + 3);
+ hash -= hash_ptr(hs, val % 11 + 3);
hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 978568ad..369a193b 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -869,10 +869,10 @@ static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
hash = fid_flatten32(fid);
hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
- hash = hash_long(hash, hs->hs_bkt_bits);
+ hash = hash_32(hash, hs->hs_bkt_bits);
/* give me another random factor */
- hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
+ hash -= hash_ptr(hs, fid_oid(fid) % 11 + 3);
hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 83aac8ba..b10522e3 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -22,20 +22,24 @@
#ifndef _CIFSFS_H
#define _CIFSFS_H
-#include <linux/hash.h>
-
#define ROOT_I 2
/*
* ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
- * so that it will fit. We use hash_64 to convert the value to 31 bits, and
- * then add 1, to ensure that we don't end up with a 0 as the value.
+ * so that it will fit, and we have to ensure that only a zero fileid will
+ * map to the zero ino_t.
+ *
+ * This can be done very simply by reducing mod-2^32-1, similar to IP
+ * checksums. After the first addition, the sum is at most 0x1fffffffe.
+ * The second sum cannot overflow 32 bits.
*/
static inline ino_t
cifs_uniqueid_to_ino_t(u64 fileid)
{
- if ((sizeof(ino_t)) < (sizeof(u64)))
- return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
+ if (sizeof(ino_t) < sizeof(u64)) {
+ fileid = (fileid & 0xffffffff) + (fileid >> 32);
+ fileid = (fileid & 0xffffffff) + (fileid >> 32);
+ }
return (ino_t)fileid;
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 286f10b0..bd5ae06b 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -33,7 +33,7 @@ static struct hlist_head *ecryptfs_daemon_hash;
struct mutex ecryptfs_daemon_hash_mux;
static int ecryptfs_hash_bits;
#define ecryptfs_current_euid_hash(uid) \
- hash_long((unsigned long)from_kuid(&init_user_ns, current_euid()), ecryptfs_hash_bits)
+ hash_32(from_kuid(&init_user_ns, current_euid()), ecryptfs_hash_bits)
static u32 ecryptfs_msg_counter;
static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 5b20577d..8e99a418 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -111,8 +111,8 @@ idtoname_hash(struct ent *ent)
{
uint32_t hash;
- hash = hash_str(ent->authname, ENT_HASHBITS);
- hash = hash_long(hash ^ ent->id, ENT_HASHBITS);
+ hash = hash_str(ent->authname, 32);
+ hash = hash_32(hash ^ ent->id, ENT_HASHBITS);
/* Flip LSB for user/group */
if (ent->type == IDMAP_TYPE_GROUP)
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 661e5c2a..91e3fc5d 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -28,7 +28,7 @@
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
- (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_64(val, bits))
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 78c1c0ee..11dca9fc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -286,7 +286,7 @@ LIST_HEAD(all_lock_classes);
*/
#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
-#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
+#define __classhashfn(key) hash_ptr(key, CLASSHASH_BITS)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
static struct hlist_head classhash_table[CLASSHASH_SIZE];
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index f15d6b6a..a23611d2 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -485,9 +485,8 @@ EXPORT_SYMBOL(wake_up_bit);
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
- const int shift = BITS_PER_LONG == 32 ? 5 : 6;
const struct zone *zone = page_zone(virt_to_page(word));
- unsigned long val = (unsigned long)word << shift | bit;
+ unsigned long val = (unsigned long)word * BITS_PER_LONG + bit;
return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 519b5a10..df65cfba 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -242,7 +242,7 @@ static void debug_objects_oom(void)
*/
static struct debug_bucket *get_bucket(unsigned long addr)
{
- unsigned long hash;
+ unsigned int hash;
hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
return &obj_hash[hash];
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 02f53674..1524dd3b 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -551,7 +551,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
*entry, *new;
unsigned int nr;
- nr = hash_long(from_kuid(&init_user_ns, acred->uid), cache->hashbits);
+ nr = hash_32(from_kuid(&init_user_ns, acred->uid), cache->hashbits);
rcu_read_lock();
hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) {
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index dfacdc95..44773bdc 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -416,7 +416,7 @@ struct unix_gid {
static int unix_gid_hash(kuid_t uid)
{
- return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
+ return hash_32(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}
static void unix_gid_put(struct kref *kref)
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 5d0f6116..80296292 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -135,9 +135,18 @@ struct ima_h_table {
};
extern struct ima_h_table ima_htable;
+/*
+ * FIXME: What the hell is the point of hashing one byte to produce
+ * a 9-bit hash value? Should this just be "return *digest"? Or should
+ * we be hashing more than the first byte of the digest? Callers seem
+ * to pass a buffer of TPM_DIGEST_SIZE bytes.
+ *
+ * It's always fun to be WTFing over "security" code for
+ * "integrity management".
+ */
static inline unsigned long ima_hash_key(u8 *digest)
{
- return hash_long(*digest, IMA_HASH_BITS);
+ return hash_32(*digest, IMA_HASH_BITS);
}
enum ima_hooks {
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
index 0e995716..594c4aac 100644
--- a/security/tomoyo/memory.c
+++ b/security/tomoyo/memory.c
@@ -155,7 +155,7 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name)
return NULL;
len = strlen(name) + 1;
hash = full_name_hash((const unsigned char *) name, len - 1);
- head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
+ head = &tomoyo_name_list[hash_32(hash, TOMOYO_HASH_BITS)];
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return NULL;
list_for_each_entry(ptr, head, head.list) {
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index ce3bfb48..34f764a9 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -35,8 +35,8 @@ static struct perf_session *session;
static struct list_head lockhash_table[LOCKHASH_SIZE];
-#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
-#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
+#define __lockhashfn(addr) hash_ptr(addr, LOCKHASH_BITS)
+#define lockhashentry(addr) (lockhash_table + __lockhashfn(addr))
struct lock_stat {
struct list_head hash_entry;
--
2.8.1