[PATCH v3 net-next 2/3] net/udp: Add 4-tuple hash list basis
From: Philo Lu
Date: Thu Oct 10 2024 - 05:07:07 EST
Add a new hash list, hash4, to the udp table. It will be used to
implement a 4-tuple hash for connected UDP sockets. This patch adds
the hlist to the table and implements the related helpers and
initialization; the 4-tuple hash itself is implemented in the
following patch.
Signed-off-by: Philo Lu <lulie@xxxxxxxxxxxxxxxxx>
Signed-off-by: Cambda Zhu <cambda@xxxxxxxxxxxxxxxxx>
Signed-off-by: Fred Chen <fred.cc@xxxxxxxxxxxxxxx>
Signed-off-by: Yubing Qiu <yubing.qiuyubing@xxxxxxxxxxxxxxx>
---
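Reviewer note (not intended for the git history): the 4-tuple lookup
itself only lands in the next patch. For context, below is a minimal
sketch of how a receive-path lookup could use the hash4 slots and
helpers added here. "lrpa" presumably stands for local/remote
port/address, and udp4_hash4_sketch() is a made-up placeholder for
whatever 4-tuple hash the series actually computes, so treat this as
illustrative only, not as the series' implementation.

/* Illustrative only: find a connected UDP socket by exact 4-tuple.
 * Caller is assumed to hold rcu_read_lock().
 */
static struct sock *udp4_lookup_hash4_sketch(struct net *net,
					     struct udp_table *udptable,
					     __be32 saddr, __be16 sport,
					     __be32 daddr, __be16 dport)
{
	/* Placeholder: the real series defines its own 4-tuple hash. */
	unsigned int hash = udp4_hash4_sketch(net, daddr, dport, saddr, sport);
	struct udp_hslot *hslot4 = udp_hashslot4(udptable, hash);
	struct udp_sock *up;

	udp_lrpa_for_each_entry_rcu(up, &hslot4->head) {
		struct sock *sk = (struct sock *)up;

		if (net_eq(sock_net(sk), net) &&
		    inet_sk(sk)->inet_rcv_saddr == daddr &&	/* local addr */
		    inet_sk(sk)->inet_sport == dport &&		/* local port */
		    inet_sk(sk)->inet_daddr == saddr &&		/* remote addr */
		    inet_sk(sk)->inet_dport == sport)		/* remote port */
			return sk;
	}
	return NULL;	/* miss: fall back to the existing hash2/hash paths */
}

Because only connected sockets are ever inserted into hash4, a miss
here simply falls back to the hash2/hash lookups, so unconnected
sockets see no change in behavior.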
include/linux/udp.h | 7 +++++++
include/net/udp.h | 16 +++++++++++++++-
net/ipv4/udp.c | 15 +++++++++++++--
3 files changed, 35 insertions(+), 3 deletions(-)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 3eb3f2b9a2a0..c04808360a05 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -56,6 +56,10 @@ struct udp_sock {
int pending; /* Any pending frames ? */
__u8 encap_type; /* Is this an Encapsulation socket? */
+ /* For UDP 4-tuple hash */
+ __u16 udp_lrpa_hash;
+ struct hlist_node udp_lrpa_node;
+
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -206,6 +210,9 @@ static inline void udp_allow_gso(struct sock *sk)
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
+#define udp_lrpa_for_each_entry_rcu(__up, list) \
+ hlist_for_each_entry_rcu(__up, list, udp_lrpa_node)
+
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
#endif /* _LINUX_UDP_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 595364729138..80f9622d0db3 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -50,7 +50,7 @@ struct udp_skb_cb {
#define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
/**
- * struct udp_hslot - UDP hash slot used by udp_table.hash
+ * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4
*
* @head: head of list of sockets
* @count: number of sockets in 'head' list
@@ -79,12 +79,15 @@ struct udp_hslot_main {
*
* @hash: hash table, sockets are hashed on (local port)
* @hash2: hash table, sockets are hashed on (local port, local address)
+ * @hash4: hash table, connected sockets are hashed on
+ * (local port, local address, remote port, remote address)
* @mask: number of slots in hash tables, minus 1
* @log: log2(number of slots in hash table)
*/
struct udp_table {
struct udp_hslot *hash;
struct udp_hslot_main *hash2;
+ struct udp_hslot *hash4;
unsigned int mask;
unsigned int log;
};
@@ -113,6 +116,17 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
return &table->hash2[hash & table->mask].hslot;
}
+static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
+ unsigned int hash)
+{
+ return &table->hash4[hash & table->mask];
+}
+
+static inline bool udp_hashed4(const struct sock *sk)
+{
+ return !hlist_unhashed(&udp_sk(sk)->udp_lrpa_node);
+}
+
extern struct proto udp_prot;
extern atomic_long_t udp_memory_allocated;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 36d617235acd..9f4cc6f778ce 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -3425,7 +3425,7 @@ void __init udp_table_init(struct udp_table *table, const char *name)
{
unsigned int i, slot_size;
- slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main);
+ slot_size = 2 * sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main);
table->hash = alloc_large_system_hash(name,
slot_size,
uhash_entries,
@@ -3437,6 +3437,7 @@ void __init udp_table_init(struct udp_table *table, const char *name)
UDP_HTABLE_SIZE_MAX);
table->hash2 = (void *)(table->hash + (table->mask + 1));
+ table->hash4 = (void *)(table->hash2 + (table->mask + 1));
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_HEAD(&table->hash[i].head);
table->hash[i].count = 0;
@@ -3448,6 +3449,11 @@ void __init udp_table_init(struct udp_table *table, const char *name)
spin_lock_init(&table->hash2[i].hslot.lock);
table->hash2[i].hash4_cnt = 0;
}
+ for (i = 0; i <= table->mask; i++) {
+ INIT_HLIST_HEAD(&table->hash4[i].head);
+ table->hash4[i].count = 0;
+ spin_lock_init(&table->hash4[i].lock);
+ }
}
u32 udp_flow_hashrnd(void)
@@ -3480,16 +3486,17 @@ static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_ent
if (!udptable)
goto out;
- slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main);
+ slot_size = 2 * sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main);
udptable->hash = vmalloc_huge(hash_entries * slot_size,
GFP_KERNEL_ACCOUNT);
if (!udptable->hash)
goto free_table;
udptable->hash2 = UDP_HSLOT_MAIN(udptable->hash + hash_entries);
+ udptable->hash4 = (struct udp_hslot *)(udptable->hash2 + hash_entries);
udptable->mask = hash_entries - 1;
udptable->log = ilog2(hash_entries);
for (i = 0; i < hash_entries; i++) {
INIT_HLIST_HEAD(&udptable->hash[i].head);
udptable->hash[i].count = 0;
@@ -3499,6 +3506,10 @@ static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_ent
udptable->hash2[i].hslot.count = 0;
spin_lock_init(&udptable->hash2[i].hslot.lock);
udptable->hash2[i].hash4_cnt = 0;
+
+ INIT_HLIST_HEAD(&udptable->hash4[i].head);
+ udptable->hash4[i].count = 0;
+ spin_lock_init(&udptable->hash4[i].lock);
}
return udptable;
--
2.32.0.3.g01195cf9f