[PATCH bpf-next v1 1/3] bpf, sockmap: avoid using sk_socket after free

From: Jiayuan Chen
Date: Wed Feb 26 2025 - 08:24:56 EST


Use an RCU read-side critical section to protect sk->sk_socket, so that a
concurrent close() in another thread cannot release the socket while the
verdict data_ready callback is still dereferencing it.
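
The reported use-after-free corresponds to an interleaving roughly like
the following (a simplified sketch; the exact close-side call chain
depends on the socket family):
'''
CPU 0 (sk_data_ready callback)        CPU 1
sk_psock_verdict_data_ready()
  sock = sk->sk_socket;
                                      close()
                                        sock_orphan(sk)
                                          /* sk->sk_socket = NULL */
                                        sock_release(sock)
                                          /* struct socket freed */
  ops = READ_ONCE(sock->ops);
    /* use-after-free */
'''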

Because the TCP/UDP receive paths already run inside a relatively large
RCU read-side critical section:
'''
ip_local_deliver_finish
  rcu_read_lock
    ip_protocol_deliver_rcu
      tcp_rcv/udp_rcv
  rcu_read_unlock
'''

adding rcu_read_{un}lock() at the entry and exit of sk_data_ready does
not add measurable overhead there.
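
As a rough sketch of why (simplified; the exact cost of rcu_read_lock()
depends on the kernel's preemption model), the new read-side section
merely nests inside the one the IP receive path already holds, so it
issues no barriers or atomic operations:
'''
rcu_read_lock();            /* outer, taken in ip_local_deliver_finish */
  ...
  sk->sk_data_ready(sk);
    rcu_read_lock();        /* nested: only a counter/preempt-count bump */
    ...
    rcu_read_unlock();
  ...
rcu_read_unlock();          /* readers block grace periods until here */
'''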

Reported-by: syzbot+dd90a702f518e0eac072@xxxxxxxxxxxxxxxxxxxxxxxxx
Closes: https://lore.kernel.org/bpf/6734c033.050a0220.2a2fcc.0015.GAE@xxxxxxxxxx/
Signed-off-by: Jiayuan Chen <jiayuan.chen@xxxxxxxxx>
---
net/core/skmsg.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 0ddc4c718833..1b71ae1d1bf5 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1222,27 +1222,35 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)

static void sk_psock_verdict_data_ready(struct sock *sk)
{
- struct socket *sock = sk->sk_socket;
+ struct socket *sock;
const struct proto_ops *ops;
int copied;

trace_sk_data_ready(sk);

+ /* Hold RCU to keep sk_socket from being released under us:
+ * especially for Unix sockets, this callback runs in process
+ * context and has no RCU protection of its own.
+ */
+ rcu_read_lock();
+ sock = sk->sk_socket;
if (unlikely(!sock))
- return;
+ goto unlock;
+
ops = READ_ONCE(sock->ops);
if (!ops || !ops->read_skb)
- return;
+ goto unlock;
+
copied = ops->read_skb(sk, sk_psock_verdict_recv);
if (copied >= 0) {
struct sk_psock *psock;

- rcu_read_lock();
psock = sk_psock(sk);
if (psock)
sk_psock_data_ready(sk, psock);
- rcu_read_unlock();
}
+unlock:
+ rcu_read_unlock();
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
--
2.47.1