[PATCH 3.4 029/107] net: Clone skb before setting peeked flag
From: lizf
Date: Wed Mar 16 2016 - 04:09:00 EST
From: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
3.4.111-rc1 review patch. If anyone has any objections, please let me know.
------------------
commit 738ac1ebb96d02e0d23bc320302a6ea94c612dec upstream.
Shared skbs must not be modified, and this is crucial for broadcast
and/or multicast paths, where sharing is used as an optimisation to
avoid unnecessary cloning.
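
For reference, an skb counts as shared when more than one reference to
it is outstanding; in kernels of this vintage skb_shared() is
essentially a refcount test, roughly along the lines of the simplified
sketch below (a paraphrase, not a verbatim quote of
include/linux/skbuff.h):

	static inline int skb_shared(const struct sk_buff *skb)
	{
		/* Another holder exists, so any in-place modification
		 * would be visible to, and racy against, that holder.
		 */
		return atomic_read(&skb->users) != 1;
	}
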
The function skb_recv_datagram breaks this rule by setting peeked
without cloning the skb first. This causes funky races which lead
to a double free.
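
The path being patched is the MSG_PEEK branch of __skb_recv_datagram();
it is exercised whenever a receiver peeks at a datagram before reading
it, e.g. the following userspace sketch (illustrative only, not a
confirmed reproducer of the race; peek_then_read and fd are made-up
names, fd being a bound or connected datagram socket):

	#include <sys/socket.h>

	static ssize_t peek_then_read(int fd, char *buf, size_t len)
	{
		/* Look at the datagram without dequeueing it ... */
		ssize_t n = recv(fd, buf, len, MSG_PEEK);

		/* ... then actually consume it. */
		if (n >= 0)
			n = recv(fd, buf, len, 0);
		return n;
	}
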
This patch fixes this by cloning the skb and replacing the skb
in the list when setting skb->peeked.
Fixes: a59322be07c9 ("[UDP]: Only increment counter on first peek/recv")
Reported-by: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
[lizf: Backported to 3.4: adjust context]
Signed-off-by: Zefan Li <lizefan@xxxxxxxxxx>
---
net/core/datagram.c | 41 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 38 insertions(+), 3 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index da7e0c8..ba96ad9 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -127,6 +127,35 @@ out_noerr:
 	goto out;
 }
 
+static int skb_set_peeked(struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+
+	if (skb->peeked)
+		return 0;
+
+	/* We have to unshare an skb before modifying it. */
+	if (!skb_shared(skb))
+		goto done;
+
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return -ENOMEM;
+
+	skb->prev->next = nskb;
+	skb->next->prev = nskb;
+	nskb->prev = skb->prev;
+	nskb->next = skb->next;
+
+	consume_skb(skb);
+	skb = nskb;
+
+done:
+	skb->peeked = 1;
+
+	return 0;
+}
+
 /**
  *	__skb_recv_datagram - Receive a datagram skbuff
  *	@sk: socket
@@ -161,7 +190,9 @@ out_noerr:
 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
 				    int *peeked, int *off, int *err)
 {
+	struct sk_buff_head *queue = &sk->sk_receive_queue;
 	struct sk_buff *skb;
+	unsigned long cpu_flags;
 	long timeo;
 	/*
 	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -180,8 +211,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
 		 * Look at current nfs client by the way...
 		 * However, this function was correct in any case. 8)
 		 */
-		unsigned long cpu_flags;
-		struct sk_buff_head *queue = &sk->sk_receive_queue;
 
 		spin_lock_irqsave(&queue->lock, cpu_flags);
 		skb_queue_walk(queue, skb) {
@@ -191,7 +220,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
 					*off -= skb->len;
 					continue;
 				}
-				skb->peeked = 1;
+
+				error = skb_set_peeked(skb);
+				if (error)
+					goto unlock_err;
+
 				atomic_inc(&skb->users);
 			} else
 				__skb_unlink(skb, queue);
@@ -210,6 +243,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
 
 	return NULL;
 
+unlock_err:
+	spin_unlock_irqrestore(&queue->lock, cpu_flags);
 no_packet:
 	*err = error;
 	return NULL;
--
1.9.1