[PATCH net-next v7 3/9] tun/tap: add ptr_ring consume helper with netdev queue wakeup

From: Simon Schippers
Date: Wed Jan 07 2026 - 17:04:36 EST


Introduce {tun,tap}_ring_consume() helpers that wrap __ptr_ring_consume()
and wake the corresponding netdev subqueue when consuming an entry frees
space in the underlying ptr_ring.

Stopping of the netdev queue when the ptr_ring is full will be introduced
in an upcoming commit.

Co-developed-by: Tim Gebauer <tim.gebauer@xxxxxxxxxxxxxx>
Signed-off-by: Tim Gebauer <tim.gebauer@xxxxxxxxxxxxxx>
Signed-off-by: Simon Schippers <simon.schippers@xxxxxxxxxxxxxx>
---
drivers/net/tap.c | 23 ++++++++++++++++++++++-
drivers/net/tun.c | 25 +++++++++++++++++++++++--
2 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 1197f245e873..2442cf7ac385 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -753,6 +753,27 @@ static ssize_t tap_put_user(struct tap_queue *q,
return ret ? ret : total;
}

+/* Consume from the ring; wake the TX subqueue if space was created. */
+static void *tap_ring_consume(struct tap_queue *q)
+{
+	struct ptr_ring *ring = &q->ring;
+	struct tap_dev *tap;
+	void *ptr;
+
+	spin_lock(&ring->consumer_lock);
+	ptr = __ptr_ring_consume(ring);
+	if (unlikely(ptr && __ptr_ring_consume_created_space(ring, 1))) {
+		rcu_read_lock();
+		/* NULL once the queue has been detached from the tap device */
+		tap = rcu_dereference(q->tap);
+		if (tap)
+			netif_wake_subqueue(tap->dev, q->queue_index);
+		rcu_read_unlock();
+	}
+	spin_unlock(&ring->consumer_lock);
+	return ptr;
+}
+
static ssize_t tap_do_read(struct tap_queue *q,
struct iov_iter *to,
int noblock, struct sk_buff *skb)
@@ -774,7 +795,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
TASK_INTERRUPTIBLE);

/* Read frames from the queue */
- skb = ptr_ring_consume(&q->ring);
+ skb = tap_ring_consume(q);
if (skb)
break;
if (noblock) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8192740357a0..7148f9a844a4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2113,13 +2113,34 @@ static ssize_t tun_put_user(struct tun_struct *tun,
return total;
}

+/* Consume from the ring; wake the TX subqueue if space was created. */
+static void *tun_ring_consume(struct tun_file *tfile)
+{
+	struct ptr_ring *ring = &tfile->tx_ring;
+	struct tun_struct *tun;
+	void *ptr;
+
+	spin_lock(&ring->consumer_lock);
+	ptr = __ptr_ring_consume(ring);
+	if (unlikely(ptr && __ptr_ring_consume_created_space(ring, 1))) {
+		rcu_read_lock();
+		/* NULL once the queue has been detached from the tun device */
+		tun = rcu_dereference(tfile->tun);
+		if (tun)
+			netif_wake_subqueue(tun->dev, tfile->queue_index);
+		rcu_read_unlock();
+	}
+	spin_unlock(&ring->consumer_lock);
+	return ptr;
+}
+
static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
DECLARE_WAITQUEUE(wait, current);
void *ptr = NULL;
int error = 0;

- ptr = ptr_ring_consume(&tfile->tx_ring);
+ ptr = tun_ring_consume(tfile);
if (ptr)
goto out;
if (noblock) {
@@ -2131,7 +2152,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)

while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- ptr = ptr_ring_consume(&tfile->tx_ring);
+ ptr = tun_ring_consume(tfile);
if (ptr)
break;
if (signal_pending(current)) {
--
2.43.0