[PATCH net v4 4/8] net/sched: netem: refactor dequeue into helper functions
From: Stephen Hemminger
Date: Mon Apr 06 2026 - 13:29:24 EST
Extract the tfifo removal, slot accounting, and child/direct dequeue
paths from the monolithic netem_dequeue() into separate helpers:
netem_pull_tfifo()     - remove the head packet from the tfifo
netem_slot_account()   - update slot pacing counters
netem_dequeue_child()  - enqueue to the child, then dequeue from it
netem_dequeue_direct() - dequeue from the tfifo when no child is attached
This replaces the goto-based control flow with straightforward function
calls, making the code easier to follow and modify.
No functional change intended.
Signed-off-by: Stephen Hemminger <stephen@xxxxxxxxxxxxxxxxxx>
---
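Notes (not for the changelog): the goto-free shape of the new
netem_dequeue() can be pictured with a minimal userspace sketch.
This is an illustration only, not kernel code: the rbtree overflow
path, the child qdisc, slot gating, and all stats are omitted, and
the struct pkt / struct netem types below are invented for the
example.

#include <stdio.h>
#include <stdint.h>

struct pkt {
        struct pkt *next;
        uint64_t time_to_send;          /* absolute release time, ns */
};

struct netem {
        struct pkt *t_head;             /* time-sorted tfifo, earliest first */
};

/* mirrors netem_pull_tfifo(): pop the earliest packet, if any */
static struct pkt *pull_tfifo(struct netem *q)
{
        struct pkt *skb = q->t_head;

        if (skb) {
                q->t_head = skb->next;
                skb->next = NULL;
        }
        return skb;
}

/* mirrors netem_dequeue_direct(): release the head once its time has come */
static struct pkt *dequeue_direct(struct netem *q, uint64_t now)
{
        struct pkt *skb = q->t_head;

        if (!skb || skb->time_to_send > now)
                return NULL;
        return pull_tfifo(q);
}

/* the goto-free top level; the print stands in for the watchdog */
static struct pkt *dequeue(struct netem *q, uint64_t now)
{
        struct pkt *skb = dequeue_direct(q, now);

        if (skb)
                return skb;             /* the "deliver" label in the patch */
        if (q->t_head)
                printf("watchdog at %llu\n",
                       (unsigned long long)q->t_head->time_to_send);
        return NULL;
}

int main(void)
{
        struct pkt p2 = { NULL, 300 }, p1 = { &p2, 100 };
        struct netem q = { &p1 };

        printf("%p\n", (void *)dequeue(&q, 200));       /* p1 released */
        printf("%p\n", (void *)dequeue(&q, 200));       /* p2 still pending */
        return 0;
}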
net/sched/sch_netem.c | 190 +++++++++++++++++++++++++++---------------
1 file changed, 123 insertions(+), 67 deletions(-)
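The slot budget rule that netem_slot_account() factors out can be
modeled the same way. Again a sketch under stated assumptions: the
get_slot_next() stand-in below just reopens the slot with an
arbitrary fresh budget, whereas the real kernel helper also draws
the next slot boundary from the configured distribution.

#include <stdio.h>
#include <stdint.h>

struct slot {
        uint64_t slot_next;             /* 0 means slotting disabled */
        int packets_left;
        int bytes_left;
};

/* stand-in: reopen the slot 10us later with a fresh budget */
static void get_slot_next(struct slot *s, uint64_t now)
{
        s->slot_next = now + 10000;
        s->packets_left = 4;
        s->bytes_left = 6000;
}

/* mirrors netem_slot_account(): charge one packet, renew on exhaustion */
static void slot_account(struct slot *s, int pkt_len, uint64_t now)
{
        if (!s->slot_next)
                return;                 /* slotting not configured */

        s->packets_left--;
        s->bytes_left -= pkt_len;
        if (s->packets_left <= 0 || s->bytes_left <= 0)
                get_slot_next(s, now);
}

int main(void)
{
        struct slot s = { 1, 2, 3000 }; /* open slot, budget of 2/3000 */

        slot_account(&s, 1500, 100000); /* one packet of budget remains */
        printf("left: %d pkts, %d bytes\n", s.packets_left, s.bytes_left);
        slot_account(&s, 1500, 200000); /* budget exhausted, slot renews */
        printf("renewed: next=%llu, %d pkts\n",
               (unsigned long long)s.slot_next, s.packets_left);
        return 0;
}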
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 556f9747f0e7..e264f7aefb97 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -689,99 +689,155 @@ static struct sk_buff *netem_peek(struct netem_sched_data *q)
return q->t_head;
}
-static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
+/*
+ * Pop the head packet from the tfifo and prepare it for delivery.
+ * skb->dev shares the rbnode area and must be restored after removal.
+ */
+static struct sk_buff *netem_pull_tfifo(struct netem_sched_data *q,
+ struct Qdisc *sch)
{
- if (skb == q->t_head) {
+ struct sk_buff *skb;
+
+ if (q->t_head) {
+ skb = q->t_head;
q->t_head = skb->next;
if (!q->t_head)
q->t_tail = NULL;
} else {
- rb_erase(&skb->rbnode, &q->t_root);
+ struct rb_node *p = rb_first(&q->t_root);
+
+ if (!p)
+ return NULL;
+ skb = rb_to_skb(p);
+ rb_erase(p, &q->t_root);
}
+
+ q->t_len--;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->dev = qdisc_dev(sch);
+
+ return skb;
}
-static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+/* Update slot pacing counters after releasing a packet */
+static void netem_slot_account(struct netem_sched_data *q,
+ const struct sk_buff *skb, u64 now)
+{
+ if (!q->slot.slot_next)
+ return;
+
+ q->slot.packets_left--;
+ q->slot.bytes_left -= qdisc_pkt_len(skb);
+ if (q->slot.packets_left <= 0 || q->slot.bytes_left <= 0)
+ get_slot_next(q, now);
+}
+
+/*
+ * Transfer time-ready packets from the tfifo into the child qdisc,
+ * then dequeue from the child.
+ */
+static struct sk_buff *netem_dequeue_child(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
+ u64 now = ktime_get_ns();
struct sk_buff *skb;
-tfifo_dequeue:
- skb = __qdisc_dequeue_head(&sch->q);
- if (skb) {
-deliver:
- qdisc_qstats_backlog_dec(sch, skb);
- qdisc_bstats_update(sch, skb);
- return skb;
- }
skb = netem_peek(q);
if (skb) {
- u64 time_to_send;
- u64 now = ktime_get_ns();
+ u64 time_to_send = netem_skb_cb(skb)->time_to_send;
- /* if more time remaining? */
- time_to_send = netem_skb_cb(skb)->time_to_send;
if (q->slot.slot_next && q->slot.slot_next < time_to_send)
get_slot_next(q, now);
if (time_to_send <= now && q->slot.slot_next <= now) {
- netem_erase_head(q, skb);
- q->t_len--;
- skb->next = NULL;
- skb->prev = NULL;
- /* skb->dev shares skb->rbnode area,
- * we need to restore its value.
- */
- skb->dev = qdisc_dev(sch);
-
- if (q->slot.slot_next) {
- q->slot.packets_left--;
- q->slot.bytes_left -= qdisc_pkt_len(skb);
- if (q->slot.packets_left <= 0 ||
- q->slot.bytes_left <= 0)
- get_slot_next(q, now);
- }
-
- if (q->qdisc) {
- unsigned int pkt_len = qdisc_pkt_len(skb);
- struct sk_buff *to_free = NULL;
- int err;
-
- err = qdisc_enqueue(skb, q->qdisc, &to_free);
- kfree_skb_list(to_free);
- if (err != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(err))
- qdisc_qstats_drop(sch);
- sch->qstats.backlog -= pkt_len;
- sch->q.qlen--;
- qdisc_tree_reduce_backlog(sch, 1, pkt_len);
- }
- goto tfifo_dequeue;
- }
- sch->q.qlen--;
- goto deliver;
- }
-
- if (q->qdisc) {
- skb = q->qdisc->ops->dequeue(q->qdisc);
- if (skb) {
+ struct sk_buff *to_free = NULL;
+ unsigned int pkt_len;
+ int err;
+
+ skb = netem_pull_tfifo(q, sch);
+ netem_slot_account(q, skb, now);
+
+ pkt_len = qdisc_pkt_len(skb);
+ err = qdisc_enqueue(skb, q->qdisc, &to_free);
+ kfree_skb_list(to_free);
+ if (err != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(err))
+ qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= pkt_len;
sch->q.qlen--;
- goto deliver;
+ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
}
-
- qdisc_watchdog_schedule_ns(&q->watchdog,
- max(time_to_send,
- q->slot.slot_next));
}
- if (q->qdisc) {
- skb = q->qdisc->ops->dequeue(q->qdisc);
- if (skb) {
- sch->q.qlen--;
- goto deliver;
- }
+ skb = q->qdisc->ops->dequeue(q->qdisc);
+ if (skb)
+ sch->q.qlen--;
+
+ return skb;
+}
+
+/* Dequeue directly from the tfifo when no child qdisc is configured. */
+static struct sk_buff *netem_dequeue_direct(struct Qdisc *sch)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ u64 time_to_send;
+ u64 now;
+
+ skb = netem_peek(q);
+ if (!skb)
+ return NULL;
+
+ now = ktime_get_ns();
+ time_to_send = netem_skb_cb(skb)->time_to_send;
+
+ if (q->slot.slot_next && q->slot.slot_next < time_to_send)
+ get_slot_next(q, now);
+
+ if (time_to_send > now || q->slot.slot_next > now)
+ return NULL;
+
+ skb = netem_pull_tfifo(q, sch);
+ netem_slot_account(q, skb, now);
+ sch->q.qlen--;
+
+ return skb;
+}
+
+static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ /* First check the reorder queue */
+ skb = __qdisc_dequeue_head(&sch->q);
+ if (skb)
+ goto deliver;
+
+ if (q->qdisc)
+ skb = netem_dequeue_child(sch);
+ else
+ skb = netem_dequeue_direct(sch);
+
+ if (skb)
+ goto deliver;
+
+ /* Nothing ready; schedule watchdog for the next packet */
+ skb = netem_peek(q);
+ if (skb) {
+ u64 time_to_send = netem_skb_cb(skb)->time_to_send;
+
+ qdisc_watchdog_schedule_ns(&q->watchdog,
+ max(time_to_send, q->slot.slot_next));
}
return NULL;
+
+deliver:
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ return skb;
}
static void netem_reset(struct Qdisc *sch)
--
2.53.0