[PATCH net v4 5/8] net/sched: netem: batch-transfer ready packets to avoid child re-entrancy
From: Stephen Hemminger
Date: Mon Apr 06 2026 - 13:29:55 EST
netem_dequeue_child() previously transferred one packet from the tfifo
to the child qdisc per dequeue call. Parents like HFSC that track
class active/inactive state on qlen transitions could see an enqueue
during dequeue, causing double-insertion into the eltree
(CVE-2025-37890, CVE-2025-38001). Non-work-conserving children like
TBF could also refuse to return a just-enqueued packet, making netem
return NULL despite having backlog, leading parents like DRR to
incorrectly deactivate the class.
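
For illustration only, a deliberately simplified sketch of such a parent
dequeue path; the toy_class struct and the toy_first_active_class() /
toy_deactivate_class() helpers are invented for this sketch and do not
match the real DRR or HFSC code:

	/* toy parent qdisc: deactivates a class as soon as its child's
	 * dequeue returns NULL, mirroring how DRR-style parents track
	 * active classes on qlen transitions
	 */
	static struct sk_buff *toy_parent_dequeue(struct Qdisc *sch)
	{
		struct toy_class *cl = toy_first_active_class(sch);
		struct sk_buff *skb;

		if (!cl)
			return NULL;

		skb = cl->qdisc->ops->dequeue(cl->qdisc);
		if (!skb) {
			/* NULL is taken to mean "child is empty"; if the
			 * child is netem still holding packets in its
			 * tfifo, the class is dropped from the active
			 * list with backlog pending and can stall
			 */
			toy_deactivate_class(cl);
			return NULL;
		}

		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		return skb;
	}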
Move all time-ready packets into the child before calling its dequeue.
This separates the enqueue and dequeue phases so the parent sees
consistent qlen transitions.
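
The resulting two-phase shape of netem_dequeue_child(), reduced to a
skeleton: the transfer loop below comes from the diff (slot checks and
enqueue error handling elided), while the final child dequeue is not part
of the hunk and is only assumed here to use qdisc_dequeue_peeked(), as
the existing classful netem path does.

	static struct sk_buff *netem_dequeue_child(struct Qdisc *sch)
	{
		struct netem_sched_data *q = qdisc_priv(sch);
		u64 now = ktime_get_ns();
		struct sk_buff *skb;

		/* phase 1: flush every packet whose time_to_send has
		 * passed from the tfifo into the child qdisc
		 */
		while ((skb = netem_peek(q)) != NULL) {
			struct sk_buff *to_free = NULL;

			if (netem_skb_cb(skb)->time_to_send > now)
				break;
			skb = netem_pull_tfifo(q, sch);
			qdisc_enqueue(skb, q->qdisc, &to_free);
			kfree_skb_list(to_free);
		}

		/* phase 2: only now dequeue from the child, so its
		 * enqueue and dequeue no longer interleave within a
		 * single parent dequeue
		 */
		return qdisc_dequeue_peeked(q->qdisc);
	}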
Fixes: 50612537e9ab ("netem: fix classful handling")
Signed-off-by: Stephen Hemminger <stephen@xxxxxxxxxxxxxxxxxx>
---
net/sched/sch_netem.c | 49 +++++++++++++++++++++----------------------
1 file changed, 24 insertions(+), 25 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e264f7aefb97..b93f0e886a2b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -734,8 +734,10 @@ static void netem_slot_account(struct netem_sched_data *q,
}
/*
- * Transfer time-ready packets from the tfifo into the child qdisc,
- * then dequeue from the child.
+ * Transfer all time-ready packets from the tfifo into the child qdisc,
+ * then dequeue from the child. Batching the transfers avoids calling
+ * qdisc_enqueue() inside the parent's dequeue path, which confuses
+ * parents that track active/inactive state on qlen transitions (HFSC).
*/
static struct sk_buff *netem_dequeue_child(struct Qdisc *sch)
{
@@ -743,31 +745,28 @@ static struct sk_buff *netem_dequeue_child(struct Qdisc *sch)
u64 now = ktime_get_ns();
struct sk_buff *skb;
- skb = netem_peek(q);
- if (skb) {
- u64 time_to_send = netem_skb_cb(skb)->time_to_send;
-
- if (q->slot.slot_next && q->slot.slot_next < time_to_send)
- get_slot_next(q, now);
-
- if (time_to_send <= now && q->slot.slot_next <= now) {
- struct sk_buff *to_free = NULL;
- unsigned int pkt_len;
- int err;
+ while ((skb = netem_peek(q)) != NULL) {
+ struct sk_buff *to_free = NULL;
+ unsigned int pkt_len;
+ int err;
- skb = netem_pull_tfifo(q, sch);
- netem_slot_account(q, skb, now);
+ if (netem_skb_cb(skb)->time_to_send > now)
+ break;
+ if (q->slot.slot_next && q->slot.slot_next > now)
+ break;
- pkt_len = qdisc_pkt_len(skb);
- err = qdisc_enqueue(skb, q->qdisc, &to_free);
- kfree_skb_list(to_free);
- if (err != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(err))
- qdisc_qstats_drop(sch);
- sch->qstats.backlog -= pkt_len;
- sch->q.qlen--;
- qdisc_tree_reduce_backlog(sch, 1, pkt_len);
- }
+ skb = netem_pull_tfifo(q, sch);
+ netem_slot_account(q, skb, now);
+
+ pkt_len = qdisc_pkt_len(skb);
+ err = qdisc_enqueue(skb, q->qdisc, &to_free);
+ kfree_skb_list(to_free);
+ if (unlikely(err != NET_XMIT_SUCCESS)) {
+ if (net_xmit_drop_count(err))
+ qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= pkt_len;
+ sch->q.qlen--;
+ qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
}
--
2.53.0