[PATCH 3/3] net: stmmac: Remove stmmac_rx()'s `limit`, check desc validity instead

From: Sam Edwards

Date: Sun Mar 15 2026 - 22:11:23 EST


A previous patch ("net: stmmac: Fix NULL deref when RX encounters a
dirty descriptor") fixed a bug where the receive loop may advance to a
still-dirty descriptor (i.e. one with OWN=0 but its buffer(s)
removed+NULLed), causing a panic. That fix worked by tightening the
loop's iteration limit so that it must stop short of the last non-dirty
descriptor in the ring.

This works, and is minimal enough for stable, but isn't an overall clean
approach: it deliberately ignores a (potentially-ready) descriptor, and
is avoiding the real issue -- that both "dirty" and "ready" descriptors
are OWN=0, and the loop doesn't understand the ambiguity.

Thus, strengthen the loop by explicitly checking whether the page(s) are
allocated for each descriptor, disambiguating "ready" descriptors from
"dirty" ones. Next, because `cur_rx` is now allowed to advance to a dirty
descriptor, also remove the clamp from the beginning of stmmac_rx().
Finally, resolve the "head == tail ring buffer ambiguity" problem this
creates in stmmac_rx_dirty() by explicitly checking if `cur_rx` is missing
its buffer(s).

Note that this changes the valid range of stmmac_rx_dirty()'s return
value from `0 <= x < dma_rx_size` to `0 <= x <= dma_rx_size`.

Signed-off-by: Sam Edwards <CFSworks@xxxxxxxxx>
---
.../net/ethernet/stmicro/stmmac/stmmac_main.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d18ee145f5ca..9074668db8be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -375,8 +375,11 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
u32 dirty;
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[rx_q->cur_rx];

- if (rx_q->dirty_rx <= rx_q->cur_rx)
+ if (!buf->page || (priv->sph_active && !buf->sec_page))
+ dirty = priv->dma_conf.dma_rx_size;
+ else if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
@@ -5593,7 +5596,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
- int budget = limit;
u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
@@ -5610,8 +5612,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)

dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
- limit = min(priv->dma_conf.dma_rx_size - stmmac_rx_dirty(priv, queue) - 1,
- (unsigned int)limit);

if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -5656,6 +5656,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
entry = next_entry;
buf = &rx_q->buf_pool[entry];

+ /* don't eat our own tail */
+ if (unlikely(!buf->page || (priv->sph_active && !buf->sec_page)))
+ break;
+
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
@@ -5874,8 +5878,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
/* If the RX queue is completely dirty, we can't expect a future
* interrupt; tell NAPI to keep polling.
*/
- if (unlikely(stmmac_rx_dirty(priv, queue) == priv->dma_conf.dma_rx_size - 1))
- return budget;
+ if (unlikely(stmmac_rx_dirty(priv, queue) == priv->dma_conf.dma_rx_size))
+ return limit;

return count;
}
--
2.52.0