Re: [net-next,v3,05/13] net: renesas: rswitch: add exception path for packets with unknown dst MAC
From: Paolo Abeni
Date: Thu Apr 02 2026 - 09:04:34 EST
This is an AI-generated review of your patch. The human sending this
email has considered the AI review valid, or at least plausible.
For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
---
net: renesas: rswitch: add exception path for packets with unknown dst MAC
This commit creates a shared RX queue for the exception path to handle
packets with unknown MAC addresses that cannot be processed by HW
forwarding. The queue receives packets with unknown src/dst addresses
and forwards them to the CPU for MAC learning.
> diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
> index 15d3fa48b0e3..bf123b564ed1 100644
> --- a/drivers/net/ethernet/renesas/rswitch.h
> +++ b/drivers/net/ethernet/renesas/rswitch.h
> @@ -1148,6 +1148,7 @@ struct rswitch_gwca {
> struct rswitch_gwca_queue *queues;
> int num_queues;
> struct rswitch_gwca_queue ts_queue;
> + struct rswitch_gwca_queue *l2_shared_rx_queue;
> DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
> u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
> u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
> diff --git a/drivers/net/ethernet/renesas/rswitch_main.c b/drivers/net/ethernet/renesas/rswitch_main.c
> index d404bc41bd1e..3c142fa3c810 100644
> --- a/drivers/net/ethernet/renesas/rswitch_main.c
> +++ b/drivers/net/ethernet/renesas/rswitch_main.c
> @@ -680,6 +680,34 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
> return rswitch_gwca_queue_ext_ts_format(ndev->dev.parent, priv, rdev->rx_queue);
> }
>
> +static int rswitch_shared_rx_queue_alloc(struct rswitch_private *priv)
> +{
> + struct rswitch_gwca *gwca = &priv->gwca;
> + struct device *dev = &priv->pdev->dev;
> +
> + int err;
> +
> + gwca->l2_shared_rx_queue = rswitch_gwca_get(priv);
> + if (!gwca->l2_shared_rx_queue)
> + return -EBUSY;
> +
> + err = rswitch_gwca_queue_alloc(NULL, priv, gwca->l2_shared_rx_queue, false, RX_RING_SIZE);
> + if (err < 0) {
> + rswitch_gwca_put(priv, gwca->l2_shared_rx_queue);
> + return err;
> + }
> +
> + return rswitch_gwca_queue_ext_ts_format(dev, priv, gwca->l2_shared_rx_queue);
> +}
> +
> +static void rswitch_shared_rx_queue_free(struct rswitch_private *priv)
> +{
> + struct rswitch_gwca *gwca = &priv->gwca;
> +
> + rswitch_gwca_queue_free(&priv->pdev->dev, gwca->l2_shared_rx_queue);
> + rswitch_gwca_put(priv, gwca->l2_shared_rx_queue);
> +}
> +
> static int rswitch_gwca_hw_init(struct rswitch_private *priv)
> {
> unsigned int i;
> @@ -717,6 +745,12 @@ static int rswitch_gwca_hw_init(struct rswitch_private *priv)
> return err;
> }
>
> + err = rswitch_shared_rx_queue_alloc(priv);
> + if (err < 0) {
> + rswitch_shared_rx_queue_free(priv);
> + return err;
> + }
Does this cause a NULL pointer dereference if rswitch_shared_rx_queue_alloc()
fails? When rswitch_gwca_queue_alloc() fails internally (e.g., under memory
pressure during netdev_alloc_frag()), the 'out:' label inside
rswitch_gwca_queue_alloc() already calls rswitch_gwca_queue_free(), which sets
gq->rx_bufs = NULL and gq->rx_ring = NULL.
Then rswitch_shared_rx_queue_alloc() calls rswitch_gwca_put() and returns
error. The caller rswitch_gwca_hw_init() then calls
rswitch_shared_rx_queue_free(), which calls rswitch_gwca_queue_free() a second
time.
In this second call, gq->rx_bufs is NULL but gq->ring_size is still
RX_RING_SIZE, so the loop in rswitch_gwca_queue_free():
for (i = 0; i < gq->ring_size; i++)
skb_free_frag(gq->rx_bufs[i])
dereferences the NULL gq->rx_bufs pointer (evaluating gq->rx_bufs[i] with
gq->rx_bufs == NULL). This causes a kernel NULL pointer dereference crash
during device probe under memory pressure.
Should the cleanup not call rswitch_shared_rx_queue_free() when
rswitch_shared_rx_queue_alloc() fails, since the alloc function already
performs partial cleanup internally?
> +
> return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
> }
[ ... ]
> @@ -1513,6 +1564,14 @@ static int rswitch_serdes_set_params(struct rswitch_device *rdev)
> return phy_set_speed(rdev->serdes, rdev->etha->speed);
> }
>
> +static void rswitch_etha_set_exception_path(struct rswitch_private *priv)
> +{
> + iowrite32(FDMACUFEF, priv->addr + FWCEPRC2);
> + iowrite32(FIELD_PREP(EPCS, GWCA_INDEX) |
> + FIELD_PREP(EPCSD, priv->gwca.l2_shared_rx_queue->index),
> + priv->addr + FWCEPTC);
> +}
> +
[ ... ]
Looking at rswitch_init() in the existing code:
err = rswitch_gwca_hw_init(priv);
if (err)
goto err_gwca_hw_init;
err = rswitch_ether_port_init_all(priv);
if (err)
goto err_ether_port_init_all;
err_ether_port_init_all:
rswitch_gwca_hw_deinit(priv);
err_gwca_hw_init:
When rswitch_gwca_hw_init() succeeds (allocating the shared RX queue via
rswitch_shared_rx_queue_alloc()), but a subsequent initialization step fails
(e.g., rswitch_ether_port_init_all() or register_netdev()), does the error
path leak the shared RX queue?
The error path at 'err_ether_port_init_all:' only calls
rswitch_gwca_hw_deinit(), which merely changes the GWCA hardware mode. It does
NOT call rswitch_shared_rx_queue_free() to release the DMA memory
(dma_alloc_coherent), rx_bufs (kzalloc), and page fragments allocated by
rswitch_shared_rx_queue_alloc(). These are not device-managed allocations.
Should the cleanup call rswitch_shared_rx_queue_free() before or after
rswitch_gwca_hw_deinit()?
[ ... ]
> @@ -952,15 +987,31 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
> return 0;
> }
>
> -static void rswitch_queue_interrupt(struct net_device *ndev)
> +static void rswitch_queue_interrupt(struct rswitch_private *priv, struct rswitch_gwca_queue *gq)
> {
> - struct rswitch_device *rdev = netdev_priv(ndev);
> + struct rswitch_ext_ts_desc *desc;
> + struct rswitch_device *rdev;
> + struct net_device *ndev;
> + u32 spn;
> +
> + if (gq->index == priv->gwca.l2_shared_rx_queue->index) {
> + desc = &gq->rx_ring[gq->cur];
> + spn = FIELD_GET(SPN, desc->info1);
> + ndev = priv->rdev[spn]->ndev;
> + rdev = netdev_priv(ndev);
> + gq->ndev = ndev;
> + rdev->rx_queue = gq;
Can this permanently break per-port RX processing? When a shared RX queue
interrupt fires for a port, rswitch_queue_interrupt() overwrites
rdev->rx_queue to point to the shared queue (gq) instead of the port's
per-port RX queue. This assignment is never reversed. After this point:
(1) rswitch_poll() -> rswitch_rx() processes from rdev->rx_queue which now
points to the shared queue, so the per-port RX queue is never drained.
(2) When NAPI completes in rswitch_poll(), it re-enables rdev->rx_queue->index
(the shared queue index) instead of the original per-port RX queue index.
(3) The per-port RX queue IRQ is never re-enabled by NAPI completion
(rswitch_enadis_data_irq only re-enables rdev->rx_queue->index and
l2_shared_rx_queue->index, which are now the same):
rswitch_poll():
if (napi_complete_done(napi, budget - quota)) {
spin_lock_irqsave(&priv->lock, flags);
rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
rswitch_enadis_data_irq(priv, priv->gwca.l2_shared_rx_queue->index, true);
...
}
This means the per-port RX queue becomes permanently orphaned after the first
shared-queue packet for that port, causing packet loss for all regular
(non-exception-path) traffic on that port.
Should the code save and restore the original rdev->rx_queue, or use a
separate mechanism to process shared queue packets without overwriting the
per-port queue pointer?
> + } else {
> + rdev = netdev_priv(gq->ndev);
> + }
> +
> +
> + if (napi_schedule_prep(&rdev->napi)) {
> + spin_lock(&priv->lock);
> + rswitch_enadis_data_irq(priv, rdev->tx_queue->index, false);
> + rswitch_enadis_data_irq(priv, rdev->rx_queue->index, false);
> + rswitch_enadis_data_irq(priv, priv->gwca.l2_shared_rx_queue->index, false);
> + spin_unlock(&priv->lock);
> __napi_schedule(&rdev->napi);
> }
> }
[ ... ]