[PATCH net-next v3 5/6] net: bcmgenet: add XDP_REDIRECT and ndo_xdp_xmit support
From: Nicolai Buchwitz
Date: Thu Mar 19 2026 - 08:00:58 EST
Add XDP_REDIRECT support and implement ndo_xdp_xmit so that frames
redirected from other devices can be transmitted by this driver.
The XDP_REDIRECT action invokes xdp_do_redirect() in the RX path, with
xdp_do_flush() called once per NAPI poll cycle. ndo_xdp_xmit batches
frames into ring 16 under a single spinlock acquisition and rings the
doorbell once per batch.
Advertise NETDEV_XDP_ACT_REDIRECT and NETDEV_XDP_ACT_NDO_XMIT in
xdp_features.
Signed-off-by: Nicolai Buchwitz <nb@xxxxxxxxxxx>
---
.../net/ethernet/broadcom/genet/bcmgenet.c | 98 +++++++++++++++----
1 file changed, 80 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 54df1694f1cd..a5ad25ccb148 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2320,23 +2320,23 @@ static struct sk_buff *bcmgenet_xdp_build_skb(struct bcmgenet_rx_ring *ring,
return skb;
}
+/* Submit a single XDP frame to the TX ring. Caller must hold ring->lock.
+ * Returns true on success. Does not ring the doorbell - caller must
+ * write TDMA_PROD_INDEX after batching.
+ */
static bool
bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring,
struct xdp_frame *xdpf, bool dma_map)
{
- struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
dma_addr_t mapping;
unsigned int dma_len;
u32 len_stat;
- spin_lock(&ring->lock);
-
- if (ring->free_bds < 1) {
- spin_unlock(&ring->lock);
+ if (ring->free_bds < 1)
return false;
- }
tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
@@ -2350,7 +2350,6 @@ bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
*/
if (unlikely(xdpf->headroom < sizeof(struct status_64))) {
bcmgenet_put_txcb(priv, ring);
- spin_unlock(&ring->lock);
return false;
}
@@ -2364,7 +2363,6 @@ bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
tx_cb_ptr->skb = NULL;
tx_cb_ptr->xdpf = NULL;
bcmgenet_put_txcb(priv, ring);
- spin_unlock(&ring->lock);
return false;
}
} else {
@@ -2396,12 +2394,15 @@ bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
ring->prod_index++;
ring->prod_index &= DMA_P_INDEX_MASK;
+ return true;
+}
+
+static void
+bcmgenet_xdp_ring_doorbell(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index,
TDMA_PROD_INDEX);
-
- spin_unlock(&ring->lock);
-
- return true;
}
static unsigned int
@@ -2417,7 +2418,11 @@ bcmgenet_run_xdp(struct bcmgenet_rx_ring *ring, struct bpf_prog *prog,
switch (act) {
case XDP_PASS:
return XDP_PASS;
- case XDP_TX:
+ case XDP_TX: {
+ struct bcmgenet_tx_ring *tx_ring;
+
+ tx_ring = &priv->tx_rings[DESC_INDEX];
+
/* Prepend a zeroed TSB (Transmit Status Block). The GENET
* MAC has TBUF_64B_EN set globally, so hardware expects every
* TX buffer to begin with a 64-byte struct status_64. Back
@@ -2429,14 +2434,26 @@ bcmgenet_run_xdp(struct bcmgenet_rx_ring *ring, struct bpf_prog *prog,
memset(xdp->data, 0, sizeof(struct status_64));
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf) ||
- unlikely(!bcmgenet_xdp_xmit_frame(priv, xdpf, false))) {
- page_pool_put_full_page(ring->page_pool, rx_page,
- true);
+ if (unlikely(!xdpf))
+ goto drop_page;
+
+ spin_lock(&tx_ring->lock);
+ if (unlikely(!bcmgenet_xdp_xmit_frame(priv, tx_ring,
+ xdpf, false))) {
+ spin_unlock(&tx_ring->lock);
+ xdp_return_frame_rx_napi(xdpf);
return XDP_DROP;
}
+ bcmgenet_xdp_ring_doorbell(priv, tx_ring);
+ spin_unlock(&tx_ring->lock);
return XDP_TX;
+ }
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(priv->dev, xdp, prog)))
+ goto drop_page;
+ return XDP_REDIRECT;
case XDP_DROP:
+drop_page:
page_pool_put_full_page(ring->page_pool, rx_page, true);
return XDP_DROP;
default:
@@ -2459,6 +2476,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct bpf_prog *xdp_prog;
+ bool xdp_flush = false;
struct enet_cb *cb;
struct sk_buff *skb;
u32 dma_length_status;
@@ -2597,6 +2615,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
xdp_act = bcmgenet_run_xdp(ring, xdp_prog, &xdp,
rx_page);
+ if (xdp_act == XDP_REDIRECT)
+ xdp_flush = true;
if (xdp_act != XDP_PASS)
goto next;
@@ -2670,6 +2690,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
}
+ if (xdp_flush)
+ xdp_do_flush();
+
ring->dim.bytes = bytes_processed;
ring->dim.packets = rxpktprocessed;
@@ -3995,10 +4018,16 @@ static int bcmgenet_xdp_setup(struct net_device *dev,
return -EOPNOTSUPP;
}
+ if (!prog)
+ xdp_features_clear_redirect_target(dev);
+
old_prog = xchg(&priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
+ if (prog)
+ xdp_features_set_redirect_target(dev, false);
+
return 0;
}
@@ -4012,6 +4041,37 @@ static int bcmgenet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int
+bcmgenet_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+ int sent = 0;
+ int i;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (unlikely(!netif_running(dev)))
+ return -ENETDOWN;
+
+ spin_lock(&ring->lock);
+
+ for (i = 0; i < num_frames; i++) {
+ if (!bcmgenet_xdp_xmit_frame(priv, ring, frames[i], true))
+ break;
+ sent++;
+ }
+
+ if (sent)
+ bcmgenet_xdp_ring_doorbell(priv, ring);
+
+ spin_unlock(&ring->lock);
+
+ return sent;
+}
+
static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_open = bcmgenet_open,
.ndo_stop = bcmgenet_close,
@@ -4024,6 +4084,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_get_stats64 = bcmgenet_get_stats64,
.ndo_change_carrier = bcmgenet_change_carrier,
.ndo_bpf = bcmgenet_xdp,
+ .ndo_xdp_xmit = bcmgenet_xdp_xmit,
};
/* GENET hardware parameters/characteristics */
@@ -4326,7 +4387,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
NETIF_F_RXCSUM;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
- dev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
netdev_sw_irq_coalesce_default_on(dev);
--
2.51.0