[PATCH net-next v5 5/6] net: bcmgenet: add XDP_REDIRECT and ndo_xdp_xmit support
From: Nicolai Buchwitz
Date: Sat Mar 28 2026 - 19:06:59 EST
Add XDP_REDIRECT support and implement ndo_xdp_xmit for receiving
redirected frames from other devices.
For XDP_REDIRECT, call xdp_do_redirect() in the RX path and invoke
xdp_do_flush() once per NAPI poll cycle. ndo_xdp_xmit batches frames
into ring 16 under a single spinlock acquisition.
Advertise NETDEV_XDP_ACT_REDIRECT and NETDEV_XDP_ACT_NDO_XMIT in
xdp_features.
Signed-off-by: Nicolai Buchwitz <nb@xxxxxxxxxxx>
---
.../net/ethernet/broadcom/genet/bcmgenet.c | 87 ++++++++++++++++---
1 file changed, 73 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 687c3b12d44f..0a857625af4a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2328,22 +2328,22 @@ static struct sk_buff *bcmgenet_xdp_build_skb(struct bcmgenet_rx_ring *ring,
return skb;
}
+/* Submit a single XDP frame to the TX ring. Caller must hold ring->lock.
+ * Returns true on success. Does not ring the doorbell - caller must
+ * write TDMA_PROD_INDEX after batching.
+ */
static bool bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring,
struct xdp_frame *xdpf, bool dma_map)
{
- struct bcmgenet_tx_ring *ring = &priv->xdp_tx_ring;
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
dma_addr_t mapping;
unsigned int dma_len;
u32 len_stat;
- spin_lock(&ring->lock);
-
- if (ring->free_bds < 1) {
- spin_unlock(&ring->lock);
+ if (ring->free_bds < 1)
return false;
- }
tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
@@ -2357,7 +2357,6 @@ static bool bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
*/
if (unlikely(xdpf->headroom < sizeof(struct status_64))) {
bcmgenet_put_txcb(priv, ring);
- spin_unlock(&ring->lock);
return false;
}
@@ -2371,7 +2370,6 @@ static bool bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
tx_cb_ptr->skb = NULL;
tx_cb_ptr->xdpf = NULL;
bcmgenet_put_txcb(priv, ring);
- spin_unlock(&ring->lock);
return false;
}
} else {
@@ -2403,12 +2401,14 @@ static bool bcmgenet_xdp_xmit_frame(struct bcmgenet_priv *priv,
ring->prod_index++;
ring->prod_index &= DMA_P_INDEX_MASK;
+ return true;
+}
+
+static void bcmgenet_xdp_ring_doorbell(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index,
TDMA_PROD_INDEX);
-
- spin_unlock(&ring->lock);
-
- return true;
}
static unsigned int bcmgenet_run_xdp(struct bcmgenet_rx_ring *ring,
@@ -2417,6 +2417,7 @@ static unsigned int bcmgenet_run_xdp(struct bcmgenet_rx_ring *ring,
struct page *rx_page)
{
struct bcmgenet_priv *priv = ring->priv;
+ struct bcmgenet_tx_ring *tx_ring;
struct xdp_frame *xdpf;
unsigned int act;
@@ -2448,11 +2449,25 @@ static unsigned int bcmgenet_run_xdp(struct bcmgenet_rx_ring *ring,
true);
return XDP_DROP;
}
- if (unlikely(!bcmgenet_xdp_xmit_frame(priv, xdpf, false))) {
+
+ tx_ring = &priv->xdp_tx_ring;
+ spin_lock(&tx_ring->lock);
+ if (unlikely(!bcmgenet_xdp_xmit_frame(priv, tx_ring,
+ xdpf, false))) {
+ spin_unlock(&tx_ring->lock);
xdp_return_frame_rx_napi(xdpf);
return XDP_DROP;
}
+ bcmgenet_xdp_ring_doorbell(priv, tx_ring);
+ spin_unlock(&tx_ring->lock);
return XDP_TX;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(priv->dev, xdp, prog))) {
+ page_pool_put_full_page(ring->page_pool, rx_page,
+ true);
+ return XDP_DROP;
+ }
+ return XDP_REDIRECT;
case XDP_DROP:
page_pool_put_full_page(ring->page_pool, rx_page, true);
return XDP_DROP;
@@ -2476,6 +2491,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct bpf_prog *xdp_prog;
+ bool xdp_flush = false;
struct enet_cb *cb;
struct sk_buff *skb;
u32 dma_length_status;
@@ -2614,6 +2630,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
xdp_act = bcmgenet_run_xdp(ring, xdp_prog, &xdp,
rx_page);
+ if (xdp_act == XDP_REDIRECT)
+ xdp_flush = true;
if (xdp_act != XDP_PASS)
goto next;
@@ -2687,6 +2705,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
}
+ if (xdp_flush)
+ xdp_do_flush();
+
ring->dim.bytes = bytes_processed;
ring->dim.packets = rxpktprocessed;
@@ -4017,12 +4038,18 @@ static int bcmgenet_xdp_setup(struct net_device *dev,
return -EOPNOTSUPP;
}
+ if (!prog)
+ xdp_features_clear_redirect_target(dev);
+
old_prog = xchg(&priv->xdp_prog, prog);
if (old_prog) {
synchronize_net();
bpf_prog_put(old_prog);
}
+ if (prog)
+ xdp_features_set_redirect_target(dev, false);
+
return 0;
}
@@ -4036,6 +4063,36 @@ static int bcmgenet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int bcmgenet_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_tx_ring *ring = &priv->xdp_tx_ring;
+ int sent = 0;
+ int i;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (unlikely(!netif_running(dev)))
+ return -ENETDOWN;
+
+ spin_lock(&ring->lock);
+
+ for (i = 0; i < num_frames; i++) {
+ if (!bcmgenet_xdp_xmit_frame(priv, ring, frames[i], true))
+ break;
+ sent++;
+ }
+
+ if (sent)
+ bcmgenet_xdp_ring_doorbell(priv, ring);
+
+ spin_unlock(&ring->lock);
+
+ return sent;
+}
+
static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_open = bcmgenet_open,
.ndo_stop = bcmgenet_close,
@@ -4048,6 +4105,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_get_stats64 = bcmgenet_get_stats64,
.ndo_change_carrier = bcmgenet_change_carrier,
.ndo_bpf = bcmgenet_xdp,
+ .ndo_xdp_xmit = bcmgenet_xdp_xmit,
};
/* GENET hardware parameters/characteristics */
@@ -4350,7 +4408,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
NETIF_F_RXCSUM;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
- dev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
netdev_sw_irq_coalesce_default_on(dev);
--
2.51.0