[PATCH net-next 5/8] net: macb: move macb_xdp_submit_frame() body to helper function
From: Théo Lebrun
Date: Wed Mar 04 2026 - 13:35:05 EST
Part of macb_xdp_submit_frame() is specific to handling an XDP buffer
(picking a queue for emission, DMA mapping or syncing, reporting emitted
bytes); the rest talks to the hardware, updating the DMA descriptor and
starting the transmission.

Move the hardware-specific code out of macb_xdp_submit_frame() into a
macb_xdp_submit_buff() helper function, so that it can be reused to
support XSK buffers.

The macb_xdp_submit_frame() body changes slightly: the dma_map_single()
call moves out of the queue->tx_ptr_lock critical section, to minimise
the time the lock is held.

Signed-off-by: Théo Lebrun <theo.lebrun@xxxxxxxxxxx>
---
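Reviewer note, kept below the "---" marker so that git-am drops it: a
condensed sketch of the resulting macb_xdp_submit_frame() flow. It
mirrors the hunks below with unrelated details elided, and is not meant
to compile as-is:

	static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
					 struct net_device *netdev, bool dma_map,
					 dma_addr_t addr)
	{
		/* XDP-specific prep, now outside the lock: DMA map on the
		 * ndo_xdp_xmit path, DMA sync on the XDP_TX path.
		 */
		...

		spin_lock_irqsave(&queue->tx_ptr_lock, flags);
		if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) {
			/* Ring full: stop the subqueue, log it. */
			err = -ENOMEM;
		} else {
			/* Descriptor and TSTART handling live in the helper. */
			macb_xdp_submit_buff(bp, queue_index, buff);
			netdev_tx_sent_queue(...);
		}
		spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);

		/* New: undo the mapping if the ring turned out to be full
		 * (ndo_xdp_xmit path only).
		 */
		if (err && dma_map)
			dma_unmap_single(dev, mapping, xdpf->len, DMA_TO_DEVICE);

		return err;
	}
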
drivers/net/ethernet/cadence/macb_main.c | 143 +++++++++++++++++--------------
1 file changed, 78 insertions(+), 65 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ed94f9f0894b..65c2ec2a843c 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1208,6 +1208,52 @@ static bool ptp_one_step_sync(struct sk_buff *skb)
return false;
}
+static void macb_xdp_submit_buff(struct macb *bp, unsigned int queue_index,
+ struct macb_tx_buff buff)
+{
+ struct macb_queue *queue = &bp->queues[queue_index];
+ struct net_device *netdev = bp->dev;
+ struct macb_tx_buff *tx_buff;
+ struct macb_dma_desc *desc;
+ unsigned int next_head;
+ u32 ctrl;
+
+ next_head = queue->tx_head + 1;
+
+ ctrl = MACB_BIT(TX_USED);
+ desc = macb_tx_desc(queue, next_head);
+ desc->ctrl = ctrl;
+
+ desc = macb_tx_desc(queue, queue->tx_head);
+ tx_buff = macb_tx_buff(queue, queue->tx_head);
+ *tx_buff = buff;
+
+ ctrl = (u32)buff.size;
+ ctrl |= MACB_BIT(TX_LAST);
+
+ if (unlikely(macb_tx_ring_wrap(bp, queue->tx_head) == (bp->tx_ring_size - 1)))
+ ctrl |= MACB_BIT(TX_WRAP);
+
+ /* Set TX buffer descriptor */
+ macb_set_addr(bp, desc, buff.mapping);
+ /* desc->addr must be visible to hardware before clearing
+ * 'TX_USED' bit in desc->ctrl.
+ */
+ wmb();
+ desc->ctrl = ctrl;
+ queue->tx_head = next_head;
+
+ /* Make newly initialized descriptor visible to hardware */
+ wmb();
+
+ spin_lock(&bp->lock);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock(&bp->lock);
+
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
+ netif_stop_subqueue(netdev, queue_index);
+}
+
static int macb_tx_complete(struct macb_queue *queue, int budget)
{
struct macb *bp = queue->bp;
@@ -1430,44 +1476,25 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
}
static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
- struct net_device *dev, bool dma_map,
+ struct net_device *netdev, bool dma_map,
dma_addr_t addr)
{
+ struct device *dev = &bp->pdev->dev;
enum macb_tx_buff_type buff_type;
- struct macb_tx_buff *tx_buff;
int cpu = smp_processor_id();
- struct macb_dma_desc *desc;
struct macb_queue *queue;
- unsigned int next_head;
unsigned long flags;
dma_addr_t mapping;
u16 queue_index;
int err = 0;
- u32 ctrl;
-
- queue_index = cpu % bp->num_queues;
- queue = &bp->queues[queue_index];
- buff_type = dma_map ? MACB_TYPE_XDP_NDO : MACB_TYPE_XDP_TX;
-
- spin_lock_irqsave(&queue->tx_ptr_lock, flags);
-
- /* This is a hard error, log it. */
- if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) {
- netif_stop_subqueue(dev, queue_index);
- netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
- queue->tx_head, queue->tx_tail);
- err = -ENOMEM;
- goto unlock;
- }
if (dma_map) {
- mapping = dma_map_single(&bp->pdev->dev,
- xdpf->data,
- xdpf->len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- err = -ENOMEM;
- goto unlock;
- }
+ mapping = dma_map_single(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ err = dma_mapping_error(dev, mapping);
+ if (unlikely(err))
+ return err;
+
+ buff_type = MACB_TYPE_XDP_NDO;
} else {
/* progs can adjust the head. Sync and set the adjusted one.
* This also implicitly takes into account ip alignment,
@@ -1476,52 +1503,38 @@ static int macb_xdp_submit_frame(struct macb *bp, struct xdp_frame *xdpf,
mapping = addr + xdpf->headroom + sizeof(*xdpf);
dma_sync_single_for_device(&bp->pdev->dev, mapping,
xdpf->len, DMA_BIDIRECTIONAL);
+
+ buff_type = MACB_TYPE_XDP_TX;
}
- next_head = queue->tx_head + 1;
+ queue_index = cpu % bp->num_queues;
+ queue = &bp->queues[queue_index];
- ctrl = MACB_BIT(TX_USED);
- desc = macb_tx_desc(queue, next_head);
- desc->ctrl = ctrl;
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
- desc = macb_tx_desc(queue, queue->tx_head);
- tx_buff = macb_tx_buff(queue, queue->tx_head);
- tx_buff->ptr = xdpf;
- tx_buff->type = buff_type;
- tx_buff->mapping = dma_map ? mapping : 0;
- tx_buff->size = xdpf->len;
- tx_buff->mapped_as_page = false;
+ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) {
+ /* This is a hard error, log it. */
+ netif_stop_subqueue(netdev, queue_index);
+ netdev_dbg(netdev, "tx_head = %u, tx_tail = %u\n",
+ queue->tx_head, queue->tx_tail);
+ err = -ENOMEM;
+ } else {
+ macb_xdp_submit_buff(bp, queue_index, (struct macb_tx_buff){
+ .ptr = xdpf,
+ .mapping = dma_map ? mapping : 0,
+ .size = xdpf->len,
+ .mapped_as_page = false,
+ .type = buff_type,
+ });
- ctrl = (u32)tx_buff->size;
- ctrl |= MACB_BIT(TX_LAST);
+ netdev_tx_sent_queue(netdev_get_tx_queue(netdev, queue_index), xdpf->len);
+ }
- if (unlikely(macb_tx_ring_wrap(bp, queue->tx_head) == (bp->tx_ring_size - 1)))
- ctrl |= MACB_BIT(TX_WRAP);
-
- /* Set TX buffer descriptor */
- macb_set_addr(bp, desc, mapping);
- /* desc->addr must be visible to hardware before clearing
- * 'TX_USED' bit in desc->ctrl.
- */
- wmb();
- desc->ctrl = ctrl;
- queue->tx_head = next_head;
-
- /* Make newly initialized descriptor visible to hardware */
- wmb();
-
- spin_lock(&bp->lock);
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- spin_unlock(&bp->lock);
-
- netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index), xdpf->len);
-
- if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
- netif_stop_subqueue(dev, queue_index);
-
-unlock:
spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+ if (err && dma_map)
+ dma_unmap_single(dev, mapping, xdpf->len, DMA_TO_DEVICE);
+
return err;
}
--
2.53.0