[PATCH net-next V6 1/3] net/mlx5e: DMA-sync earlier in mlx5e_skb_from_cqe_mpwrq_nonlinear
From: Tariq Toukan
Date: Thu May 07 2026 - 06:01:04 EST
From: Christoph Paasch <cpaasch@xxxxxxxxxx>
Calling dma_sync_single_for_cpu() earlier will allow us to
adjust headlen based on the actual size of the protocol headers.
Doing this earlier means that we no longer need to call
mlx5e_copy_skb_header() and can instead call
skb_copy_to_linear_data() directly.
Reviewed-by: Eric Dumazet <edumazet@xxxxxxxxxx>
Reviewed-by: Saeed Mahameed <saeedm@xxxxxxxxxx>
Signed-off-by: Christoph Paasch <cpaasch@xxxxxxxxxx>
Signed-off-by: Dragos Tatulea <dtatulea@xxxxxxxxxx>
Signed-off-by: Tariq Toukan <tariqt@xxxxxxxxxx>
---
.../net/ethernet/mellanox/mlx5/core/en_rx.c | 22 +++++++++++++------
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b60aa47c75b..75ccf40a7f17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1923,11 +1923,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
unsigned int truesize = 0;
u32 pg_consumed_bytes;
struct bpf_prog *prog;
+ void *va, *head_addr;
struct sk_buff *skb;
u32 linear_frame_sz;
u16 linear_data_len;
u16 linear_hr;
- void *va;
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
@@ -1940,9 +1940,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
prog = rcu_dereference(rq->xdp_prog);
+ head_addr = netmem_address(head_page->netmem) + head_offset;
+
if (prog) {
/* area for bpf_xdp_[store|load]_bytes */
- net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+ net_prefetchw(head_addr);
va = mlx5e_mpwqe_get_linear_page_frag(rq);
if (!va) {
@@ -1956,6 +1958,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
linear_page = &rq->mpwqe.linear_info->frag_page;
} else {
+ dma_addr_t addr;
+
skb = napi_alloc_skb(rq->cq.napi,
ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
if (unlikely(!skb)) {
@@ -1967,6 +1971,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
net_prefetchw(va); /* xdp_frame data area */
net_prefetchw(skb->data);
+ addr = page_pool_get_dma_addr_netmem(head_page->netmem);
+ dma_sync_single_for_cpu(rq->pdev, addr + head_offset,
+ ALIGN(headlen, sizeof(long)),
+ rq->buff.map_dir);
+
frag_offset += headlen;
byte_cnt -= headlen;
linear_hr = skb_headroom(skb);
@@ -2056,8 +2065,6 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
__pskb_pull_tail(skb, headlen);
}
} else {
- dma_addr_t addr;
-
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
@@ -2071,10 +2078,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
pagep->frags++;
while (++pagep < frag_page);
}
+
/* copy header */
- addr = page_pool_get_dma_addr_netmem(head_page->netmem);
- mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
- head_offset, head_offset, headlen);
+ skb_copy_to_linear_data(skb, head_addr,
+ ALIGN(headlen, sizeof(long)));
+
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
--
2.44.0