[PATCH 4.14 20/92] ixgbe: split XDP_TX tail and XDP_REDIRECT map flushing
From: Greg Kroah-Hartman
Date: Fri Jul 20 2018 - 08:33:32 EST
4.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
[ Upstream commit ad088ec480768850db019a5cc543685e868a513d ]
The driver was combining the XDP_TX tail flush and the XDP_REDIRECT
map flush (xdp_do_flush_map). This is suboptimal; these two flush
operations should be kept separate.
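
For reference while reviewing, a minimal standalone C sketch of the
pattern this change moves to; the flag values mirror the patch, but
the harness around them (the stub functions and the verdict array)
is hypothetical, not driver code:

#include <stdio.h>

/* Verdicts become OR-able bits so one RX pass can record both
 * kinds of pending work at once.
 */
#define XDP_RES_CONSUMED	(1u << 0)
#define XDP_RES_TX		(1u << 1)	/* queued on local XDP TX ring */
#define XDP_RES_REDIR		(1u << 2)	/* handed to xdp_do_redirect() */

/* Hypothetical stand-ins for xdp_do_flush_map() and the tail write. */
static void flush_redirect_map(void) { puts("flush redirect map"); }
static void bump_tx_tail(void)       { puts("write TX tail register"); }

int main(void)
{
	/* Pretend verdicts for four received frames. */
	unsigned int verdicts[] = {
		XDP_RES_TX, XDP_RES_REDIR, XDP_RES_CONSUMED, XDP_RES_TX,
	};
	unsigned int xdp_xmit = 0;
	unsigned int i;

	/* The poll loop only accumulates bits ... */
	for (i = 0; i < 4; i++)
		xdp_xmit |= verdicts[i];

	/* ... and each kind of pending work is then flushed on its
	 * own, instead of one boolean triggering both operations.
	 */
	if (xdp_xmit & XDP_RES_REDIR)
		flush_redirect_map();
	if (xdp_xmit & XDP_RES_TX)
		bump_tx_tail();
	return 0;
}

The point of the split is that the two flushes track different
pending work: xdp_do_flush_map() drains the redirect bulk queues,
while the tail write kicks DMA on the local XDP TX ring, and either
can be needed without the other.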
Fixes: 11393cc9b9be ("xdp: Add batching support to redirect map")
Signed-off-by: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2211,9 +2211,10 @@ static struct sk_buff *ixgbe_build_skb(s
 	return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS		0
+#define IXGBE_XDP_CONSUMED	BIT(0)
+#define IXGBE_XDP_TX		BIT(1)
+#define IXGBE_XDP_REDIR		BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_buff *xdp);
@@ -2242,7 +2243,7 @@ static struct sk_buff *ixgbe_run_xdp(str
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
 		if (!err)
-			result = IXGBE_XDP_TX;
+			result = IXGBE_XDP_REDIR;
 		else
 			result = IXGBE_XDP_CONSUMED;
 		break;
@@ -2302,7 +2303,7 @@ static int ixgbe_clean_rx_irq(struct ixg
 	unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 
 	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
@@ -2342,8 +2343,10 @@ static int ixgbe_clean_rx_irq(struct ixg
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2415,7 +2418,10 @@ static int ixgbe_clean_rx_irq(struct ixg
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & IXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
 		/* Force memory writes to complete before letting h/w
@@ -2423,8 +2429,6 @@ static int ixgbe_clean_rx_irq(struct ixg
 		 */
 		wmb();
 		writel(ring->next_to_use, ring->tail);
-
-		xdp_do_flush_map();
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
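
A side note on the IS_ERR(skb) hunk above: ixgbe_run_xdp() returns
its verdict smuggled through the skb pointer as ERR_PTR(-result),
which is why the new code recovers the bits with -PTR_ERR(skb). A
userspace sketch of that convention, with the helpers from the
kernel's <linux/err.h> reimplemented here purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define MY_XDP_TX	(1u << 1)
#define MY_XDP_REDIR	(1u << 2)

/* Illustrative stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
static void *err_ptr(long error)      { return (void *)error; }
static long  ptr_err(const void *ptr) { return (long)ptr; }
static int   is_err(const void *ptr)  { return (uintptr_t)ptr >= (uintptr_t)-4095; }

int main(void)
{
	/* The RX fast path returns ERR_PTR(-verdict) instead of a real skb ... */
	void *skb = err_ptr(-(long)(MY_XDP_TX | MY_XDP_REDIR));

	/* ... and the caller negates PTR_ERR() to get the verdict bits back. */
	if (is_err(skb)) {
		unsigned int xdp_res = (unsigned int)-ptr_err(skb);

		if (xdp_res & (MY_XDP_TX | MY_XDP_REDIR))
			printf("pending work bits: 0x%x\n", xdp_res);
	}
	return 0;
}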