[PATCH v3 net 2/3] net: enetc: fix the issues of XDP_REDIRECT feature

From: Wei Fang
Date: Wed Oct 09 2024 - 05:20:31 EST


When testing the XDP_REDIRECT feature on the LS1028A platform, we
found a very reproducible issue where Tx frames could no longer be
sent out even after XDP_REDIRECT was turned off. Specifically, with
heavy traffic in the Rx direction, turning on XDP_REDIRECT may cause
the console to print warnings such as "timeout for tx ring #6
clear", and all redirected frames are dropped. The detailed log is
as follows.

root@ls1028ardb:~# ./xdp-bench redirect eno0 eno2
Redirecting from eno0 (ifindex 3; driver fsl_enetc) to eno2 (ifindex 4; driver fsl_enetc)
[203.849809] fsl_enetc 0000:00:00.2 eno2: timeout for tx ring #5 clear
[204.006051] fsl_enetc 0000:00:00.2 eno2: timeout for tx ring #6 clear
[204.161944] fsl_enetc 0000:00:00.2 eno2: timeout for tx ring #7 clear
eno0->eno2 1420505 rx/s 1420590 err,drop/s 0 xmit/s
xmit eno0->eno2 0 xmit/s 1420590 drop/s 0 drv_err/s 15.71 bulk-avg
eno0->eno2 1420484 rx/s 1420485 err,drop/s 0 xmit/s
xmit eno0->eno2 0 xmit/s 1420485 drop/s 0 drv_err/s 15.71 bulk-avg

By analyzing the XDP_REDIRECT implementation of the enetc driver,
we found two problems. First, the enetc driver reconfigures the Tx
and Rx BD rings when a bpf program is installed or uninstalled, but
there is no mechanism to block redirected frames while the BD rings
are being reconfigured. So introduce the ENETC_TX_DOWN flag to
prevent redirected frames from being attached to the Tx BD rings.
This blocks not only XDP_REDIRECT frames but also XDP_TX frames.
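
In code terms, both XDP transmit paths now test the flag before
touching any Tx BDs; condensed from the hunks in the diff below:

	/* ndo_xdp_xmit path (XDP_REDIRECT target), enetc_xdp_xmit() */
	if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
		return -ENETDOWN;

	/* XDP_TX verdict in enetc_clean_rx_ring_xdp() */
	if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
		enetc_xdp_drop(rx_ring, orig_i, i);
		tx_ring->stats.xdp_tx_drops++;
		break;
	}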

Second, in enetc_stop() the Tx BD rings are disabled first, and
only then does the driver wait for them to become empty. This is
not safe while a Tx BD ring is actively transmitting frames: the
ring may fail to drain and the hardware may end up in an exception
state. As described in the block guide of the LS1028A NETC,
software should only disable an active ring after all pending ring
entries have been consumed (i.e. when PI = CI). Disabling a
transmit ring that is actively processing BDs risks a HW-SW race
hazard whereby a hardware resource becomes assigned to work on one
or more ring entries, only to have those entries removed because
the ring has been disabled. So the correct behavior is for software
to first stop putting frames on the Tx BD rings (this is what
ENETC_TX_DOWN does), then wait for the Tx BD rings to become empty,
and finally disable the Tx BD rings.
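
The resulting ordering, condensed from the enetc_stop() and
enetc_start() hunks in the diff below:

	/* enetc_stop() */
	set_bit(ENETC_TX_DOWN, &priv->flags); /* block XDP_TX/XDP_REDIRECT */
	netif_tx_stop_all_queues(ndev);       /* block the regular xmit path */
	enetc_disable_rx_bdrs(priv);          /* stop producing new Rx work */
	/* ... quiesce IRQs and NAPI ... */
	enetc_wait_bdrs(priv);                /* wait for Tx rings to drain */
	enetc_disable_tx_bdrs(priv);          /* only now disable Tx rings */

Symmetrically, enetc_start() enables the Tx BD rings before the Rx
BD rings and clears ENETC_TX_DOWN only after the Tx queues have
been started, so no frame can reach a ring that is not yet enabled.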

Fixes: c33bfaf91c4c ("net: enetc: set up XDP program under enetc_reconfigure()")
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Wei Fang <wei.fang@xxxxxxx>
---
v2 changes:
Remove a blank line from the end of enetc_disable_tx_bdrs().
v3 changes:
Block the XDP_TX frames when ENETC_TX_DOWN flag is set.
---
drivers/net/ethernet/freescale/enetc/enetc.c | 50 ++++++++++++++++----
drivers/net/ethernet/freescale/enetc/enetc.h | 1 +
2 files changed, 41 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 56e59721ec7d..52da10f62430 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -902,6 +902,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)

if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
+ !test_bit(ENETC_TX_DOWN, &priv->flags) &&
(enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -1377,6 +1378,9 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
int xdp_tx_bd_cnt, i, k;
int xdp_tx_frm_cnt = 0;

+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags)))
+ return -ENETDOWN;
+
enetc_lock_mdio();

tx_ring = priv->xdp_tx_ring[smp_processor_id()];
@@ -1602,6 +1606,12 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
+ if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ tx_ring->stats.xdp_tx_drops++;
+ break;
+ }
+
xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
rx_ring,
orig_i, i);
@@ -2223,18 +2233,24 @@ static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}

-static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_enable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;

- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_enable_txbdr(hw, priv->tx_ring[i]);
-
for (i = 0; i < priv->num_rx_rings; i++)
enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}

+static void enetc_enable_tx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_enable_txbdr(hw, priv->tx_ring[i]);
+}
+
static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
@@ -2251,18 +2267,24 @@ static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
}

-static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_disable_rx_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;

- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_disable_txbdr(hw, priv->tx_ring[i]);
-
for (i = 0; i < priv->num_rx_rings; i++)
enetc_disable_rxbdr(hw, priv->rx_ring[i]);
}

+static void enetc_disable_tx_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_disable_txbdr(hw, priv->tx_ring[i]);
+}
+
static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
int delay = 8, timeout = 100;
@@ -2452,6 +2474,8 @@ void enetc_start(struct net_device *ndev)

enetc_setup_interrupts(priv);

+ enetc_enable_tx_bdrs(priv);
+
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
ENETC_BDR_INT_BASE_IDX + i);
@@ -2460,9 +2484,11 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}

- enetc_enable_bdrs(priv);
+ enetc_enable_rx_bdrs(priv);

netif_tx_start_all_queues(ndev);
+
+ clear_bit(ENETC_TX_DOWN, &priv->flags);
}
EXPORT_SYMBOL_GPL(enetc_start);

@@ -2520,9 +2546,11 @@ void enetc_stop(struct net_device *ndev)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int i;

+ set_bit(ENETC_TX_DOWN, &priv->flags);
+
netif_tx_stop_all_queues(ndev);

- enetc_disable_bdrs(priv);
+ enetc_disable_rx_bdrs(priv);

for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
@@ -2535,6 +2563,8 @@ void enetc_stop(struct net_device *ndev)

enetc_wait_bdrs(priv);

+ enetc_disable_tx_bdrs(priv);
+
enetc_clear_interrupts(priv);
}
EXPORT_SYMBOL_GPL(enetc_stop);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 97524dfa234c..fb7d98d57783 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -325,6 +325,7 @@ enum enetc_active_offloads {

enum enetc_flags_bit {
ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
+ ENETC_TX_DOWN,
};

/* interrupt coalescing modes */
--
2.34.1