Yeah, when I read this part again, I don't think it's needed.

diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 3088cdd08f35..ba96776d5854 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -308,6 +308,8 @@
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_POPTS_SMD_V 0x10 /* Transmitted packet is a SMD-Verify */
+#define IGC_TXD_POPTS_SMD_R 0x20 /* Transmitted packet is a SMD-Response */
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
@@ -370,9 +372,13 @@
#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define IGC_RXDEXT_STATERR_LB 0x00040000
+#define IGC_RXD_STAT_SMD_V 0x2000 /* SMD-Verify packet */
+#define IGC_RXD_STAT_SMD_R 0x4000 /* SMD-Response packet */
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_SMD_TYPE_MASK 0x06000
+#define IGC_RXDADV_STAT_SMD_TYPE_SHIFT 13
#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 1954561ec4aa..7cde0e5a7320 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1788,6 +1788,7 @@ static int igc_ethtool_set_mm(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct fpe_t *fpe = &adapter->fpe;
+ bool verify_enabled_changed;
if (cmd->tx_min_frag_size < IGC_TX_MIN_FRAG_SIZE ||
cmd->tx_min_frag_size > IGC_TX_MAX_FRAG_SIZE)
@@ -1805,7 +1806,12 @@ static int igc_ethtool_set_mm(struct net_device *netdev,
fpe->tx_enabled = cmd->tx_enabled;
fpe->pmac_enabled = cmd->pmac_enabled;
- fpe->verify_enabled = cmd->verify_enabled;
+ verify_enabled_changed = (cmd->verify_enabled != fpe->verify_enabled);
I wonder if it's worth using an intermediate variable when the result is
only evaluated once. The intention is already clear enough, since you
call a function named igc_fpe_verify_enabled_changed(). Something like
the sketch below the quoted hunk is what I had in mind.
+
+ if (verify_enabled_changed) {
+ fpe->verify_enabled = cmd->verify_enabled;
+ igc_fpe_verify_enabled_changed(fpe);
+ }
return igc_tsn_offload_apply(adapter);
}
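
Untested sketch, just to illustrate; it only reuses the names already in
your patch:

	/* Only notify the FPE code when the verify setting actually changes */
	if (cmd->verify_enabled != fpe->verify_enabled) {
		fpe->verify_enabled = cmd->verify_enabled;
		igc_fpe_verify_enabled_changed(fpe);
	}
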
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index b85eaf34d07b..e184959ef218 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2534,7 +2534,7 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
}
/* This function assumes __netif_tx_lock is held by the caller. */
-static void igc_flush_tx_descriptors(struct igc_ring *ring)
+void igc_flush_tx_descriptors(struct igc_ring *ring)
{
/* Once tail pointer is updated, hardware can fetch the descriptors
* any time so we issue a write membar here to ensure all memory
@@ -2585,6 +2585,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
int xdp_status = 0, rx_buffer_pgcnt;
+ int smd_type;
while (likely(total_packets < budget)) {
struct igc_xdp_buff ctx = { .rx_ts = NULL };
@@ -2622,6 +2623,18 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
size -= IGC_TS_HDR_LEN;
}
+ smd_type = igc_fpe_get_smd_type(rx_desc->wb.upper.status_error);
+
+ if (igc_fpe_is_verify_or_response(smd_type, size)) {
+ igc_fpe_preprocess_verify_response(&adapter->fpe,
+ smd_type);
+
+ /* Advance the ring next-to-clean */
+ igc_is_non_eop(rx_ring, rx_desc);
+ cleaned_count++;
+ continue;
+ }
+
Premature optimization is the root of all evil, I know, but in the
future it might be interesting to add a static key here that gets
incremented (enabled) based on pmac_enabled, so that the fast path does
not suffer a performance penalty just because frame preemption is
supported in the kernel, regardless of whether it is actually enabled.
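
Roughly what I had in mind, purely as a sketch (the key name is made up,
it needs #include <linux/jump_label.h>, and the enable/disable side
would have to be kept in sync with pmac_enabled):

/* somewhere in igc_main.c */
DEFINE_STATIC_KEY_FALSE(igc_fpe_smd_key);

/* slow path, e.g. when pmac_enabled changes in igc_ethtool_set_mm() */
	if (cmd->pmac_enabled)
		static_branch_enable(&igc_fpe_smd_key);
	else
		static_branch_disable(&igc_fpe_smd_key);

/* fast path in igc_clean_rx_irq(); the branch is patched to a nop and
 * the SMD handling is skipped entirely while the key is disabled
 */
	if (static_branch_unlikely(&igc_fpe_smd_key)) {
		smd_type = igc_fpe_get_smd_type(rx_desc->wb.upper.status_error);

		if (igc_fpe_is_verify_or_response(smd_type, size)) {
			igc_fpe_preprocess_verify_response(&adapter->fpe,
							   smd_type);

			/* Advance the ring next-to-clean */
			igc_is_non_eop(rx_ring, rx_desc);
			cleaned_count++;
			continue;
		}
	}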