[PATCH net-next v9 5/7] 8139cp: Implement ndo_set_rx_mode_async callback

From: I Viswanath

Date: Sat Mar 14 2026 - 14:31:35 EST


Implement the ndo_set_rx_mode_async callback and update
the driver to use the snapshot/commit model for RX mode updates.

Signed-off-by: I Viswanath <viswanathiyyappan@xxxxxxxxx>
---

Call paths involving netif_set_rx_mode in 8139cp

netif_set_rx_mode
|-- cp_init_hw
| |-- cp_open (ndo_open, takes lock)
| | `-- cp_change_mtu (ndo_change_mtu, takes lock)
| |
| `-- cp_resume (lock added)
|
`-- cp_tx_timeout (ndo_tx_timeout, takes lock)

drivers/net/ethernet/realtek/8139cp.c | 49 ++++++++++++++++++---------
1 file changed, 33 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5652da8a178c..9651a0d9d8f0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -372,7 +372,6 @@ struct cp_private {
} while (0)


-static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -885,30 +884,31 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
/* Set or clear the multicast filter for this adaptor.
This routine is not state sensitive and need not be SMP locked. */

-static void __cp_set_rx_mode (struct net_device *dev)
+static void cp_set_rx_mode_async(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
+ char *ha_addr;
int rx_mode;
+ int ni;

/* Note: do not reorder, GCC is clever about common statements. */
- if (dev->flags & IFF_PROMISC) {
+ if (netif_get_rx_mode_cfg(dev, NETIF_RX_MODE_CFG_PROMISC)) {
/* Unconditionally log net taps. */
rx_mode =
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netif_rx_mode_mc_count(dev) > multicast_filter_limit) ||
+ netif_get_rx_mode_cfg(dev, NETIF_RX_MODE_CFG_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
- struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ netif_rx_mode_for_each_mc_addr(ha_addr, dev, ni) {
+ int bit_nr = ether_crc(ETH_ALEN, ha_addr) >> 26;

mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
@@ -925,12 +925,14 @@ static void __cp_set_rx_mode (struct net_device *dev)

static void cp_set_rx_mode (struct net_device *dev)
{
- unsigned long flags;
- struct cp_private *cp = netdev_priv(dev);
+ bool allmulti = !!(dev->flags & IFF_ALLMULTI);
+ bool promisc = !!(dev->flags & IFF_PROMISC);

- spin_lock_irqsave (&cp->lock, flags);
- __cp_set_rx_mode(dev);
- spin_unlock_irqrestore (&cp->lock, flags);
+ netif_set_rx_mode_flag(dev, NETIF_RX_MODE_UC_SKIP, true);
+ netif_set_rx_mode_flag(dev, NETIF_RX_MODE_MC_SKIP, promisc | allmulti);
+
+ netif_set_rx_mode_cfg(dev, NETIF_RX_MODE_CFG_ALLMULTI, allmulti);
+ netif_set_rx_mode_cfg(dev, NETIF_RX_MODE_CFG_PROMISC, promisc);
}

static void __cp_get_stats(struct cp_private *cp)
@@ -1040,7 +1042,7 @@ static void cp_init_hw (struct cp_private *cp)
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

- __cp_set_rx_mode(dev);
+ netif_set_rx_mode(dev);
cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
@@ -1262,7 +1264,7 @@ static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
cp_clean_rings(cp);
cp_init_rings(cp);
cp_start_hw(cp);
- __cp_set_rx_mode(dev);
+ netif_set_rx_mode(dev);
cpw16_f(IntrMask, cp_norx_intr_mask);

netif_wake_queue(dev);
@@ -1870,6 +1872,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = cp_set_mac_address,
.ndo_set_rx_mode = cp_set_rx_mode,
+ .ndo_set_rx_mode_async = cp_set_rx_mode_async,
.ndo_get_stats = cp_get_stats,
.ndo_eth_ioctl = cp_ioctl,
.ndo_start_xmit = cp_start_xmit,
@@ -2071,7 +2074,7 @@ static int __maybe_unused cp_suspend(struct device *device)
spin_unlock_irqrestore (&cp->lock, flags);

device_set_wakeup_enable(device, cp->wol_enabled);
-
+ netif_disable_async_ops(dev);
return 0;
}

@@ -2081,6 +2084,8 @@ static int __maybe_unused cp_resume(struct device *device)
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;

+ netif_enable_async_ops(dev);
+
if (!netif_running(dev))
return 0;

@@ -2088,7 +2093,11 @@ static int __maybe_unused cp_resume(struct device *device)

/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
cp_init_rings_index (cp);
+
+ rtnl_lock();
cp_init_hw (cp);
+ rtnl_unlock();
+
cp_enable_irq(cp);
netif_start_queue (dev);

@@ -2101,6 +2110,13 @@ static int __maybe_unused cp_resume(struct device *device)
return 0;
}

+static void cp_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ netif_disable_async_ops(dev);
+}
+
static const struct pci_device_id cp_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
@@ -2116,6 +2132,7 @@ static struct pci_driver cp_driver = {
.probe = cp_init_one,
.remove = cp_remove_one,
.driver.pm = &cp_pm_ops,
+ .shutdown = &cp_shutdown
};

module_pci_driver(cp_driver);
--
2.47.3