[net-next v3 14/15] net: marvell: Convert tasklet API to new bottom half workqueue mechanism

From: Allen Pais
Date: Tue Jul 30 2024 - 14:39:11 EST


Migrate tasklet APIs to the new bottom half workqueue mechanism.
Replace all occurrences of tasklet usage with the equivalent bottom
half workqueue APIs throughout the Marvell drivers. BH work items
execute in softirq context, so the conversion preserves the tasklets'
execution semantics while moving the drivers to the current
deferred-work design.
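
For reference, the conversion follows the pattern below. This is a
minimal sketch against a hypothetical "foo" driver, not code from this
patch; only the workqueue and tasklet calls are the real APIs,
everything else is illustrative:

  #include <linux/workqueue.h>

  struct foo_hw {
          /* was: struct tasklet_struct phy_task; */
          struct work_struct phy_bh_work;
  };

  static void foo_phy_bh_work(struct work_struct *work)
  {
          /* from_work() replaces from_tasklet() for container lookup. */
          struct foo_hw *hw = from_work(hw, work, phy_bh_work);

          /* ... deferred PHY handling, as the tasklet handler did ... */
  }

  static void foo_probe(struct foo_hw *hw)
  {
          /* was: tasklet_setup(&hw->phy_task, foo_phy_task); */
          INIT_WORK(&hw->phy_bh_work, foo_phy_bh_work);
  }

  static void foo_intr(struct foo_hw *hw)
  {
          /* was: tasklet_schedule(&hw->phy_task); */
          queue_work(system_bh_wq, &hw->phy_bh_work);
  }

  static void foo_remove(struct foo_hw *hw)
  {
          /* was: tasklet_kill(&hw->phy_task); */
          cancel_work_sync(&hw->phy_bh_work);
  }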

Signed-off-by: Allen Pais <allen.lkml@xxxxxxxxx>
---
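[ Note, below the fold: the smp_processor_id() reasoning kept in the
  mvpp2 hunk depends on BH work items executing in softirq context,
  just as tasklets did. A minimal sketch of that assumption follows;
  the handler name is hypothetical, not code from this patch:

    #include <linux/preempt.h>
    #include <linux/workqueue.h>

    static void foo_bh_work(struct work_struct *work)
    {
            /* Queued on system_bh_wq, this executes as a bottom
             * half: in_softirq() is true, migration is disabled on
             * non-PREEMPT_RT, and smp_processor_id() is safe here.
             */
            WARN_ON_ONCE(!in_softirq());
    }
]
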
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c |  9 ++++++---
drivers/net/ethernet/marvell/skge.c             | 12 ++++++------
drivers/net/ethernet/marvell/skge.h             |  3 ++-
3 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 8c45ad983abc..adffbbd20962 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2628,9 +2628,12 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
* The number of sent descriptors is returned.
* Per-thread access
*
- * Called only from mvpp2_txq_done(), called from mvpp2_tx()
- * (migration disabled) and from the TX completion tasklet (migration
- * disabled) so using smp_processor_id() is OK.
+ * Called only from mvpp2_txq_done().
+ *
+ * Historically, this function was invoked directly from mvpp2_tx()
+ * (with migration disabled) and from the TX completion tasklet. The
+ * bh work replacing that tasklet also runs in softirq context with
+ * migration disabled, so using smp_processor_id() is still OK here.
*/
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index fcfb34561882..4448af079447 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3342,13 +3342,13 @@ static void skge_error_irq(struct skge_hw *hw)
}

/*
- * Interrupt from PHY are handled in tasklet (softirq)
+ * Interrupts from the PHY are handled in bh work (softirq)
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(struct tasklet_struct *t)
+static void skge_extirq(struct work_struct *work)
{
- struct skge_hw *hw = from_tasklet(hw, t, phy_task);
+ struct skge_hw *hw = from_work(hw, work, phy_bh_work);
int port;

for (port = 0; port < hw->ports; port++) {
@@ -3389,7 +3389,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
status &= hw->intr_mask;
if (status & IS_EXT_REG) {
hw->intr_mask &= ~IS_EXT_REG;
- tasklet_schedule(&hw->phy_task);
+ queue_work(system_bh_wq, &hw->phy_bh_work);
}

if (status & (IS_XA1_F|IS_R1_F)) {
@@ -3937,7 +3937,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->pdev = pdev;
spin_lock_init(&hw->hw_lock);
spin_lock_init(&hw->phy_lock);
- tasklet_setup(&hw->phy_task, skge_extirq);
+ INIT_WORK(&hw->phy_bh_work, skge_extirq);

hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
@@ -4035,7 +4035,7 @@ static void skge_remove(struct pci_dev *pdev)
dev0 = hw->dev[0];
unregister_netdev(dev0);

- tasklet_kill(&hw->phy_task);
+ cancel_work_sync(&hw->phy_bh_work);

spin_lock_irq(&hw->hw_lock);
hw->intr_mask = 0;
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
index f72217348eb4..0cf77f4b1c57 100644
--- a/drivers/net/ethernet/marvell/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -5,6 +5,7 @@
#ifndef _SKGE_H
#define _SKGE_H
#include <linux/interrupt.h>
+#include <linux/workqueue.h>

/* PCI config registers */
#define PCI_DEV_REG1 0x40
@@ -2418,7 +2419,7 @@ struct skge_hw {
u32 ram_offset;
u16 phy_addr;
spinlock_t phy_lock;
- struct tasklet_struct phy_task;
+ struct work_struct phy_bh_work;

char irq_name[]; /* skge@pci:000:04:00.0 */
};
--
2.34.1