[PATCH net-next, v2] net: mana: Trigger VF reset/recovery on health check failure due to HWC timeout

From: Dipayaan Roy

Date: Fri Feb 27 2026 - 03:15:13 EST


The GF stats periodic query is used as a mechanism to monitor HWC
health. If this HWC command times out, it is a strong indication that
the device/SoC is in a faulty state and requires recovery.

Today, when a timeout is detected, the driver marks
hwc_timeout_occurred, clears cached stats, and stops rescheduling the
periodic work. However, the device itself is left in the same failing
state.

Extend the timeout handling path to trigger the existing MANA VF
recovery service by queueing a GDMA_EQE_HWC_RESET_REQUEST work item.
This is expected to initiate the appropriate recovery flow: a
suspend/resume cycle first and, if that fails, a bus rescan.

This change is intentionally limited to HWC command timeouts and does
not trigger recovery for errors reported by the SoC as a normal command
response.

Signed-off-by: Dipayaan Roy <dipayanroy@xxxxxxxxxxxxxxxxxxx>
---
Changes in v2:
- Added common helper, proper clearing of gc flags.
---
---
.../net/ethernet/microsoft/mana/gdma_main.c | 65 ++++++++++---------
drivers/net/ethernet/microsoft/mana/mana_en.c | 9 ++-
include/net/mana/gdma.h | 16 ++++-
3 files changed, 55 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 37d2f108a839..aef8612b73cb 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -490,15 +490,9 @@ static void mana_serv_reset(struct pci_dev *pdev)
dev_info(&pdev->dev, "MANA reset cycle completed\n");

out:
- gc->in_service = false;
+ clear_bit(GC_IN_SERVICE, &gc->flags);
}

-struct mana_serv_work {
- struct work_struct serv_work;
- struct pci_dev *pdev;
- enum gdma_eqe_type type;
-};
-
static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev)
{
switch (type) {
@@ -558,12 +552,42 @@ static void mana_serv_func(struct work_struct *w)
module_put(THIS_MODULE);
}

+int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type)
+{
+ struct mana_serv_work *mns_wk;
+
+ if (test_and_set_bit(GC_IN_SERVICE, &gc->flags)) {
+ dev_info(gc->dev, "Already in service\n");
+ return -EBUSY;
+ }
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_info(gc->dev, "Module is unloading\n");
+ clear_bit(GC_IN_SERVICE, &gc->flags);
+ return -ENODEV;
+ }
+
+ mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+ if (!mns_wk) {
+ module_put(THIS_MODULE);
+ clear_bit(GC_IN_SERVICE, &gc->flags);
+ return -ENOMEM;
+ }
+
+ dev_info(gc->dev, "Start MANA service type:%d\n", type);
+ mns_wk->pdev = to_pci_dev(gc->dev);
+ mns_wk->type = type;
+ pci_dev_get(mns_wk->pdev);
+ INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+ schedule_work(&mns_wk->serv_work);
+ return 0;
+}
+
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
struct gdma_context *gc = eq->gdma_dev->gdma_context;
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
- struct mana_serv_work *mns_wk;
union gdma_eqe_info eqe_info;
enum gdma_eqe_type type;
struct gdma_event event;
@@ -623,30 +647,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
"Service is to be processed in probe\n");
break;
}
-
- if (gc->in_service) {
- dev_info(gc->dev, "Already in service\n");
- break;
- }
-
- if (!try_module_get(THIS_MODULE)) {
- dev_info(gc->dev, "Module is unloading\n");
- break;
- }
-
- mns_wk = kzalloc_obj(*mns_wk, GFP_ATOMIC);
- if (!mns_wk) {
- module_put(THIS_MODULE);
- break;
- }
-
- dev_info(gc->dev, "Start MANA service type:%d\n", type);
- gc->in_service = true;
- mns_wk->pdev = to_pci_dev(gc->dev);
- mns_wk->type = type;
- pci_dev_get(mns_wk->pdev);
- INIT_WORK(&mns_wk->serv_work, mana_serv_func);
- schedule_work(&mns_wk->serv_work);
+ mana_schedule_serv_work(gc, type);
break;

default:
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 933e9d681ded..56ee993e3a43 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -875,7 +875,7 @@ static void mana_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct gdma_context *gc = ac->gdma_dev->gdma_context;

/* Already in service, hence tx queue reset is not required.*/
- if (gc->in_service)
+ if (test_bit(GC_IN_SERVICE, &gc->flags))
return;

/* Note: If there are pending queue reset work for this port(apc),
@@ -3525,6 +3525,7 @@ static void mana_gf_stats_work_handler(struct work_struct *work)
{
struct mana_context *ac =
container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
int err;

err = mana_query_gf_stats(ac);
@@ -3532,6 +3533,12 @@ static void mana_gf_stats_work_handler(struct work_struct *work)
/* HWC timeout detected - reset stats and stop rescheduling */
ac->hwc_timeout_occurred = true;
memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+ dev_warn(gc->dev,
+ "Gf stats wk handler: gf stats query timed out.\n");
+ /* As HWC timed out, indicating a faulty HW state and needs a
+ * reset.
+ */
+ mana_schedule_serv_work(gc, GDMA_EQE_HWC_RESET_REQUEST);
return;
}
schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 766f4fb25e26..ec17004b10c0 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -215,6 +215,12 @@ enum gdma_page_type {

#define GDMA_INVALID_DMA_REGION 0

+struct mana_serv_work {
+ struct work_struct serv_work;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
struct gdma_mem_info {
struct device *dev;

@@ -386,6 +392,7 @@ struct gdma_irq_context {

enum gdma_context_flags {
GC_PROBE_SUCCEEDED = 0,
+ GC_IN_SERVICE = 1,
};

struct gdma_context {
@@ -411,7 +418,6 @@ struct gdma_context {
u32 test_event_eq_id;

bool is_pf;
- bool in_service;

phys_addr_t bar0_pa;
void __iomem *bar0_va;
@@ -473,6 +479,8 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

+int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type);
+
struct gdma_wqe {
u32 reserved :24;
u32 last_vbytes :8;
@@ -615,6 +623,9 @@ enum {
/* Driver can handle hardware recovery events during probe */
#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)

+/* Driver supports self recovery on Hardware Channel timeouts */
+#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY BIT(25)
+
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
@@ -628,7 +639,8 @@ enum {
GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
- GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY)
+ GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY | \
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY)

#define GDMA_DRV_CAP_FLAGS2 0

--
2.34.1