[PATCH v1 1/1] vfio/nvgrace-gpu: Add Blackwell-Next GPU readiness check via CXL DVSEC

From: ankita

Date: Mon Mar 30 2026 - 01:45:37 EST


From: Ankit Agrawal <ankita@xxxxxxxxxx>

Blackwell-Next GPUs report device readiness via the CXL DVSEC Range 1 Low
register (offset 0x1C) instead of the BAR0 HBM training register used by
GB200. The GPU memory readiness is determined by polling the Memory_Active
bit (bit 1) for up to the duration encoded in the Memory_Active_Timeout
field (bits 15:13).

Add runtime detection by checking for the presence of the CXL Device
DVSEC capability. Wire a wait_device_ready ops pointer on
nvgrace_gpu_pci_core_device, which is set at probe to either the
Blackwell-Next (CXL DVSEC) or legacy variant.

Signed-off-by: Ankit Agrawal <ankita@xxxxxxxxxx>
---
drivers/vfio/pci/nvgrace-gpu/main.c | 80 ++++++++++++++++++++++++++---
1 file changed, 72 insertions(+), 8 deletions(-)

diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
index fa056b69f899..8b6b3577a8ea 100644
--- a/drivers/vfio/pci/nvgrace-gpu/main.c
+++ b/drivers/vfio/pci/nvgrace-gpu/main.c
@@ -34,6 +34,12 @@
#define HBM_TRAINING_BAR0_OFFSET 0x200BC
#define STATUS_READY 0xFF

+#define CXL_DEVICE_DVSEC_ID 0
+#define CXL_DVSEC_RANGE_1_LOW 0x1C
+#define CXL_DVSEC_MEMORY_VALID BIT(0)
+#define CXL_DVSEC_MEMORY_ACTIVE BIT(1)
+#define CXL_DVSEC_MEMORY_ACTIVE_TIMEOUT GENMASK(15, 13)
+
#define POLL_QUANTUM_MS 1000
#define POLL_TIMEOUT_MS (30 * 1000)

@@ -64,6 +70,10 @@ struct nvgrace_gpu_pci_core_device {
bool has_mig_hw_bug;
/* GPU has just been reset */
bool reset_done;
+ /* CXL Device DVSEC offset; 0 if not present (legacy GB path) */
+ int cxl_dvsec;
+ int (*wait_device_ready)(struct nvgrace_gpu_pci_core_device *nvdev,
+ void __iomem *io);
};

static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev)
@@ -242,7 +252,8 @@ static void nvgrace_gpu_close_device(struct vfio_device *core_vdev)
vfio_pci_core_close_device(core_vdev);
}

-static int nvgrace_gpu_wait_device_ready(void __iomem *io)
+static int nvgrace_gpu_wait_device_ready_legacy(struct nvgrace_gpu_pci_core_device *nvdev,
+ void __iomem *io)
{
unsigned long timeout = jiffies + msecs_to_jiffies(POLL_TIMEOUT_MS);

@@ -256,6 +267,52 @@ static int nvgrace_gpu_wait_device_ready(void __iomem *io)
return -ETIME;
}

+/*
+ * Decode the 3-bit Memory_Active_Timeout field from CXL DVSEC Range 1 Low
+ * (bits 15:13) into milliseconds. Encoding per CXL spec r4.0 sec 8.1.3.8.2:
+ * 000b = 1s, 001b = 4s, 010b = 16s, 011b = 64s, 100b = 256s,
+ * 101b-111b = reserved (clamped to 256s).
+ */
+static inline unsigned long nvgrace_gpu_cxl_mem_active_timeout_ms(u8 timeout)
+{
+ return 1000UL << (2 * min_t(u8, timeout, 4));
+}
+
+static int nvgrace_gpu_wait_device_ready_bw_next(struct nvgrace_gpu_pci_core_device *nvdev,
+ void __iomem *io)
+{
+ struct pci_dev *pdev = nvdev->core_device.pdev;
+ int pcie_dvsec = nvdev->cxl_dvsec;
+ unsigned long timeout;
+ u32 dvsec_memory_status;
+ u8 mem_active_timeout;
+
+ pci_read_config_dword(pdev, pcie_dvsec + CXL_DVSEC_RANGE_1_LOW,
+ &dvsec_memory_status);
+
+ if (!(dvsec_memory_status & CXL_DVSEC_MEMORY_VALID))
+ return -ENODEV;
+
+ mem_active_timeout = FIELD_GET(CXL_DVSEC_MEMORY_ACTIVE_TIMEOUT,
+ dvsec_memory_status);
+
+ timeout = jiffies +
+ msecs_to_jiffies(nvgrace_gpu_cxl_mem_active_timeout_ms(mem_active_timeout));
+
+ do {
+ pci_read_config_dword(pdev,
+ pcie_dvsec + CXL_DVSEC_RANGE_1_LOW,
+ &dvsec_memory_status);
+
+ if (dvsec_memory_status & CXL_DVSEC_MEMORY_ACTIVE)
+ return 0;
+
+ msleep(POLL_QUANTUM_MS);
+ } while (!time_after(jiffies, timeout));
+
+ return -ETIME;
+}
+
/*
* If the GPU memory is accessed by the CPU while the GPU is not ready
* after reset, it can cause harmless corrected RAS events to be logged.
@@ -275,7 +332,7 @@ nvgrace_gpu_check_device_ready(struct nvgrace_gpu_pci_core_device *nvdev)
if (!__vfio_pci_memory_enabled(vdev))
return -EIO;

- ret = nvgrace_gpu_wait_device_ready(vdev->barmap[0]);
+ ret = nvdev->wait_device_ready(nvdev, vdev->barmap[0]);
if (ret)
return ret;

@@ -1146,8 +1203,9 @@ static bool nvgrace_gpu_has_mig_hw_bug(struct pci_dev *pdev)
* Ensure that the BAR0 region is enabled before accessing the
* registers.
*/
-static int nvgrace_gpu_probe_check_device_ready(struct pci_dev *pdev)
+static int nvgrace_gpu_probe_check_device_ready(struct nvgrace_gpu_pci_core_device *nvdev)
{
+ struct pci_dev *pdev = nvdev->core_device.pdev;
void __iomem *io;
int ret;

@@ -1165,7 +1223,7 @@ static int nvgrace_gpu_probe_check_device_ready(struct pci_dev *pdev)
goto iomap_exit;
}

- ret = nvgrace_gpu_wait_device_ready(io);
+ ret = nvdev->wait_device_ready(nvdev, io);

pci_iounmap(pdev, io);
iomap_exit:
@@ -1183,10 +1241,6 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev,
u64 memphys, memlength;
int ret;

- ret = nvgrace_gpu_probe_check_device_ready(pdev);
- if (ret)
- return ret;
-
ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength);
if (!ret)
ops = &nvgrace_gpu_pci_ops;
@@ -1198,6 +1252,16 @@ static int nvgrace_gpu_probe(struct pci_dev *pdev,

dev_set_drvdata(&pdev->dev, &nvdev->core_device);

+ nvdev->cxl_dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
+ CXL_DEVICE_DVSEC_ID);
+ nvdev->wait_device_ready = nvdev->cxl_dvsec ?
+ nvgrace_gpu_wait_device_ready_bw_next :
+ nvgrace_gpu_wait_device_ready_legacy;
+
+ ret = nvgrace_gpu_probe_check_device_ready(nvdev);
+ if (ret)
+ goto out_put_vdev;
+
if (ops == &nvgrace_gpu_pci_ops) {
nvdev->has_mig_hw_bug = nvgrace_gpu_has_mig_hw_bug(pdev);

--
2.34.1