RE: [PATCH RFC v2 03/15] vfio/nvgrace-gpu: track GPUs associated with the EGM regions

From: Shameer Kolothum Thodi

Date: Thu Feb 26 2026 - 10:16:36 EST




> -----Original Message-----
> From: Ankit Agrawal <ankita@xxxxxxxxxx>
> Sent: 23 February 2026 15:55
> To: Ankit Agrawal <ankita@xxxxxxxxxx>; Vikram Sethi <vsethi@xxxxxxxxxx>;
> Jason Gunthorpe <jgg@xxxxxxxxxx>; Matt Ochs <mochs@xxxxxxxxxx>;
> jgg@xxxxxxxx; Shameer Kolothum Thodi <skolothumtho@xxxxxxxxxx>;
> alex@xxxxxxxxxxx
> Cc: Neo Jia <cjia@xxxxxxxxxx>; Zhi Wang <zhiw@xxxxxxxxxx>; Krishnakant
> Jaju <kjaju@xxxxxxxxxx>; Yishai Hadas <yishaih@xxxxxxxxxx>;
> kevin.tian@xxxxxxxxx; kvm@xxxxxxxxxxxxxxx; linux-kernel@xxxxxxxxxxxxxxx
> Subject: [PATCH RFC v2 03/15] vfio/nvgrace-gpu: track GPUs associated with
> the EGM regions
>
> From: Ankit Agrawal <ankita@xxxxxxxxxx>
>
> Grace Blackwell systems could have multiple GPUs on a socket and
> thus are associated with the corresponding EGM region for that
> socket. Track the GPUs as a list.
>
> On the device probe, the device pci_dev struct is added to a
> linked list of the appropriate EGM region.
>
> Similarly on device remove, the pci_dev struct for the GPU
> is removed from the EGM region.
>
> Since the GPUs on a socket share the same EGM region, they have
> the same set of EGM region information. Skip the EGM region
> information fetch if it has already been done through a different
> GPU on the same socket.
>
> Signed-off-by: Ankit Agrawal <ankita@xxxxxxxxxx>
> ---
> drivers/vfio/pci/nvgrace-gpu/egm_dev.c | 29 ++++++++++++++++++++
> drivers/vfio/pci/nvgrace-gpu/egm_dev.h | 4 +++
> drivers/vfio/pci/nvgrace-gpu/main.c | 37 +++++++++++++++++++++++---
> include/linux/nvgrace-egm.h | 6 +++++
> 4 files changed, 72 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/vfio/pci/nvgrace-gpu/egm_dev.c
> b/drivers/vfio/pci/nvgrace-gpu/egm_dev.c
> index faf658723f7a..0bf95688a486 100644
> --- a/drivers/vfio/pci/nvgrace-gpu/egm_dev.c
> +++ b/drivers/vfio/pci/nvgrace-gpu/egm_dev.c
> @@ -17,6 +17,33 @@ int nvgrace_gpu_has_egm_property(struct pci_dev
> *pdev, u64 *pegmpxm)
> pegmpxm);
> }
>
> +int add_gpu(struct nvgrace_egm_dev *egm_dev, struct pci_dev *pdev)
> +{
> + struct gpu_node *node;
> +
> + node = kzalloc(sizeof(*node), GFP_KERNEL);
> + if (!node)
> + return -ENOMEM;
> +
> + node->pdev = pdev;
> +
> + list_add_tail(&node->list, &egm_dev->gpus);
> +
> + return 0;
> +}
> +
> +void remove_gpu(struct nvgrace_egm_dev *egm_dev, struct pci_dev *pdev)
> +{
> + struct gpu_node *node, *tmp;
> +
> + list_for_each_entry_safe(node, tmp, &egm_dev->gpus, list) {

Looks like this GPU list will also require a lock.
Can we get rid of this GPU list by having a refcount_t in struct nvgrace_egm_dev instead?

> + if (node->pdev == pdev) {
> + list_del(&node->list);
> + kfree(node);
> + }
> + }
> +}
> +
> static void nvgrace_gpu_release_aux_device(struct device *device)
> {
> struct auxiliary_device *aux_dev = container_of(device, struct
> auxiliary_device, dev);
> @@ -37,6 +64,8 @@ nvgrace_gpu_create_aux_device(struct pci_dev *pdev,
> const char *name,
> goto create_err;
>
> egm_dev->egmpxm = egmpxm;
> + INIT_LIST_HEAD(&egm_dev->gpus);
> +
> egm_dev->aux_dev.id = egmpxm;
> egm_dev->aux_dev.name = name;
> egm_dev->aux_dev.dev.release = nvgrace_gpu_release_aux_device;
> diff --git a/drivers/vfio/pci/nvgrace-gpu/egm_dev.h
> b/drivers/vfio/pci/nvgrace-gpu/egm_dev.h
> index c00f5288f4e7..1635753c9e50 100644
> --- a/drivers/vfio/pci/nvgrace-gpu/egm_dev.h
> +++ b/drivers/vfio/pci/nvgrace-gpu/egm_dev.h
> @@ -10,6 +10,10 @@
>
> int nvgrace_gpu_has_egm_property(struct pci_dev *pdev, u64 *pegmpxm);
>
> +int add_gpu(struct nvgrace_egm_dev *egm_dev, struct pci_dev *pdev);
> +
> +void remove_gpu(struct nvgrace_egm_dev *egm_dev, struct pci_dev *pdev);
> +
> struct nvgrace_egm_dev *
> nvgrace_gpu_create_aux_device(struct pci_dev *pdev, const char *name,
> u64 egmphys);
> diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-
> gpu/main.c
> index 23028e6e7192..3dd0c57e5789 100644
> --- a/drivers/vfio/pci/nvgrace-gpu/main.c
> +++ b/drivers/vfio/pci/nvgrace-gpu/main.c
> @@ -77,9 +77,10 @@ static struct list_head egm_dev_list;
>
> static int nvgrace_gpu_create_egm_aux_device(struct pci_dev *pdev)
> {
> - struct nvgrace_egm_dev_entry *egm_entry;
> + struct nvgrace_egm_dev_entry *egm_entry = NULL;
> u64 egmpxm;
> int ret = 0;
> + bool is_new_region = false;
>
> /*
> * EGM is an optional feature enabled in SBIOS. If disabled, there
> @@ -90,6 +91,19 @@ static int nvgrace_gpu_create_egm_aux_device(struct
> pci_dev *pdev)
> if (nvgrace_gpu_has_egm_property(pdev, &egmpxm))
> goto exit;
>
> + list_for_each_entry(egm_entry, &egm_dev_list, list) {
> + /*
> + * A system could have multiple GPUs associated with an
> + * EGM region and will have the same set of EGM region
> + * information. Skip the EGM region information fetch if
> + * already done through a different GPU on the same socket.
> + */
> + if (egm_entry->egm_dev->egmpxm == egmpxm)
> + goto add_gpu;
> + }
> +
> + is_new_region = true;
> +
> egm_entry = kzalloc(sizeof(*egm_entry), GFP_KERNEL);
> if (!egm_entry)
> return -ENOMEM;
> @@ -98,13 +112,24 @@ static int
> nvgrace_gpu_create_egm_aux_device(struct pci_dev *pdev)
> nvgrace_gpu_create_aux_device(pdev,
> NVGRACE_EGM_DEV_NAME,
> egmpxm);
> if (!egm_entry->egm_dev) {
> - kvfree(egm_entry);
> ret = -EINVAL;
> - goto exit;
> + goto free_egm_entry;
> }
>
> - list_add_tail(&egm_entry->list, &egm_dev_list);
> +add_gpu:
> + ret = add_gpu(egm_entry->egm_dev, pdev);
> + if (ret)
> + goto free_dev;
>
> + if (is_new_region)
> + list_add_tail(&egm_entry->list, &egm_dev_list);

So this is where you address the comment on the previous patch, I suppose...
If so, the commit description there needs to be updated accordingly.

> + return 0;
> +
> +free_dev:
> + if (is_new_region)
> + auxiliary_device_destroy(&egm_entry->egm_dev->aux_dev);
> +free_egm_entry:
> + kvfree(egm_entry);

If the add_gpu() above fails, you will end up here with an existing
egm_entry that might still be in use.

Thanks,
Shameer