[PATCH 09/22] iommu/amd: Introduce domain for IOMMU Private Address (IPA) region

From: Suravee Suthikulpanit

Date: Mon Mar 30 2026 - 04:59:40 EST


AMD vIOMMU introduces the IOMMU Private Address (IPA) region, which is
used to manage data structures necessary for IOMMU virtualization within
the guest.

Introduce a new domain specifically for the IPA region of each IOMMU, which
is stored in struct amd_iommu.viommu_pdom. This domain uses the AMD IOMMU v1
page table format.

For more information, please see the "vIOMMU Private Address Space" section
of the IOMMU specification [1].

[1] https://docs.amd.com/v/u/en-US/48882_3.10_PUB

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
drivers/iommu/amd/amd_iommu.h | 3 ++
drivers/iommu/amd/amd_iommu_types.h | 3 ++
drivers/iommu/amd/iommu.c | 6 +--
drivers/iommu/amd/viommu.c | 76 +++++++++++++++++++++++++++++
4 files changed, 85 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index f1fafa21777d..ad88c4118719 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -28,6 +28,7 @@ void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
gfp_t gfp, size_t size);
u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end);
+int iommu_flush_dte(struct amd_iommu *iommu, u16 devid);

#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(void);
@@ -36,6 +37,8 @@ static inline void amd_iommu_debugfs_setup(void) {}
#endif

extern bool amd_iommu_viommu;
+extern const struct pt_iommu_driver_ops amd_hw_driver_ops_v1;
+extern const struct iommu_domain_ops amdv1_ops;

/* Needed for interrupt remapping */
int amd_iommu_prepare(void);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 53854a4f4307..36ca9003dd88 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -807,6 +807,9 @@ struct amd_iommu {
/* IOPF support */
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[32];
+
+ /* HW vIOMMU support */
+ struct protection_domain *viommu_pdom;
};

static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 191a52b657c1..07a0314a3fdc 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1546,7 +1546,7 @@ static void domain_flush_complete(struct protection_domain *domain)
amd_iommu_completion_wait(pdom_iommu_info->iommu);
}

-static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
+int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
struct iommu_cmd cmd;

@@ -2720,12 +2720,12 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
iommu_put_pages_list(&gather->freelist);
}

-static const struct pt_iommu_driver_ops amd_hw_driver_ops_v1 = {
+const struct pt_iommu_driver_ops amd_hw_driver_ops_v1 = {
.get_top_lock = amd_iommu_get_top_lock,
.change_top = amd_iommu_change_top,
};

-static const struct iommu_domain_ops amdv1_ops = {
+const struct iommu_domain_ops amdv1_ops = {
IOMMU_PT_DOMAIN_OPS(amdv1),
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
diff --git a/drivers/iommu/amd/viommu.c b/drivers/iommu/amd/viommu.c
index 76198bf4f4f6..2a6339076c6e 100644
--- a/drivers/iommu/amd/viommu.c
+++ b/drivers/iommu/amd/viommu.c
@@ -92,6 +92,78 @@ static int __init viommu_vf_vfcntl_init(struct amd_iommu *iommu)
return 0;
}

+static struct iommu_domain *
+viommu_domain_alloc(struct amd_iommu *iommu)
+{
+ int ret;
+ struct pt_iommu_amdv1_cfg cfg = {};
+ struct protection_domain *domain;
+
+ domain = protection_domain_alloc();
+ if (!domain)
+ return NULL;
+
+ domain->pd_mode = PD_MODE_V1;
+ domain->iommu.driver_ops = &amd_hw_driver_ops_v1;
+ domain->iommu.nid = dev_to_node(&iommu->dev->dev);
+
+ cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
+ BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
+ BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
+ cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
+ cfg.common.hw_max_vasz_lg2 =
+ min(64, (amd_iommu_hpt_level - 1) * 9 + 21);
+ cfg.common.hw_max_oasz_lg2 = 52;
+ cfg.starting_level = 2;
+ domain->domain.ops = &amdv1_ops;
+
+ ret = pt_iommu_amdv1_init(&domain->amdv1, &cfg, GFP_KERNEL);
+ if (ret) {
+ amd_iommu_domain_free(&domain->domain);
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Narrow the supported page sizes to those selected by the kernel
+ * command line.
+ */
+ domain->domain.pgsize_bitmap &= amd_iommu_pgsize_bitmap;
+ domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+
+ return &domain->domain;
+}
+
+static int viommu_private_space_init(struct amd_iommu *iommu)
+{
+ struct iommu_domain *dom;
+ struct protection_domain *pdom;
+ struct pt_iommu_amdv1_hw_info pt_info;
+
+ /*
+ * Setup page table root pointer, Guest MMIO and
+ * Cmdbuf Dirty Status regions.
+ */
+ dom = viommu_domain_alloc(iommu);
+ if (!dom) {
+ pr_err("%s: Failed to initialize private space\n", __func__);
+ goto err_out;
+ }
+
+ pdom = to_pdomain(dom);
+ iommu->viommu_pdom = pdom;
+
+ pt_iommu_amdv1_hw_info(&pdom->amdv1, &pt_info);
+ pr_debug("%s: devid=%#x, pte_root=%#llx\n",
+ __func__, iommu->devid,
+ (unsigned long long)pt_info.host_pt_root);
+
+ return 0;
+err_out:
+ if (dom)
+ amd_iommu_domain_free(dom);
+ return -ENOMEM;
+}
+
/*
* Returns VF MMIO BAR offset for the given guest ID which will be
* mapped to guest vIOMMU 3rd 4K MMIO address
@@ -119,5 +191,9 @@ int __init amd_viommu_init(struct amd_iommu *iommu)
if (ret)
return ret;

+ ret = viommu_private_space_init(iommu);
+ if (ret)
+ return ret;
+
return 0;
}
--
2.34.1