[PATCH v1 3/4] iommu/arm-smmu-v3: Decouple vmid from S2 nest_parent domain

From: Nicolin Chen
Date: Wed Mar 05 2025 - 00:06:23 EST


An S2 nest_parent domain can be shared across vSMMUs in the same VM, since
the S2 domain is basically the IPA mappings for the entire RAM of the VM.

Meanwhile, each vSMMU can have its own VMID, so the VMID allocation should
be done per vSMMU instance rather than per S2 nest_parent domain.

However, an S2 domain can also be allocated when a physical SMMU instance
doesn't support S1, so the structure still has to retain the s2_cfg and
its vmid for that case.
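
With that, the union in struct arm_smmu_domain ends up covering three
cases (condensed from the header change below):

    union {
        struct arm_smmu_ctx_desc cd;   /* S1 */
        struct arm_smmu_s2_cfg s2_cfg; /* S2 && !nest_parent */
        struct {                       /* S2 && nest_parent */
            struct list_head list;
            spinlock_t lock;
        } vsmmus;
    };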

Allocate a vmid for a vSMMU instance in arm_vsmmu_alloc() and add a proper
arm_vsmmu_destroy() to clean it up.

Add a "vsmmus" list, protected by a spinlock, to the S2 nest_parent
domain, so that S2 invalidations can iterate over the VMIDs of all the
vSMMU instances created for the same VM.
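
An S2 invalidation on a nest_parent domain then loops over that list,
e.g. for a range invalidation (condensed from the
arm_smmu_tlb_inv_range_domain() change below):

    cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
    spin_lock_irqsave(&smmu_domain->vsmmus.lock, flags);
    list_for_each_entry_safe(vsmmu, next, &smmu_domain->vsmmus.list,
                             vsmmus_elm) {
        cmd.tlbi.vmid = vsmmu->vmid;
        __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
    }
    spin_unlock_irqrestore(&smmu_domain->vsmmus.lock, flags);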

Signed-off-by: Nicolin Chen <nicolinc@xxxxxxxxxx>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 10 +++-
.../arm/arm-smmu-v3/arm-smmu-v3-iommufd.c | 35 ++++++++++++--
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 47 +++++++++++++++----
3 files changed, 79 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 3336d196062c..1f6696bc4f6c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -849,8 +849,12 @@ struct arm_smmu_domain {

 	enum arm_smmu_domain_stage	stage;
 	union {
-		struct arm_smmu_ctx_desc	cd;
-		struct arm_smmu_s2_cfg		s2_cfg;
+		struct arm_smmu_ctx_desc	cd;	/* S1 */
+		struct arm_smmu_s2_cfg		s2_cfg;	/* S2 && !nest_parent */
+		struct {				/* S2 && nest_parent */
+			struct list_head	list;
+			spinlock_t		lock;
+		} vsmmus;
 	};
 
 	struct iommu_domain		domain;
@@ -1049,6 +1053,8 @@ struct arm_vsmmu {
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *s2_parent;
 	u16 vmid;
+
+	struct list_head vsmmus_elm; /* arm_smmu_domain::vsmmus::list */
 };

#if IS_ENABLED(CONFIG_ARM_SMMU_V3_IOMMUFD)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index ff8b550159f2..2c5a9d0abed5 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -30,6 +30,23 @@ void *arm_smmu_hw_info(struct device *dev, u32 *length, u32 *type)
 	return info;
 }
 
+static void arm_vsmmu_destroy(struct iommufd_viommu *viommu)
+{
+	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
+	struct arm_smmu_device *smmu = vsmmu->smmu;
+	struct arm_smmu_cmdq_ent cmd = {
+		.opcode = CMDQ_OP_TLBI_S12_VMALL,
+		.tlbi.vmid = vsmmu->vmid,
+	};
+	unsigned long flags;
+
+	spin_lock_irqsave(&vsmmu->s2_parent->vsmmus.lock, flags);
+	list_del(&vsmmu->vsmmus_elm);
+	spin_unlock_irqrestore(&vsmmu->s2_parent->vsmmus.lock, flags);
+	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+	ida_free(&smmu->vmid_map, vsmmu->vmid);
+}
+
 static void arm_smmu_make_nested_cd_table_ste(
 	struct arm_smmu_ste *target, struct arm_smmu_master *master,
 	struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
@@ -337,6 +354,7 @@ static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
 }
 
 static const struct iommufd_viommu_ops arm_vsmmu_ops = {
+	.destroy = arm_vsmmu_destroy,
 	.alloc_domain_nested = arm_vsmmu_alloc_domain_nested,
 	.cache_invalidate = arm_vsmmu_cache_invalidate,
 };
@@ -351,6 +369,8 @@ struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 	struct arm_smmu_domain *s2_parent = to_smmu_domain(parent);
 	struct arm_vsmmu *vsmmu;
+	unsigned long flags;
+	int vmid;
 
 	if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
 		return ERR_PTR(-EOPNOTSUPP);
@@ -381,15 +401,24 @@ struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
 	    !(smmu->features & ARM_SMMU_FEAT_S2FWB))
 		return ERR_PTR(-EOPNOTSUPP);
 
+	vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
+			       GFP_KERNEL);
+	if (vmid < 0)
+		return ERR_PTR(vmid);
+
 	vsmmu = iommufd_viommu_alloc(ictx, struct arm_vsmmu, core,
 				     &arm_vsmmu_ops);
-	if (IS_ERR(vsmmu))
+	if (IS_ERR(vsmmu)) {
+		ida_free(&smmu->vmid_map, vmid);
 		return ERR_CAST(vsmmu);
+	}
 
 	vsmmu->smmu = smmu;
+	vsmmu->vmid = (u16)vmid;
 	vsmmu->s2_parent = s2_parent;
-	/* FIXME Move VMID allocation from the S2 domain allocation to here */
-	vsmmu->vmid = s2_parent->s2_cfg.vmid;
+	spin_lock_irqsave(&s2_parent->vsmmus.lock, flags);
+	list_add_tail(&vsmmu->vsmmus_elm, &s2_parent->vsmmus.list);
+	spin_unlock_irqrestore(&s2_parent->vsmmus.lock, flags);
 
 	return &vsmmu->core;
 }
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 0462eb1b2912..addc6308742b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2249,10 +2249,22 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	 */
 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid);
-	} else {
+	} else if (!smmu_domain->nest_parent) {
 		cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
 		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
 		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+	} else {
+		struct arm_vsmmu *vsmmu, *next;
+		unsigned long flags;
+
+		cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
+		spin_lock_irqsave(&smmu_domain->vsmmus.lock, flags);
+		list_for_each_entry_safe(vsmmu, next, &smmu_domain->vsmmus.list,
+					 vsmmus_elm) {
+			cmd.tlbi.vmid = vsmmu->vmid;
+			arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
+		}
+		spin_unlock_irqrestore(&smmu_domain->vsmmus.lock, flags);
 	}
 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
 }
@@ -2342,19 +2354,33 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
 		cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
 			     CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
 		cmd.tlbi.asid = smmu_domain->cd.asid;
-	} else {
+		__arm_smmu_tlb_inv_range(&cmd, iova, size, granule,
+					 smmu_domain);
+	} else if (!smmu_domain->nest_parent) {
 		cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
 		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
-	}
-	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
+		__arm_smmu_tlb_inv_range(&cmd, iova, size, granule,
+					 smmu_domain);
+	} else {
+		struct arm_vsmmu *vsmmu, *next;
+		unsigned long flags;
 
-	if (smmu_domain->nest_parent) {
 		/*
 		 * When the S2 domain changes all the nested S1 ASIDs have to be
 		 * flushed too.
 		 */
 		cmd.opcode = CMDQ_OP_TLBI_NH_ALL;
 		arm_smmu_cmdq_issue_cmd_with_sync(smmu_domain->smmu, &cmd);
+
+		cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
+		spin_lock_irqsave(&smmu_domain->vsmmus.lock, flags);
+		list_for_each_entry_safe(vsmmu, next, &smmu_domain->vsmmus.list,
+					 vsmmus_elm) {
+			cmd.tlbi.vmid = vsmmu->vmid;
+			__arm_smmu_tlb_inv_range(&cmd, iova, size, granule,
+						 smmu_domain);
+		}
+		spin_unlock_irqrestore(&smmu_domain->vsmmus.lock, flags);
 	}
 
 	/*
@@ -2477,7 +2503,7 @@ static void arm_smmu_domain_free_paging(struct iommu_domain *domain)
 		mutex_lock(&arm_smmu_asid_lock);
 		xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);
 		mutex_unlock(&arm_smmu_asid_lock);
-	} else {
+	} else if (!smmu_domain->nest_parent) {
 		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
 		if (cfg->vmid)
 			ida_free(&smmu->vmid_map, cfg->vmid);
@@ -2506,7 +2532,10 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
 				       struct arm_smmu_domain *smmu_domain)
 {
 	int vmid;
-	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
+
+	/* nest_parent stores vmid in vSMMU instead of a shared S2 domain */
+	if (smmu_domain->nest_parent)
+		return 0;
 
 	/* Reserve VMID 0 for stage-2 bypass STEs */
 	vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
@@ -2514,7 +2543,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
 	if (vmid < 0)
 		return vmid;
 
-	cfg->vmid = (u16)vmid;
+	smmu_domain->s2_cfg.vmid = (u16)vmid;
 	return 0;
 }

@@ -3233,6 +3262,8 @@ arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
 		}
 		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
 		smmu_domain->nest_parent = true;
+		INIT_LIST_HEAD(&smmu_domain->vsmmus.list);
+		spin_lock_init(&smmu_domain->vsmmus.lock);
 		break;
 	case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
 	case IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_PASID:
--
2.43.0