[PATCH v3 07/10] iommu/arm-smmu-v3: Allocate IOTLB cache tag if no id to reuse

From: Nicolin Chen

Date: Mon Feb 23 2026 - 15:32:22 EST


An IOTLB tag is now forwarded from arm_smmu_domain_get_iotlb_tag() to its
final destination (a CD or STE entry).

Thus, arm_smmu_domain_get_iotlb_tag() can safely drop its references to
cd->asid and s2_cfg->vmid in the smmu_domain. Instead, allocate a new
IOTLB cache tag from the xarray/ida.

The old ASID and VMID in the smmu_domain will be deprecated once VMID is
also decoupled in the vSMMU use case.

Since invst->new_invs->inv[0] and invst->tag are basically the same thing,
merge arm_smmu_inv_flush_iotlb_tag() into arm_smmu_iotlb_tag_free().

Suggested-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
Signed-off-by: Nicolin Chen <nicolinc@xxxxxxxxxx>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 63 +++++++++++++--------
1 file changed, 38 insertions(+), 25 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 0755ebe1c1560..9ab904d9d142c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3276,13 +3276,44 @@ static int arm_smmu_alloc_iotlb_tag(struct iommu_domain *domain,
return 0;
}

- /* FIXME replace with an actual allocation from the bitmap */
+ lockdep_assert_held(&arm_smmu_asid_lock);
+
+ if (tag->type == INV_TYPE_S1_ASID) {
+ ret = xa_alloc(&arm_smmu_asid_xa, &tag->id, smmu_domain,
+ XA_LIMIT(1, (1 << smmu->asid_bits) - 1),
+ GFP_KERNEL);
+ } else {
+ ret = ida_alloc_range(&smmu->vmid_map, 1,
+ (1 << smmu->vmid_bits) - 1, GFP_KERNEL);
+ if (ret > 0) {
+ tag->id = ret; /* int is good for 16-bit VMID */
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+static void arm_smmu_iotlb_tag_free(struct arm_smmu_inv *tag)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = tag->nsize_opcode,
+ };
+
+ arm_smmu_inv_assert_iotlb_tag(tag);
+
if (tag->type == INV_TYPE_S1_ASID)
- tag->id = smmu_domain->cd.asid;
+ cmd.tlbi.asid = tag->id;
else
- tag->id = smmu_domain->s2_cfg.vmid;
+ cmd.tlbi.vmid = tag->id;
+ arm_smmu_cmdq_issue_cmd_with_sync(tag->smmu, &cmd);

- return 0;
+ if (tag->type == INV_TYPE_S1_ASID)
+ xa_erase(&arm_smmu_asid_xa, tag->id);
+ else if (tag->type == INV_TYPE_S2_VMID)
+ ida_free(&tag->smmu->vmid_map, tag->id);
+
+ /* Keep INV_TYPE_S2_VMID_VSMMU. vSMMU will free it */
}

static struct arm_smmu_inv *
@@ -3510,26 +3541,6 @@ arm_smmu_install_new_domain_invs(struct arm_smmu_attach_state *state)
kfree_rcu(invst->old_invs, rcu);
}

-static void arm_smmu_inv_flush_iotlb_tag(struct arm_smmu_inv *inv)
-{
- struct arm_smmu_cmdq_ent cmd = {};
-
- switch (inv->type) {
- case INV_TYPE_S1_ASID:
- cmd.tlbi.asid = inv->id;
- break;
- case INV_TYPE_S2_VMID:
- /* S2_VMID using nsize_opcode covers S2_VMID_S1_CLEAR */
- cmd.tlbi.vmid = inv->id;
- break;
- default:
- return;
- }
-
- cmd.opcode = inv->nsize_opcode;
- arm_smmu_cmdq_issue_cmd_with_sync(inv->smmu, &cmd);
-}
-
/* Should be installed after arm_smmu_install_ste_for_dev() */
static void
arm_smmu_install_old_domain_invs(struct arm_smmu_attach_state *state)
@@ -3551,7 +3562,7 @@ arm_smmu_install_old_domain_invs(struct arm_smmu_attach_state *state)
* array must be left cleared in the IOTLB.
*/
if (!READ_ONCE(invst->new_invs->inv[0].users))
- arm_smmu_inv_flush_iotlb_tag(&invst->new_invs->inv[0]);
+ arm_smmu_iotlb_tag_free(&invst->tag);

new_invs = arm_smmu_invs_purge(old_invs);
if (!new_invs)
@@ -3697,6 +3708,8 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
err_free_vmaster:
kfree(state->vmaster);
err_unprepare_invs:
+ if (!READ_ONCE(state->new_domain_invst.tag.users))
+ arm_smmu_iotlb_tag_free(&state->new_domain_invst.tag);
kfree(state->new_domain_invst.new_invs);
return ret;
}
--
2.43.0