Hi Kunkun,
On 4/1/21 2:37 PM, Kunkun Jiang wrote:
Hi Eric,
On 2021/2/24 4:56, Eric Auger wrote:
With nested stage support, soon we will need to invalidate
S1 contexts and ranges tagged with an unmanaged ASID, the
latter being managed by the guest. So let's introduce two helpers
that allow invalidation with externally managed ASIDs.
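A minimal standalone model of the dispatch these helpers implement (illustrative only, not part of the patch): a non-negative ext_asid selects invalidation by the guest-managed ASID, while -1 keeps the existing stage-based behaviour.

#include <stdio.h>

enum stage { DOMAIN_S1, DOMAIN_S2 };

/* Standalone model of the __arm_smmu_tlb_inv_context() dispatch; command
 * names mirror the patch, but this is not kernel code. */
static void tlb_inv_context(enum stage stage, int ext_asid, int s1_asid, int vmid)
{
	if (ext_asid >= 0)		/* guest stage 1 invalidation */
		printf("CMDQ_OP_TLBI_NH_ASID asid=%d vmid=%d\n", ext_asid, vmid);
	else if (stage == DOMAIN_S1)	/* host-managed ASID */
		printf("TLBI by ASID asid=%d\n", s1_asid);
	else				/* stage 2 */
		printf("CMDQ_OP_TLBI_S12_VMALL vmid=%d\n", vmid);
}

int main(void)
{
	tlb_inv_context(DOMAIN_S1, -1, 5, 0);	/* existing host-managed path */
	tlb_inv_context(DOMAIN_S1, 7, 5, 3);	/* ASID 7 owned by the guest, vmid 3 */
	return 0;
}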
Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx>
---
v13 -> v14
- Actually send the NH_ASID command (reported by Xingang Wang)
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 38 ++++++++++++++++-----
1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5579ec4fccc8..4c19a1114de4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1843,9 +1843,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
}
/* IO_PGTABLE API */
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void __arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain,
+                                       int ext_asid)
{
- struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cmdq_ent cmd;
@@ -1856,7 +1856,13 @@ static void arm_smmu_tlb_inv_context(void *cookie)
* insertion to guarantee those are observed before the TLBI. Do be
* careful, 007.
*/
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ if (ext_asid >= 0) { /* guest stage 1 invalidation */
+ cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
+ cmd.tlbi.asid = ext_asid;
+ cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
} else {
cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
@@ -1867,6 +1873,13 @@ static void arm_smmu_tlb_inv_context(void *cookie)
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
}
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+
+ __arm_smmu_tlb_inv_context(smmu_domain, -1);
+}
+
static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
unsigned long iova, size_t size,
size_t granule,
@@ -1926,9 +1939,10 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
Here is the part of code in __arm_smmu_tlb_inv_range():

	if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
		/* Get the leaf page size */
		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
		cmd->tlbi.tg = (tg - 10) / 2;
		/* Determine what level the granule is at */
		cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
		num_pages = size >> tg;
	}

When the pSMMU supports RIL, we get the leaf page size from
__ffs(smmu_domain->domain.pgsize_bitmap). In nested mode, it is determined
by the host PAGE_SIZE. If the host kernel and the guest kernel use different
translation granules (e.g. host 16K, guest 4K), __arm_smmu_tlb_inv_range()
will issue an incorrect TLBI command.
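To make the host-16K/guest-4K mismatch concrete, here is a minimal standalone sketch of the computation quoted above (illustrative only, not code from the thread), for a single 4K leaf invalidation:

#include <stdio.h>

/* Recompute the quoted tg/ttl/num_pages fields for a single leaf
 * invalidation of one 4K page (size = granule = 0x1000). */
static void tlbi_fields(unsigned int tg_log2, unsigned long size,
			unsigned long granule)
{
	unsigned int tg = (tg_log2 - 10) / 2;	/* 12,14,16 -> 1,2,3 */
	unsigned int ttl = 4 - ((__builtin_ctzl(granule) - 3) / (tg_log2 - 3));
	unsigned long num_pages = size >> tg_log2;

	printf("leaf %2uK: tg=%u ttl=%u num_pages=%lu\n",
	       1U << (tg_log2 - 10), tg, ttl, num_pages);
}

int main(void)
{
	tlbi_fields(12, 0x1000, 0x1000);	/* guest granule 4K : tg=1 ttl=3 num_pages=1 */
	tlbi_fields(14, 0x1000, 0x1000);	/* host granule 16K: tg=2 ttl=4 num_pages=0 */
	return 0;
}

With the host's 16K leaf size the command encodes tg=2, ttl=4 and num_pages=0, so the guest's 4K mapping is not correctly invalidated.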
Do you have any idea about this issue?

I think this is the same issue as the one reported by Chenxiang:
https://lore.kernel.org/lkml/15938ed5-2095-e903-a290-333c299015a2@xxxxxxxxxxxxx/
I think they are different. In normal cases, when we want to invalidate the

In case RIL is not supported by the host, the next version will use the
smallest page size supported by the pSMMU, as done in __arm_smmu_tlb_inv_range().
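A minimal standalone sketch of that fallback idea (illustrative only, assuming a pSMMU that supports 4K/2M/1G granules; not the actual next-version code): without RANGE_INV, the range is covered by one per-VA invalidation for each page of the smallest supported granule, so guest mappings smaller than the host granule are still covered.

#include <stdio.h>

int main(void)
{
	/* assumed pSMMU-supported granules: 4K, 2M, 1G */
	unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
	unsigned long inv_granule = 1UL << __builtin_ctzl(pgsize_bitmap);	/* smallest: 4K */
	unsigned long iova = 0x8000, size = 0x4000;	/* guest-mapped 4K pages */

	/* non-RIL fallback: one per-VA invalidation per smallest-granule page */
	for (unsigned long va = iova; va < iova + size; va += inv_granule)
		printf("CMDQ_OP_TLBI_NH_VA addr=0x%lx\n", va);
	return 0;
}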
Thanks
Eric
Best Regards,
Kunkun Jiang
-static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
- size_t granule, bool leaf,
- struct arm_smmu_domain *smmu_domain)
+static void
+arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
+ size_t granule, bool leaf, int ext_asid,
+ struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_cmdq_ent cmd = {
.tlbi = {
@@ -1936,7 +1950,12 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
},
};
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ if (ext_asid >= 0) { /* guest stage 1 invalidation */
+ cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
+              CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
+ cmd.tlbi.asid = ext_asid;
+ cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
             CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
@@ -1944,6 +1963,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
+
__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
/*
@@ -1982,7 +2002,7 @@ static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
- arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
+ arm_smmu_tlb_inv_range_domain(iova, size, granule, false, -1, cookie);
}
static const struct iommu_flush_ops arm_smmu_flush_ops = {
@@ -2523,7 +2543,7 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
arm_smmu_tlb_inv_range_domain(gather->start,
gather->end - gather->start + 1,
- gather->pgsize, true, smmu_domain);
+ gather->pgsize, true, -1, smmu_domain);
}
static phys_addr_t