[PATCH v5 10/22] iommu/arm-smmu-v3: Link domains and devices
From: Eric Auger
Date: Fri Mar 15 2019 - 12:10:39 EST
From: Jean-Philippe Brucker <jean-philippe.brucker@xxxxxxx>
When removing a mapping from a domain, we need to send an invalidation to
all devices that might have stored it in their Address Translation Cache
(ATC). In addition, with SVM we'll need to invalidate the context
descriptors of all devices attached to a live domain.
Maintain a list of devices in each domain, protected by a spinlock. The
list is updated every time we attach or detach a device to or from a
domain. It needs to be a spinlock because we'll invalidate ATC entries
from within hardirq-safe contexts, but it may be possible to relax the
read side with RCU later.
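As an illustrative sketch only (not part of this patch, and the per-master
helper name is hypothetical), a later ATC-invalidation path could walk the
new list like this, which is why the lock has to be hardirq-safe:

/*
 * Hypothetical sketch: invalidate the ATC of every device attached to
 * @smmu_domain. The devices list added by this patch is walked under
 * devices_lock; arm_smmu_atc_inv_master() stands in for whatever
 * per-master invalidation helper a later patch provides.
 */
static void arm_smmu_atc_inv_domain_sketch(struct arm_smmu_domain *smmu_domain,
					   unsigned long iova, size_t size)
{
	unsigned long flags;
	struct arm_smmu_master_data *master;

	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
	list_for_each_entry(master, &smmu_domain->devices, list)
		arm_smmu_atc_inv_master(master, iova, size);
	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}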
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@xxxxxxx>
---
drivers/iommu/arm-smmu-v3.c | 28 ++++++++++++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index d3880010c6cf..ff998c967a0a 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -594,6 +594,11 @@ struct arm_smmu_device {
 struct arm_smmu_master_data {
 	struct arm_smmu_device		*smmu;
 	struct arm_smmu_strtab_ent	ste;
+
+	struct arm_smmu_domain		*domain;
+	struct list_head		list; /* domain->devices */
+
+	struct device			*dev;
 };
 
 /* SMMU private data for an IOMMU domain */
@@ -618,6 +623,9 @@ struct arm_smmu_domain {
 	};
 
 	struct iommu_domain		domain;
+
+	struct list_head		devices;
+	spinlock_t			devices_lock;
 };
 
 struct arm_smmu_option_prop {
@@ -1493,6 +1501,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 	}
 
 	mutex_init(&smmu_domain->init_mutex);
+	INIT_LIST_HEAD(&smmu_domain->devices);
+	spin_lock_init(&smmu_domain->devices_lock);
+
 	return &smmu_domain->domain;
 }
@@ -1713,6 +1724,16 @@ static void arm_smmu_detach_dev(struct device *dev)
 {
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct arm_smmu_master_data *master = fwspec->iommu_priv;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = master->domain;
+
+	if (smmu_domain) {
+		spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+		list_del(&master->list);
+		spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+		master->domain = NULL;
+	}
 
 	master->ste.assigned = false;
 	arm_smmu_install_ste_for_dev(fwspec);
@@ -1722,6 +1743,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret = 0;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	unsigned long flags;
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_master_data *master;
@@ -1757,6 +1779,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	}
 
 	ste->assigned = true;
+	master->domain = smmu_domain;
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_add(&master->list, &smmu_domain->devices);
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
 		ste->s1_cfg = NULL;
@@ -1883,6 +1910,7 @@ static int arm_smmu_add_device(struct device *dev)
 			return -ENOMEM;
 
 		master->smmu = smmu;
+		master->dev = dev;
 		fwspec->iommu_priv = master;
 	}
--
2.20.1