Attaching an IOMMU domain to a PASID of a device is a generic operation
for modern IOMMU drivers that support PASID-granular DMA address
translation. Currently visible usage scenarios include (but are not
limited to):

- SVA (Shared Virtual Address)
- kernel DMA with PASID
- hardware-assisted mediated devices

This adds a pair of domain ops for this purpose and the interfaces for
device drivers to attach/detach a domain to/from a {device, PASID} pair;
a usage sketch follows below.
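
As a rough usage sketch (not part of this patch; the example_* names and
the way the domain and PASID are obtained are made up for illustration),
a device driver would call the new interfaces like this:

	/* Hypothetical driver glue, assuming @domain and @pasid already exist. */
	static int example_enable_pasid_dma(struct device *dev,
					    struct iommu_domain *domain,
					    ioasid_t pasid)
	{
		/*
		 * Make DMA tagged with @pasid be translated by @domain.
		 * Fails with e.g. -EBUSY if the PASID is already in use or
		 * -EINVAL if the group is not an immutable singleton.
		 */
		return iommu_attach_device_pasid(domain, dev, pasid);
	}

	static void example_disable_pasid_dma(struct device *dev,
					      struct iommu_domain *domain,
					      ioasid_t pasid)
	{
		/* Block further DMA with @pasid and forget the domain. */
		iommu_detach_device_pasid(domain, dev, pasid);
	}
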
Some buses, like PCI, route packets without considering the PASID value.
Thus a DMA target address with PASID might be treated as P2P if the
address falls into the MMIO BAR of another device in the group. To keep
things simple, these interfaces only apply to devices belonging to
singleton groups whose singleton status is immutable in the fabric
(i.e. not affected by hotplug).
Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@xxxxxxxxxx>
Tested-by: Zhangfei Gao <zhangfei.gao@xxxxxxxxxx>
Tested-by: Tony Zhu <tony.zhu@xxxxxxxxx>
---
include/linux/iommu.h | 21 ++++++++++++
drivers/iommu/iommu.c | 75 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 96 insertions(+)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f41eb2b3c7da..f2b5aa7efe43 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -262,6 +262,8 @@ struct iommu_ops {
* struct iommu_domain_ops - domain specific operations
* @attach_dev: attach an iommu domain to a device
* @detach_dev: detach an iommu domain from a device
+ * @set_dev_pasid: attach an iommu domain to a pasid of a device
+ * @block_dev_pasid: block a pasid of a device from using an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @map_pages: map a physically contiguous set of pages of the same size to
* an iommu domain.
@@ -282,6 +284,10 @@ struct iommu_ops {
struct iommu_domain_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
+ int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid);
+ void (*block_dev_pasid)(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid);
int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
@@ -679,6 +685,10 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);
+int iommu_attach_device_pasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid);
+void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -1052,6 +1062,17 @@ static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
return false;
}
+
+static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+}
#endif /* CONFIG_IOMMU_API */
/**
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 74a0a3ec0907..be48b09371f4 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -39,6 +39,7 @@ struct iommu_group {
struct kobject kobj;
struct kobject *devices_kobj;
struct list_head devices;
+ struct xarray pasid_array;
struct mutex mutex;
void *iommu_data;
void (*iommu_data_release)(void *iommu_data);
@@ -660,6 +661,7 @@ struct iommu_group *iommu_group_alloc(void)
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
+ xa_init(&group->pasid_array);
ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
if (ret < 0) {
@@ -3271,3 +3273,76 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group)
return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+
+static bool iommu_group_immutable_singleton(struct iommu_group *group,
+ struct device *dev)
+{
+ int count;
+
+ mutex_lock(&group->mutex);
+ count = iommu_group_device_count(group);
+ mutex_unlock(&group->mutex);
+
+ if (count != 1)
+ return false;
+
+ /*
+	 * A PCI device can be considered fully isolated if all devices on
+	 * the path from it to the host-PCI bridge are protected from
+	 * peer-to-peer DMA by ACS.
+ */
+ if (dev_is_pci(dev))
+ return pci_acs_path_enabled(to_pci_dev(dev), NULL,
+ REQ_ACS_FLAGS);
+
+ /*
+	 * Otherwise the device came from DT/ACPI. Assume it is static, so
+	 * the device count checked above suffices to prove the singleton.
+ */
+ return true;
+}
+
+int iommu_attach_device_pasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid)
+{
+ struct iommu_group *group;
+ void *curr;
+ int ret;
+
+ if (!domain->ops->set_dev_pasid)
+ return -EOPNOTSUPP;
+
+ group = iommu_group_get(dev);
+ if (!group || !iommu_group_immutable_singleton(group, dev)) {
+ iommu_group_put(group);
+ return -EINVAL;
+ }
+
+ mutex_lock(&group->mutex);
+ curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
+ if (curr) {
+ ret = xa_err(curr) ? : -EBUSY;
+ goto out_unlock;
+ }
+ ret = domain->ops->set_dev_pasid(domain, dev, pasid);
+ if (ret)
+ xa_erase(&group->pasid_array, pasid);
+out_unlock:
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+
+ return ret;
+}
+
+void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid)
+{
+ struct iommu_group *group = iommu_group_get(dev);
+
+ mutex_lock(&group->mutex);
+ domain->ops->block_dev_pasid(domain, dev, pasid);
+ xa_erase(&group->pasid_array, pasid);
+ mutex_unlock(&group->mutex);
+
+ iommu_group_put(group);
+}
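
For reference, a vendor IOMMU driver is expected to wire up the two new
domain ops roughly as sketched below. Everything prefixed with my_iommu_
is hypothetical and only marks where the driver would program its PASID
table; it is not part of this patch.

	static int my_iommu_set_dev_pasid(struct iommu_domain *domain,
					  struct device *dev, ioasid_t pasid)
	{
		/* Point the PASID table entry of @dev at @domain. */
		return my_iommu_install_pasid_entry(domain, dev, pasid);
	}

	static void my_iommu_block_dev_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
	{
		/* Clear the PASID table entry and flush affected caches. */
		my_iommu_clear_pasid_entry(domain, dev, pasid);
	}

	static const struct iommu_domain_ops my_iommu_domain_ops = {
		/* existing .attach_dev, .detach_dev, .map, .unmap, ... */
		.set_dev_pasid		= my_iommu_set_dev_pasid,
		.block_dev_pasid	= my_iommu_block_dev_pasid,
	};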