[RFC PATCH 05/10] iommu/vt-d: Setup DMA remapping for mediated devices
From: Lu Baolu
Date: Sun Jul 22 2018 - 02:12:01 EST
This configures the second-level page table when an external component
requests a domain for a mediated device. At attach time, the PASID entry
for the mdev is set up with the domain's default PASID; at detach time,
the entry is cleared and the IOTLB, extended IOTLB and device TLB are
flushed.
Cc: Ashok Raj <ashok.raj@xxxxxxxxx>
Cc: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Liu Yi L <yi.l.liu@xxxxxxxxx>
Signed-off-by: Sanjay Kumar <sanjay.k.kumar@xxxxxxxxx>
Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
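Note for reviewers (not part of the patch): a minimal sketch of how an
upper layer such as vfio could drive this path, assuming the mdev bus has
been registered with the IOMMU core by an earlier patch in this series.
The function and parameter names below are illustrative only.

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Illustrative sketch only: allocate a domain on the mdev bus, attach the
 * mediated device and map a range into its second-level page table.
 */
static int example_mdev_dma_setup(struct device *mdev_dev, unsigned long iova,
				  phys_addr_t paddr, size_t size)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(mdev_dev->bus);
	if (!domain)
		return -ENOMEM;

	/*
	 * Ends up in dmar_insert_one_dev_info(), which programs the PASID
	 * entry with domain->default_pasid for the mediated device.
	 */
	ret = iommu_attach_device(domain, mdev_dev);
	if (ret)
		goto out_free;

	/* Populate the second-level page table used for DMA translation. */
	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	/* Detach clears the PASID entry and flushes IOTLB/EIOTLB/dev-TLB. */
	iommu_detach_device(domain, mdev_dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}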
drivers/iommu/intel-iommu.c | 73 ++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 66 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 57ccfc4..b6e9ea8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2569,8 +2569,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev)
dev->archdata.iommu = info;
- if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
- bool pass_through;
+ if (dev && sm_supported(iommu)) {
+ bool pass_through = hw_pass_through &&
+ domain_type_is_si(domain);
ret = intel_pasid_alloc_table(dev);
if (ret) {
@@ -2579,12 +2580,21 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
return NULL;
}
- /* Setup the PASID entry for requests without PASID: */
- pass_through = hw_pass_through && domain_type_is_si(domain);
spin_lock(&iommu->lock);
- intel_pasid_setup_second_level(iommu, domain, dev,
- PASID_RID2PASID,
- pass_through);
+
+ /* Setup the PASID entry for requests without PASID: */
+ if (dev_is_pci(dev))
+ intel_pasid_setup_second_level(iommu, domain, dev,
+ PASID_RID2PASID,
+ pass_through);
+ /* Setup the PASID entry for mediated devices: */
+ else if (dev_is_mdev(dev))
+ intel_pasid_setup_second_level(iommu, domain, dev,
+ domain->default_pasid,
+ false);
+ else
+ pr_err("Unsupported device %s\n", dev_name(dev));
+
spin_unlock(&iommu->lock);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4937,6 +4947,32 @@ static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
}
+static void
+iommu_flush_ext_iotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 gran)
+{
+ struct qi_desc desc;
+
+ desc.high = 0;
+ desc.low = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
+ QI_EIOTLB_GRAN(gran) | QI_EIOTLB_TYPE;
+
+ qi_submit_sync(&desc, iommu);
+}
+
+static void iommu_flush_pasid_dev_iotlb(struct intel_iommu *iommu,
+ struct device *dev, int sid, int pasid)
+{
+ struct qi_desc desc;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int qdep = pci_ats_queue_depth(pdev);
+
+ desc.low = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE;
+ desc.high = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) | QI_DEV_EIOTLB_SIZE;
+
+ qi_submit_sync(&desc, iommu);
+}
+
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
struct intel_iommu *iommu;
@@ -4949,6 +4985,29 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
iommu = info->iommu;
+ if (dev_is_mdev(info->dev)) {
+ struct dmar_domain *domain = info->domain;
+ int did = domain->iommu_did[iommu->seq_id];
+ int sid = info->bus << 8 | info->devfn;
+ struct device *dev = info->dev;
+
+ intel_pasid_clear_entry(dev, domain->default_pasid);
+
+ /* Flush IOTLB including PASID Cache: */
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+
+ /*
+ * Flush EIOTLB. The only way to flush global mappings within
+ * a PASID is to use QI_GRAN_ALL_ALL.
+ */
+ iommu_flush_ext_iotlb(iommu, did, domain->default_pasid,
+ QI_GRAN_ALL_ALL);
+
+ /* Flush Dev TLB: */
+ iommu_flush_pasid_dev_iotlb(iommu, dev_mdev_parent(dev), sid,
+ domain->default_pasid);
+ }
+
if (info->dev) {
iommu_disable_dev_iotlb(info);
domain_context_clear(iommu, info->dev);
--
2.7.4