[PATCH 11/18] iommu/vt-d: Replace Intel specific PASID allocator with IOASID

From: Jacob Pan
Date: Mon Apr 08 2019 - 19:57:45 EST


Make use of the generic IOASID code to manage PASID allocation,
freeing, and lookup, replacing the Intel-specific IDR-based allocator.

Signed-off-by: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
---
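Note for reviewers: a rough sketch of how the removed Intel-specific
helpers map onto the generic IOASID interface used below. The
example_pasid_lifecycle() wrapper and its arguments are made up for
illustration and are not part of this patch; only the
ioasid_alloc()/ioasid_find()/ioasid_free() calls mirror the real usage.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ioasid.h>

#include "intel-pasid.h"	/* PASID_MIN */

/* Illustrative only: old IDR-based helpers vs. the generic IOASID API. */
static int example_pasid_lifecycle(void *private, int pasid_max)
{
	ioasid_t pasid;
	void *p;

	/* intel_pasid_alloc_id(private, min, max, gfp) becomes: */
	pasid = ioasid_alloc(NULL, PASID_MIN, pasid_max - 1, private);
	if (pasid == INVALID_IOASID)
		return -ENOSPC;

	/* intel_pasid_lookup_id(pasid) becomes (NULL getter == raw lookup): */
	p = ioasid_find(NULL, pasid, NULL);
	if (IS_ERR(p)) {
		ioasid_free(pasid);
		return PTR_ERR(p);
	}

	/* intel_pasid_free_id(pasid) becomes: */
	ioasid_free(pasid);
	return 0;
}
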
drivers/iommu/Kconfig | 1 +
drivers/iommu/intel-pasid.c | 36 ------------------------------------
drivers/iommu/intel-svm.c | 41 ++++++++++++++++++++++++-----------------
3 files changed, 25 insertions(+), 53 deletions(-)

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6f07f3b..7f92009 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -204,6 +204,7 @@ config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86
select PCI_PASID
+ select IOASID
select MMU_NOTIFIER
help
Shared Virtual Memory (SVM) provides a facility for devices
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index a17a522..0fda626 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -26,42 +26,6 @@
*/
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
-static DEFINE_IDR(pasid_idr);
-
-int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
-{
- int ret, min, max;
-
- min = max_t(int, start, PASID_MIN);
- max = min_t(int, end, intel_pasid_max_id);
-
- WARN_ON(in_interrupt());
- idr_preload(gfp);
- spin_lock(&pasid_lock);
- ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
- spin_unlock(&pasid_lock);
- idr_preload_end();
-
- return ret;
-}
-
-void intel_pasid_free_id(int pasid)
-{
- spin_lock(&pasid_lock);
- idr_remove(&pasid_idr, pasid);
- spin_unlock(&pasid_lock);
-}
-
-void *intel_pasid_lookup_id(int pasid)
-{
- void *p;
-
- spin_lock(&pasid_lock);
- p = idr_find(&pasid_idr, pasid);
- spin_unlock(&pasid_lock);
-
- return p;
-}

int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid)
{
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 3a4b09a..595ea0a 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -25,6 +25,7 @@
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
+#include <linux/ioasid.h>
#include <asm/page.h>

#include "intel-pasid.h"
@@ -211,7 +212,9 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ /* For emulated IOMMUs, PASID cache invalidation implies IOTLB/DevTLB invalidation */
+ if (!cap_caching_mode(svm->iommu->cap))
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
}
rcu_read_unlock();

@@ -315,16 +318,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;

- /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
- ret = intel_pasid_alloc_id(svm,
- !!cap_caching_mode(iommu->cap),
- pasid_max - 1, GFP_KERNEL);
- if (ret < 0) {
+ /* Do not use PASID 0, reserved for RID to PASID */
+ svm->pasid = ioasid_alloc(NULL, PASID_MIN,
+ pasid_max - 1, svm);
+ if (svm->pasid == INVALID_IOASID) {
kfree(svm);
kfree(sdev);
+ ret = -ENOSPC;
goto out;
}
- svm->pasid = ret;
svm->notifier.ops = &intel_mmuops;
svm->mm = mm;
svm->flags = flags;
@@ -334,7 +336,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (mm) {
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
- intel_pasid_free_id(svm->pasid);
+ ioasid_free(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -350,7 +352,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
- intel_pasid_free_id(svm->pasid);
+ ioasid_free(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -383,7 +385,12 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
if (!iommu)
goto out;

- svm = intel_pasid_lookup_id(pasid);
+ svm = ioasid_find(NULL, pasid, NULL);
+ if (IS_ERR(svm)) {
+ ret = PTR_ERR(svm);
+ goto out;
+ }
+
if (!svm)
goto out;

@@ -405,7 +412,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
kfree_rcu(sdev, rcu);

if (list_empty(&svm->devs)) {
- intel_pasid_free_id(svm->pasid);
+ ioasid_free(svm->pasid);
if (svm->mm)
mmu_notifier_unregister(&svm->notifier, svm->mm);

@@ -440,10 +447,11 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
if (!iommu)
goto out;

- svm = intel_pasid_lookup_id(pasid);
- if (!svm)
+ svm = ioasid_find(NULL, pasid, NULL);
+ if (IS_ERR_OR_NULL(svm)) {
+ ret = IS_ERR(svm) ? PTR_ERR(svm) : -EINVAL;
goto out;
-
+ }
/* init_mm is used in this case */
if (!svm->mm)
ret = 1;
@@ -550,13 +558,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)

if (!svm || svm->pasid != req->pasid) {
rcu_read_lock();
- svm = intel_pasid_lookup_id(req->pasid);
+ svm = ioasid_find(NULL, req->pasid, NULL);
/* It *can't* go away, because the driver is not permitted
* to unbind the mm while any page faults are outstanding.
* So we only need RCU to protect the internal idr code. */
rcu_read_unlock();
-
- if (!svm) {
+ if (IS_ERR_OR_NULL(svm)) {
pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
iommu->name, req->pasid, ((unsigned long long *)req)[0],
((unsigned long long *)req)[1]);
--
2.7.4