[PATCH 3/3] iommu/vt-d: Use global PASID for SVM usage

From: Lu Baolu
Date: Wed Mar 14 2018 - 22:46:56 EST


This patch switches PASID management for SVM from the SVM-specific
per-IOMMU idr to the global PASID idr introduced earlier in this
series, and drops the now-unused pasid_idr field from struct
intel_iommu.
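
For reference, the global helpers used below could look roughly like
the sketch here. Only the function names and signatures are taken from
the call sites in this patch; the idr/mutex names and the bodies are
illustrative assumptions, not the implementation added earlier in the
series.

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(iommu_pasid_idr);		/* assumed name */
static DEFINE_MUTEX(iommu_pasid_mutex);		/* assumed locking */

int intel_iommu_alloc_pasid(void *ptr, int start, int end, gfp_t gfp)
{
	int pasid;

	mutex_lock(&iommu_pasid_mutex);
	/* Returns the allocated PASID, or a negative errno on failure. */
	pasid = idr_alloc(&iommu_pasid_idr, ptr, start, end, gfp);
	mutex_unlock(&iommu_pasid_mutex);

	return pasid;
}

void intel_iommu_free_pasid(int pasid)
{
	mutex_lock(&iommu_pasid_mutex);
	idr_remove(&iommu_pasid_idr, pasid);
	mutex_unlock(&iommu_pasid_mutex);
}

void *intel_iommu_lookup_pasid(int pasid)
{
	/* idr_find() is RCU-safe, matching the rcu_read_lock() user below. */
	return idr_find(&iommu_pasid_idr, pasid);
}

With a single system-wide name space, a PASID identifies one mm
regardless of which IOMMU received the request, which is why the
lookup and free helpers no longer take an iommu pointer.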

Cc: Ashok Raj <ashok.raj@xxxxxxxxx>
Cc: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Liu Yi L <yi.l.liu@xxxxxxxxx>

Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
 drivers/iommu/intel-svm.c   | 20 +++++++++-----------
 include/linux/intel-iommu.h |  1 -
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f3b7394..1c45f75 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -85,8 +85,6 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
iommu->name);
}

- idr_init(&iommu->pasid_idr);
-
return 0;
}

@@ -102,7 +100,7 @@ int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
free_pages((unsigned long)iommu->pasid_state_table, order);
iommu->pasid_state_table = NULL;
}
- idr_destroy(&iommu->pasid_idr);
+
return 0;
}

@@ -392,9 +390,9 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
pasid_max = iommu->pasid_max;

/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
- ret = idr_alloc(&iommu->pasid_idr, svm,
- !!cap_caching_mode(iommu->cap),
- pasid_max - 1, GFP_KERNEL);
+ ret = intel_iommu_alloc_pasid(svm,
+ !!cap_caching_mode(iommu->cap),
+ pasid_max - 1, GFP_KERNEL);
if (ret < 0) {
kfree(svm);
goto out;
@@ -409,7 +407,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
if (mm) {
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
- idr_remove(&svm->iommu->pasid_idr, svm->pasid);
+ intel_iommu_free_pasid(svm->pasid);
kfree(svm);
kfree(sdev);
goto out;
@@ -463,7 +461,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
if (!iommu || !iommu->pasid_table)
goto out;

- svm = idr_find(&iommu->pasid_idr, pasid);
+ svm = intel_iommu_lookup_pasid(pasid);
if (!svm)
goto out;

@@ -488,7 +486,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
svm->iommu->pasid_table[svm->pasid].val = 0;
wmb();

- idr_remove(&svm->iommu->pasid_idr, svm->pasid);
+ intel_iommu_free_pasid(svm->pasid);
if (svm->mm)
mmu_notifier_unregister(&svm->notifier, svm->mm);

@@ -523,7 +521,7 @@ int intel_svm_is_pasid_valid(struct device *dev, int pasid)
if (!iommu || !iommu->pasid_table)
goto out;

- svm = idr_find(&iommu->pasid_idr, pasid);
+ svm = intel_iommu_lookup_pasid(pasid);
if (!svm)
goto out;

@@ -621,7 +619,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)

if (!svm || svm->pasid != req->pasid) {
rcu_read_lock();
- svm = idr_find(&iommu->pasid_idr, req->pasid);
+ svm = intel_iommu_lookup_pasid(req->pasid);
/* It *can't* go away, because the driver is not permitted
* to unbind the mm while any page faults are outstanding.
* So we only need RCU to protect the internal idr code. */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index a2013dd..e50bef8 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -418,7 +418,6 @@ struct intel_iommu {
struct pasid_state_entry *pasid_state_table;
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
- struct idr pasid_idr;
u32 pasid_max;
#endif
struct q_inval *qi; /* Queued invalidation info */
--
2.7.4