[PATCH v3 08/11] iommu: Move iopf_handler() to iommu-sva.c

From: Lu Baolu
Date: Thu Aug 17 2023 - 19:45:01 EST


The iopf_handler() function handles a fault group for an SVA domain, which
makes it SVA-specific code that does not belong in the generic io-pgfault
framework. Move it, together with its helper iopf_complete_group(), to
iommu-sva.c and rename them to iommu_sva_iopf_handler() and
iommu_sva_complete_iopf(). Make iopf_free_group() and iopf_queue_work()
non-static so that the SVA code can use them, and have iommu_queue_iopf()
dispatch the fault group through the new iommu_sva_handle_iopf_group()
interface.
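
After this patch the dispatch path is split between the generic and the
SVA code roughly as follows (a simplified sketch of the call flow
resulting from the diff below; the workqueue hand-off and error paths
are omitted):

	iommu_queue_iopf()                      /* io-pgfault.c, generic */
	  iommu_sva_handle_iopf_group()         /* iommu-sva.c */
	    iopf_queue_work(group, iommu_sva_iopf_handler)
	      ...                               /* workqueue context */
	      iommu_sva_iopf_handler()
	        domain->iopf_handler()          /* for each fault in the group */
	        iommu_sva_complete_iopf()       /* send the page response */
	        iopf_free_group()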

Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
drivers/iommu/iommu-sva.h | 17 +++++++++++++
drivers/iommu/io-pgfault.c | 50 +++-----------------------------------
drivers/iommu/iommu-sva.c | 48 ++++++++++++++++++++++++++++++++++++
3 files changed, 69 insertions(+), 46 deletions(-)

diff --git a/drivers/iommu/iommu-sva.h b/drivers/iommu/iommu-sva.h
index de7819c796ce..510a7df23fba 100644
--- a/drivers/iommu/iommu-sva.h
+++ b/drivers/iommu/iommu-sva.h
@@ -24,6 +24,9 @@ void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
+void iopf_free_group(struct iopf_group *group);
+int iopf_queue_work(struct iopf_group *group, work_func_t func);
+int iommu_sva_handle_iopf_group(struct iopf_group *group);

#else /* CONFIG_IOMMU_SVA */
static inline int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
@@ -67,5 +70,19 @@ iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
return IOMMU_PAGE_RESP_INVALID;
}
+
+static inline void iopf_free_group(struct iopf_group *group)
+{
+}
+
+static inline int iopf_queue_work(struct iopf_group *group, work_func_t func)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_sva_handle_iopf_group(struct iopf_group *group)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_IOMMU_SVA */
#endif /* _IOMMU_SVA_H */
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index d07586cd37fd..00c2e447b740 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -25,7 +25,8 @@ struct iopf_queue {
struct mutex lock;
};

-static void iopf_free_group(struct iopf_group *group)
+/* Called by the iopf handler, or on the failure path, to free the iopf group. */
+void iopf_free_group(struct iopf_group *group)
{
struct iopf_fault *iopf, *next;

@@ -37,50 +38,7 @@ static void iopf_free_group(struct iopf_group *group)
kfree(group);
}

-static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
- enum iommu_page_response_code status)
-{
- struct iommu_page_response resp = {
- .pasid = iopf->fault.prm.pasid,
- .grpid = iopf->fault.prm.grpid,
- .code = status,
- };
-
- if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
- (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
- resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
-
- return iommu_page_response(dev, &resp);
-}
-
-static void iopf_handler(struct work_struct *work)
-{
- struct iopf_fault *iopf;
- struct iopf_group *group;
- struct iommu_domain *domain;
- enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
-
- group = container_of(work, struct iopf_group, work);
- domain = iommu_get_domain_for_dev_pasid(group->dev,
- group->last_fault.fault.prm.pasid, 0);
- if (!domain || !domain->iopf_handler)
- status = IOMMU_PAGE_RESP_INVALID;
-
- list_for_each_entry(iopf, &group->faults, list) {
- /*
- * For the moment, errors are sticky: don't handle subsequent
- * faults in the group if there is an error.
- */
- if (status == IOMMU_PAGE_RESP_SUCCESS)
- status = domain->iopf_handler(&iopf->fault,
- domain->fault_data);
- }
-
- iopf_complete_group(group->dev, &group->last_fault, status);
- iopf_free_group(group);
-}
-
-static int iopf_queue_work(struct iopf_group *group, work_func_t func)
+int iopf_queue_work(struct iopf_group *group, work_func_t func)
{
struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;

@@ -189,7 +147,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
list_move(&iopf->list, &group->faults);
}

- ret = iopf_queue_work(group, iopf_handler);
+ ret = iommu_sva_handle_iopf_group(group);
if (ret)
iopf_free_group(group);

diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index b78671a8a914..df8734b6ec00 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -210,3 +210,51 @@ void mm_pasid_drop(struct mm_struct *mm)

iommu_free_global_pasid(mm->pasid);
}
+
+static int iommu_sva_complete_iopf(struct device *dev, struct iopf_fault *iopf,
+ enum iommu_page_response_code status)
+{
+ struct iommu_page_response resp = {
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = status,
+ };
+
+ if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
+ (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
+ resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+
+ return iommu_page_response(dev, &resp);
+}
+
+static void iommu_sva_iopf_handler(struct work_struct *work)
+{
+ struct iopf_fault *iopf;
+ struct iopf_group *group;
+ struct iommu_domain *domain;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
+
+ group = container_of(work, struct iopf_group, work);
+ domain = iommu_get_domain_for_dev_pasid(group->dev,
+ group->last_fault.fault.prm.pasid, 0);
+ if (!domain || !domain->iopf_handler)
+ status = IOMMU_PAGE_RESP_INVALID;
+
+ list_for_each_entry(iopf, &group->faults, list) {
+ /*
+ * For the moment, errors are sticky: don't handle subsequent
+ * faults in the group if there is an error.
+ */
+ if (status == IOMMU_PAGE_RESP_SUCCESS)
+ status = domain->iopf_handler(&iopf->fault,
+ domain->fault_data);
+ }
+
+ iommu_sva_complete_iopf(group->dev, &group->last_fault, status);
+ iopf_free_group(group);
+}
+
+int iommu_sva_handle_iopf_group(struct iopf_group *group)
+{
+ return iopf_queue_work(group, iommu_sva_iopf_handler);
+}
--
2.34.1