From: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
Sent: Thursday, July 27, 2023 1:49 PM
@@ -82,7 +82,7 @@ static void iopf_handler(struct work_struct *work)
if (!domain || !domain->iopf_handler)
status = IOMMU_PAGE_RESP_INVALID;
- list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ list_for_each_entry(iopf, &group->faults, list) {
 		/*
 		 * For the moment, errors are sticky: don't handle
 		 * subsequent faults in the group if there is an error.
@@ -90,14 +90,20 @@ static void iopf_handler(struct work_struct *work)
if (status == IOMMU_PAGE_RESP_SUCCESS)
status = domain->iopf_handler(&iopf->fault,
domain->fault_data);
-
- if (!(iopf->fault.prm.flags &
- IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
- kfree(iopf);
}
iopf_complete_group(group->dev, &group->last_fault, status);
- kfree(group);
+ iopf_free_group(group);
+}
This is a perf-critical path; it's not good to traverse the list twice.
+
+static int iopf_queue_work(struct iopf_group *group, work_func_t func)
+{
+	struct iopf_device_param *iopf_param = group->dev->iommu->iopf_param;
+
+ INIT_WORK(&group->work, func);
+ queue_work(iopf_param->queue->wq, &group->work);
+
+ return 0;
}
Is there a plan to introduce further errors in the future? Otherwise this
should return void.
Btw, the work queue is only used for SVA. If there is no other caller, this
can just be kept in iommu-sva.c; no need to create a helper.
@@ -199,8 +204,11 @@ int iommu_queue_iopf(struct iommu_fault *fault,
struct device *dev)
list_move(&iopf->list, &group->faults);
}
- queue_work(iopf_param->queue->wq, &group->work);
- return 0;
+ ret = iopf_queue_work(group, iopf_handler);
+ if (ret)
+ iopf_free_group(group);
+
+ return ret;
Here we can document that the iopf handler (in patch 10) should free the
group, allowing the optimization inside the handler.