Re: [PATCH v7 08/11] iommu/vt-d: Misc macro clean up for SVM

From: Jacob Pan
Date: Mon Oct 28 2019 - 18:34:10 EST


On Sat, 26 Oct 2019 09:00:51 +0800
Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx> wrote:

> Hi,
>
> On 10/25/19 3:55 AM, Jacob Pan wrote:
> > Use the combined macro for_each_svm_dev() to simplify SVM device
> > iteration and error checking.
> >
> > Suggested-by: Andy Shevchenko <andriy.shevchenko@xxxxxxxxxxxxxxx>
> > Signed-off-by: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
> > Reviewed-by: Eric Auger <eric.auger@xxxxxxxxxx>
> > ---
> >  drivers/iommu/intel-svm.c | 89 ++++++++++++++++++++++-------------------------
> >  1 file changed, 42 insertions(+), 47 deletions(-)
> >
> > diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
> > index a9a7f85a09bc..a18b02a9709d 100644
> > --- a/drivers/iommu/intel-svm.c
> > +++ b/drivers/iommu/intel-svm.c
> > @@ -212,6 +212,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
> >  static DEFINE_MUTEX(pasid_mutex);
> >  static LIST_HEAD(global_svm_list);
> >
> > +#define for_each_svm_dev(svm, dev) \
> > +	list_for_each_entry(sdev, &svm->devs, list) \
> > +		if (dev == sdev->dev) \
> > +
> >  int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
> >  {
> >  	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
> > @@ -257,15 +261,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
> >  				goto out;
> >  			}
> >
> > -			list_for_each_entry(sdev, &svm->devs, list) {
> > -				if (dev == sdev->dev) {
> > -					if (sdev->ops != ops) {
> > -						ret = -EBUSY;
> > -						goto out;
> > -					}
> > -					sdev->users++;
> > -					goto success;
> > +			for_each_svm_dev(svm, dev) {
> > +				if (sdev->ops != ops) {
> > +					ret = -EBUSY;
> > +					goto out;
> >  				}
> > +				sdev->users++;
> > +				goto success;
> >  			}
> >
> >  			break;
> > @@ -402,50 +404,43 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
> >  		goto out;
> >
> >  	svm = ioasid_find(NULL, pasid, NULL);
> > -	if (IS_ERR(svm)) {
> > +	if (IS_ERR_OR_NULL(svm)) {
> >  		ret = PTR_ERR(svm);
> >  		goto out;
> >  	}
> >
> > -	if (!svm)
> > -		goto out;
>
> If svm == NULL here, this function will return success. This isn't
> expected, right?
>
You are right, the NULL case should be handled separately.
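
Something along these lines, perhaps (just a sketch of the separate
NULL check; -EINVAL for the not-found case is my guess at the errno):

	svm = ioasid_find(NULL, pasid, NULL);
	if (!svm) {
		/* PASID not found: fail instead of silently returning 0 */
		ret = -EINVAL;
		goto out;
	}

	if (IS_ERR(svm)) {
		ret = PTR_ERR(svm);
		goto out;
	}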

Thanks!
> Others look good to me.
>
> Reviewed-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
>
> Best regards,
> baolu
>
> > -
> > -	list_for_each_entry(sdev, &svm->devs, list) {
> > -		if (dev == sdev->dev) {
> > -			ret = 0;
> > -			sdev->users--;
> > -			if (!sdev->users) {
> > -				list_del_rcu(&sdev->list);
> > -				/* Flush the PASID cache and IOTLB for this device.
> > -				 * Note that we do depend on the hardware *not* using
> > -				 * the PASID any more. Just as we depend on other
> > -				 * devices never using PASIDs that they have no right
> > -				 * to use. We have a *shared* PASID table, because it's
> > -				 * large and has to be physically contiguous. So it's
> > -				 * hard to be as defensive as we might like. */
> > -				intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
> > -				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
> > -				kfree_rcu(sdev, rcu);
> > -
> > -				if (list_empty(&svm->devs)) {
> > -					/* Clear private data so that free pass check */
> > -					ioasid_set_data(svm->pasid, NULL);
> > -					ioasid_free(svm->pasid);
> > -					if (svm->mm)
> > -						mmu_notifier_unregister(&svm->notifier, svm->mm);
> > -
> > -					list_del(&svm->list);
> > -
> > -					/* We mandate that no page faults may be outstanding
> > -					 * for the PASID when intel_svm_unbind_mm() is called.
> > -					 * If that is not obeyed, subtle errors will happen.
> > -					 * Let's make them less subtle... */
> > -					memset(svm, 0x6b, sizeof(*svm));
> > -					kfree(svm);
> > -				}
> > +	for_each_svm_dev(svm, dev) {
> > +		ret = 0;
> > +		sdev->users--;
> > +		if (!sdev->users) {
> > +			list_del_rcu(&sdev->list);
> > +			/* Flush the PASID cache and IOTLB for this device.
> > +			 * Note that we do depend on the hardware *not* using
> > +			 * the PASID any more. Just as we depend on other
> > +			 * devices never using PASIDs that they have no right
> > +			 * to use. We have a *shared* PASID table, because it's
> > +			 * large and has to be physically contiguous. So it's
> > +			 * hard to be as defensive as we might like. */
> > +			intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
> > +			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
> > +			kfree_rcu(sdev, rcu);
> > +
> > +			if (list_empty(&svm->devs)) {
> > +				/* Clear private data so that free pass check */
> > +				ioasid_set_data(svm->pasid, NULL);
> > +				ioasid_free(svm->pasid);
> > +				if (svm->mm)
> > +					mmu_notifier_unregister(&svm->notifier, svm->mm);
> > +				list_del(&svm->list);
> > +				/* We mandate that no page faults may be outstanding
> > +				 * for the PASID when intel_svm_unbind_mm() is called.
> > +				 * If that is not obeyed, subtle errors will happen.
> > +				 * Let's make them less subtle... */
> > +				memset(svm, 0x6b, sizeof(*svm));
> > +				kfree(svm);
> > +			}
> >  		}
> > -		break;
> >  	}
> > +	break;
> >  }
> > out:
> > mutex_unlock(&pasid_mutex);
> > @@ -581,7 +576,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
> >  			 * to unbind the mm while any page faults are outstanding.
> >  			 * So we only need RCU to protect the internal idr code. */
> >  			rcu_read_unlock();
> > -			if (IS_ERR(svm) || !svm) {
> > +			if (IS_ERR_OR_NULL(svm)) {
> >  				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
> >  				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
> >  				       ((unsigned long long *)req)[1]);
> >

[Jacob Pan]