Re: [PATCH v5 16/19] iommu/vt-d: Misc macro clean up for SVM

From: Jacob Pan
Date: Thu Aug 15 2019 - 18:47:12 EST


On Fri, 16 Aug 2019 00:17:44 +0300
Andy Shevchenko <andy.shevchenko@xxxxxxxxx> wrote:

> On Thu, Aug 15, 2019 at 11:52 PM Jacob Pan
> <jacob.jun.pan@xxxxxxxxxxxxxxx> wrote:
> >
> > Use combined macros for_each_svm_dev() to simplify SVM device
> > iteration and error checking.
> >
> > Suggested-by: Andy Shevchenko <andriy.shevchenko@xxxxxxxxxxxxxxx>
> > Signed-off-by: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
> > Reviewed-by: Eric Auger <eric.auger@xxxxxxxxxx>
> > ---
> >  drivers/iommu/intel-svm.c | 85 +++++++++++++++++++++++------------------------
> >  1 file changed, 41 insertions(+), 44 deletions(-)
> >
> > diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
> > index 5a688a5..ea6f2e2 100644
> > --- a/drivers/iommu/intel-svm.c
> > +++ b/drivers/iommu/intel-svm.c
> > @@ -218,6 +218,10 @@ static const struct mmu_notifier_ops intel_mmuops = {
> >  static DEFINE_MUTEX(pasid_mutex);
> >  static LIST_HEAD(global_svm_list);
> >
> > +#define for_each_svm_dev(svm, dev) \
> > +	list_for_each_entry(sdev, &svm->devs, list) \
>
> > +	if (dev == sdev->dev) \
>
> This should be
> if (dev != sdev->dev) {} else
> and no trailing \ is needed.
>
> The rationale of the above form is to avoid
> for_each_foo() {
> } else {
> ...WTF?!..
> }
>
I understand, but until we have an else {} case there is nothing to avoid.
The current code only uses simple positive logic.
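
Just to spell out the hazard for the archives, here is a purely hypothetical
caller (nothing in this patch does this; do_attach()/do_fallback() are made
up for illustration):

	for_each_svm_dev(svm, dev) {
		do_attach(sdev);	/* per-device work */
	} else {
		do_fallback();		/* author meant this for some outer if */
	}

With the current "if (dev == sdev->dev)" form this still compiles, but the
stray else silently binds to the if hidden inside the macro, so
do_fallback() runs once for every non-matching device on the list. With the
"if (dev != sdev->dev) {} else" form the same caller fails to compile, which
is what surfaces the mistake.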

> > +
> >  int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
> >  {
> >  	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
> > @@ -263,15 +267,13 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
> >  				goto out;
> >  			}
> >
> > -			list_for_each_entry(sdev, &svm->devs, list) {
> > -				if (dev == sdev->dev) {
> > -					if (sdev->ops != ops) {
> > -						ret = -EBUSY;
> > -						goto out;
> > -					}
> > -					sdev->users++;
> > -					goto success;
> > +			for_each_svm_dev(svm, dev) {
> > +				if (sdev->ops != ops) {
> > +					ret = -EBUSY;
> > +					goto out;
> >  				}
> > +				sdev->users++;
> > +				goto success;
> >  			}
> >
> >  			break;
> > @@ -408,48 +410,43 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
> >  		goto out;
> >
> >  	svm = ioasid_find(NULL, pasid, NULL);
> > -	if (IS_ERR(svm)) {
> > +	if (IS_ERR_OR_NULL(svm)) {
> >  		ret = PTR_ERR(svm);
> >  		goto out;
> >  	}
> >
> > -	if (!svm)
> > -		goto out;
> > +	for_each_svm_dev(svm, dev) {
> > +		ret = 0;
> > +		sdev->users--;
> > +		if (!sdev->users) {
> > +			list_del_rcu(&sdev->list);
> > +			/* Flush the PASID cache and IOTLB for this device.
> > +			 * Note that we do depend on the hardware *not* using
> > +			 * the PASID any more. Just as we depend on other
> > +			 * devices never using PASIDs that they have no right
> > +			 * to use. We have a *shared* PASID table, because it's
> > +			 * large and has to be physically contiguous. So it's
> > +			 * hard to be as defensive as we might like. */
> > +			intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
> > +			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
> > +			kfree_rcu(sdev, rcu);
> > +
> > +			if (list_empty(&svm->devs)) {
> > +				ioasid_free(svm->pasid);
> > +				if (svm->mm)
> > +					mmu_notifier_unregister(&svm->notifier, svm->mm);
> >
> > -	list_for_each_entry(sdev, &svm->devs, list) {
> > -		if (dev == sdev->dev) {
> > -			ret = 0;
> > -			sdev->users--;
> > -			if (!sdev->users) {
> > -				list_del_rcu(&sdev->list);
> > -				/* Flush the PASID cache and IOTLB for this device.
> > -				 * Note that we do depend on the hardware *not* using
> > -				 * the PASID any more. Just as we depend on other
> > -				 * devices never using PASIDs that they have no right
> > -				 * to use. We have a *shared* PASID table, because it's
> > -				 * large and has to be physically contiguous. So it's
> > -				 * hard to be as defensive as we might like. */
> > -				intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
> > -				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
> > -				kfree_rcu(sdev, rcu);
> > -
> > -				if (list_empty(&svm->devs)) {
> > -					ioasid_free(svm->pasid);
> > -					if (svm->mm)
> > -						mmu_notifier_unregister(&svm->notifier, svm->mm);
> > -
> > -					list_del(&svm->list);
> > -
> > -					/* We mandate that no page faults may be outstanding
> > -					 * for the PASID when intel_svm_unbind_mm() is called.
> > -					 * If that is not obeyed, subtle errors will happen.
> > -					 * Let's make them less subtle... */
> > -					memset(svm, 0x6b, sizeof(*svm));
> > -					kfree(svm);
> > -				}
> > +				list_del(&svm->list);
> > +
> > +				/* We mandate that no page faults may be outstanding
> > +				 * for the PASID when intel_svm_unbind_mm() is called.
> > +				 * If that is not obeyed, subtle errors will happen.
> > +				 * Let's make them less subtle... */
> > +				memset(svm, 0x6b, sizeof(*svm));
> > +				kfree(svm);
> >  			}
> > -			break;
> >  		}
> > +		break;
> >  	}
> >  out:
> >  	mutex_unlock(&pasid_mutex);
> > @@ -585,7 +582,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
> >  			 * to unbind the mm while any page faults are outstanding.
> >  			 * So we only need RCU to protect the internal idr code. */
> >  			rcu_read_unlock();
> > -			if (IS_ERR(svm) || !svm) {
> > +			if (IS_ERR_OR_NULL(svm)) {
> >  				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
> >  				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
> >  				       ((unsigned long long *)req)[1]);
> > --
> > 2.7.4
> >
>
>

[Jacob Pan]