Re: [PATCH v2 8/8] dmaengine/idxd: Re-enable kernel workqueue under DMA API

From: Jacob Pan
Date: Tue Mar 28 2023 - 16:20:03 EST


Hi Fenghua,

On Tue, 28 Mar 2023 11:16:31 -0700, Fenghua Yu <fenghua.yu@xxxxxxxxx> wrote:

> Hi, Jacob,
>
> On 3/27/23 16:21, Jacob Pan wrote:
> > Kernel workqueues were disabled due to flawed use of kernel VA and SVA
> > API. Now That we have the support for attaching PASID to the device's
>
> s/That/that/
Will fix.

> > default domain and the ability to reserve global PASIDs from SVA APIs,
> > we can re-enable the kernel work queues and use them under DMA API.
> >
> > We also use non-privileged access for in-kernel DMA to be consistent
> > with the IOMMU settings. Consequently, interrupt for user privilege is
> > enabled for work completion IRQs.
> >
> > Link:https://lore.kernel.org/linux-iommu/20210511194726.GP1002214@xxxxxxxxxx/
> > Reviewed-by: Dave Jiang <dave.jiang@xxxxxxxxx>
> > Signed-off-by: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
> > ---
> > drivers/dma/idxd/device.c | 30 ++++-------------------
> > drivers/dma/idxd/init.c | 51 ++++++++++++++++++++++++++++++++++++---
> > drivers/dma/idxd/sysfs.c | 7 ------
> > 3 files changed, 52 insertions(+), 36 deletions(-)
> >
> > diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
> > index 6fca8fa8d3a8..f6b133d61a04 100644
> > --- a/drivers/dma/idxd/device.c
> > +++ b/drivers/dma/idxd/device.c
> > @@ -299,21 +299,6 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
> > }
> > }
> >
> > -static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
> > -{
> > - struct idxd_device *idxd = wq->idxd;
> > - union wqcfg wqcfg;
> > - unsigned int offset;
> > -
> > - offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
> > - spin_lock(&idxd->dev_lock);
> > - wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
> > - wqcfg.priv = priv;
> > - wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
> > - iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
> > - spin_unlock(&idxd->dev_lock);
> > -}
> > -
> > static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
> > {
> > struct idxd_device *idxd = wq->idxd;
> > @@ -1324,15 +1309,14 @@ int drv_enable_wq(struct idxd_wq *wq)
> > }
> >
> > /*
> > - * In the event that the WQ is configurable for pasid and priv bits.
> > - * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
> > - * However, for non-kernel wq, the driver should only set the pasid_en bit for
> > - * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
> > + * In the event that the WQ is configurable for pasid, the driver
> > + * should setup the pasid, pasid_en bit. This is true for both kernel
> > + * and user shared workqueues. There is no need to setup priv bit in
> > + * that in-kernel DMA will also do user privileged requests.
> > + * A dedicated wq that is not 'kernel' type will configure pasid and
> > * pasid_en later on so there is no need to setup.
> > */
> > if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
> > - int priv = 0;
> > -
> > if (wq_pasid_enabled(wq)) {
> > if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
> > u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
> > @@ -1340,10 +1324,6 @@ int drv_enable_wq(struct idxd_wq *wq)
> > __idxd_wq_set_pasid_locked(wq, pasid);
> > }
> > }
> > -
> > - if (is_idxd_wq_kernel(wq))
> > - priv = 1;
> > - __idxd_wq_set_priv_locked(wq, priv);
> > }
> >
> > rc = 0;
> > diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
> > index e6ee267da0ff..a3396e1b38f1 100644
> > --- a/drivers/dma/idxd/init.c
> > +++ b/drivers/dma/idxd/init.c
> > @@ -506,14 +506,56 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
> > static int idxd_enable_system_pasid(struct idxd_device *idxd)
> > {
> > - return -EOPNOTSUPP;
> > + struct pci_dev *pdev = idxd->pdev;
> > + struct device *dev = &pdev->dev;
> > + struct iommu_domain *domain;
> > + union gencfg_reg gencfg;
> > + ioasid_t pasid;
> > + int ret;
> > +
> > + /*
> > + * Attach a global PASID to the DMA domain so that we can use ENQCMDS
> > + * to submit work on buffers mapped by DMA API.
> > + */
> > + domain = iommu_get_dma_domain(dev);
> > + if (!domain)
> > + return -EPERM;
> > +
> > + pasid = iommu_sva_reserve_pasid(1, dev->iommu->max_pasids);
> > + if (pasid == IOMMU_PASID_INVALID)
> > + return -ENOSPC;
> > +
> > + ret = iommu_attach_device_pasid(domain, dev, pasid);
> > + if (ret) {
> > + dev_err(dev, "failed to attach device pasid %d, domain type %d",
> > + pasid, domain->type);
> > + iommu_sva_release_pasid(pasid);
> > + return ret;
> > + }
> > +
> > + /* Since we set user privilege for kernel DMA, enable completion IRQ */
> > + gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
> > + gencfg.user_int_en = 1;
> > + iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
> > + idxd->pasid = pasid;
> > +
> > + return ret;
> > }
> >
> > static void idxd_disable_system_pasid(struct idxd_device *idxd)
> > {
> > + struct pci_dev *pdev = idxd->pdev;
> > + struct device *dev = &pdev->dev;
> > + struct iommu_domain *domain;
> > +
> > + domain = iommu_get_domain_for_dev(dev);
> > + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> > + return;
> >
> > - iommu_sva_unbind_device(idxd->sva);
> > + iommu_detach_device_pasid(domain, dev, idxd->pasid);
> > + iommu_sva_release_pasid(idxd->pasid);
>
> May need gencfg.user_int_en = 0 here.
Yes, good catch!
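Something along these lines for the next version, mirroring the enable
path (just a sketch reusing the same gencfg accessors; final placement of
the register write may differ in v3):

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;
	union gencfg_reg gencfg;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
		return;

	iommu_detach_device_pasid(domain, dev, idxd->pasid);
	iommu_sva_release_pasid(idxd->pasid);

	/* Undo the user_int_en set in idxd_enable_system_pasid() */
	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	gencfg.user_int_en = 0;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}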


Thanks,

Jacob