Re: [patch V3 05/10] PCI/MSI: Switch to MSI descriptor locking to guard()
From: Bjorn Helgaas
Date: Tue Mar 18 2025 - 16:26:04 EST
On Mon, Mar 17, 2025 at 02:29:27PM +0100, Thomas Gleixner wrote:
> Convert the code to use the new guard(msi_descs_lock).
>
> No functional change intended.
>
> Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@xxxxxxxxxx>
> Cc: Bjorn Helgaas <bhelgaas@xxxxxxxxxx>
> Cc: linux-pci@xxxxxxxxxxxxxxx

Acked-by: Bjorn Helgaas <bhelgaas@xxxxxxxxxx>

To help connect these together, I might mention "msi_descs_lock"
specifically in the subject line of the earlier patch:

  genirq/msi: Use lock guards for MSI descriptor locking

The msi_capability_init() -> __msi_capability_init() rework is a big
chunk compared to the rest of this patch. Same for
msix_setup_interrupts() -> __msix_setup_interrupts().

I think I see the point (basically move the body to the new "__"
functions and put the guard() in the original functions before calling
the new ones).
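
To double-check my reading, a minimal sketch of that shape (placeholder
foo_setup()/__foo_setup() names, not the real PCI/MSI functions, and
assuming the msi_descs_lock guard class added by the earlier patch in
this series):

  #include <linux/cleanup.h>
  #include <linux/msi.h>
  #include <linux/pci.h>

  /* Old function body moves here, minus lock/unlock and goto cleanup. */
  static int __foo_setup(struct pci_dev *dev, int nvec)
  {
          if (nvec < 1)
                  return -EINVAL;  /* early return still drops the lock */

          return 0;
  }

  static int foo_setup(struct pci_dev *dev, int nvec)
  {
          /* Held for the whole call, released on every return path. */
          guard(msi_descs_lock)(&dev->dev);
          return __foo_setup(dev, nvec);
  }

That keeps the guard() scope in the wrapper so the "__" helper can use
plain returns instead of unlock labels.
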
> ---
> V3: Use __free in __msix_setup_interrupts() - PeterZ
> V2: Remove the gotos - Jonathan
> ---
> drivers/pci/msi/api.c | 6 --
> drivers/pci/msi/msi.c | 124 +++++++++++++++++++++++++-------------------------
> 2 files changed, 64 insertions(+), 66 deletions(-)
>
> --- a/drivers/pci/msi/api.c
> +++ b/drivers/pci/msi/api.c
> @@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev
> if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
> return;
>
> - msi_lock_descs(&dev->dev);
> + guard(msi_descs_lock)(&dev->dev);
> pci_msi_shutdown(dev);
> pci_free_msi_irqs(dev);
> - msi_unlock_descs(&dev->dev);
> }
> EXPORT_SYMBOL(pci_disable_msi);
>
> @@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *de
> if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
> return;
>
> - msi_lock_descs(&dev->dev);
> + guard(msi_descs_lock)(&dev->dev);
> pci_msix_shutdown(dev);
> pci_free_msi_irqs(dev);
> - msi_unlock_descs(&dev->dev);
> }
> EXPORT_SYMBOL(pci_disable_msix);
>
> --- a/drivers/pci/msi/msi.c
> +++ b/drivers/pci/msi/msi.c
> @@ -336,41 +336,11 @@ static int msi_verify_entries(struct pci
> return !entry ? 0 : -EIO;
> }
>
> -/**
> - * msi_capability_init - configure device's MSI capability structure
> - * @dev: pointer to the pci_dev data structure of MSI device function
> - * @nvec: number of interrupts to allocate
> - * @affd: description of automatic IRQ affinity assignments (may be %NULL)
> - *
> - * Setup the MSI capability structure of the device with the requested
> - * number of interrupts. A return value of zero indicates the successful
> - * setup of an entry with the new MSI IRQ. A negative return value indicates
> - * an error, and a positive return value indicates the number of interrupts
> - * which could have been allocated.
> - */
> -static int msi_capability_init(struct pci_dev *dev, int nvec,
> - struct irq_affinity *affd)
> +static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
> {
> - struct irq_affinity_desc *masks = NULL;
> + int ret = msi_setup_msi_desc(dev, nvec, masks);
> struct msi_desc *entry, desc;
> - int ret;
> -
> - /* Reject multi-MSI early on irq domain enabled architectures */
> - if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
> - return 1;
> -
> - /*
> - * Disable MSI during setup in the hardware, but mark it enabled
> - * so that setup code can evaluate it.
> - */
> - pci_msi_set_enable(dev, 0);
> - dev->msi_enabled = 1;
> -
> - if (affd)
> - masks = irq_create_affinity_masks(nvec, affd);
>
> - msi_lock_descs(&dev->dev);
> - ret = msi_setup_msi_desc(dev, nvec, masks);
> if (ret)
> goto fail;
>
> @@ -399,19 +369,48 @@ static int msi_capability_init(struct pc
>
> pcibios_free_irq(dev);
> dev->irq = entry->irq;
> - goto unlock;
> -
> + return 0;
> err:
> pci_msi_unmask(&desc, msi_multi_mask(&desc));
> pci_free_msi_irqs(dev);
> fail:
> dev->msi_enabled = 0;
> -unlock:
> - msi_unlock_descs(&dev->dev);
> - kfree(masks);
> return ret;
> }
>
> +/**
> + * msi_capability_init - configure device's MSI capability structure
> + * @dev: pointer to the pci_dev data structure of MSI device function
> + * @nvec: number of interrupts to allocate
> + * @affd: description of automatic IRQ affinity assignments (may be %NULL)
> + *
> + * Setup the MSI capability structure of the device with the requested
> + * number of interrupts. A return value of zero indicates the successful
> + * setup of an entry with the new MSI IRQ. A negative return value indicates
> + * an error, and a positive return value indicates the number of interrupts
> + * which could have been allocated.
> + */
> +static int msi_capability_init(struct pci_dev *dev, int nvec,
> + struct irq_affinity *affd)
> +{
> + /* Reject multi-MSI early on irq domain enabled architectures */
> + if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
> + return 1;
> +
> + /*
> + * Disable MSI during setup in the hardware, but mark it enabled
> + * so that setup code can evaluate it.
> + */
> + pci_msi_set_enable(dev, 0);
> + dev->msi_enabled = 1;
> +
> + struct irq_affinity_desc *masks __free(kfree) =
> + affd ? irq_create_affinity_masks(nvec, affd) : NULL;
> +
> + guard(msi_descs_lock)(&dev->dev);
> + return __msi_capability_init(dev, nvec, masks);
> +}
> +
> int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
> struct irq_affinity *affd)
> {
> @@ -666,38 +665,39 @@ static void msix_mask_all(void __iomem *
> writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
> }
>
> -static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
> - int nvec, struct irq_affinity *affd)
> -{
> - struct irq_affinity_desc *masks = NULL;
> - int ret;
> +DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T));
>
> - if (affd)
> - masks = irq_create_affinity_masks(nvec, affd);
> +static int __msix_setup_interrupts(struct pci_dev *__dev, struct msix_entry *entries,
> + int nvec, struct irq_affinity_desc *masks)
> +{
> + struct pci_dev *dev __free(free_msi_irqs) = __dev;
>
> - msi_lock_descs(&dev->dev);
> - ret = msix_setup_msi_descs(dev, entries, nvec, masks);
> + int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
> if (ret)
> - goto out_free;
> + return ret;
>
> ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
> if (ret)
> - goto out_free;
> + return ret;
>
> /* Check if all MSI entries honor device restrictions */
> ret = msi_verify_entries(dev);
> if (ret)
> - goto out_free;
> + return ret;
>
> + retain_ptr(dev);
> msix_update_entries(dev, entries);
> - goto out_unlock;
> + return 0;
> +}
>
> -out_free:
> - pci_free_msi_irqs(dev);
> -out_unlock:
> - msi_unlock_descs(&dev->dev);
> - kfree(masks);
> - return ret;
> +static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
> + int nvec, struct irq_affinity *affd)
> +{
> + struct irq_affinity_desc *masks __free(kfree) =
> + affd ? irq_create_affinity_masks(nvec, affd) : NULL;
> +
> + guard(msi_descs_lock)(&dev->dev);
> + return __msix_setup_interrupts(dev, entries, nvec, masks);
> }
>
> /**
> @@ -871,13 +871,13 @@ void __pci_restore_msix_state(struct pci
>
> write_msg = arch_restore_msi_irqs(dev);
>
> - msi_lock_descs(&dev->dev);
> - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
> - if (write_msg)
> - __pci_write_msi_msg(entry, &entry->msg);
> - pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
> + scoped_guard (msi_descs_lock, &dev->dev) {
> + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
> + if (write_msg)
> + __pci_write_msi_msg(entry, &entry->msg);
> + pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
> + }
> }
> - msi_unlock_descs(&dev->dev);
>
> pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
> }
>