[RFC PATCH V3 20/26] vfio/pci: Separate frontend and backend code during interrupt enable/disable

From: Reinette Chatre
Date: Fri Oct 27 2023 - 13:02:50 EST


vfio_msi_set_vector_signal() contains a mix of generic and
backend-specific code.

Separate the backend-specific code into functions that can be
replaced by backend-specific callbacks.

The dev_info() used in the error message is replaced by a pr_info()
that prints the device name, which is generated by the backend-specific
code and intended to be used during request_irq().

Signed-off-by: Reinette Chatre <reinette.chatre@xxxxxxxxx>
---
Changes since RFC V2:
- New patch
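
Note for reviewers: the split below is meant to let a backend supply its
own interrupt request/free/naming hooks. A minimal sketch of how such
callbacks could be wired up is shown here for orientation only; the
structure and member names (vfio_pci_intr_ops, request_interrupt,
free_interrupt, device_name) are illustrative placeholders and do not
necessarily match the interface defined elsewhere in this series.

/*
 * Illustrative sketch only: placeholder callback table that a PCI MSI/MSI-X
 * backend could fill in with the helpers factored out by this patch.
 */
struct vfio_pci_intr_ops {
	int (*request_interrupt)(struct vfio_pci_intr_ctx *intr_ctx,
				 struct vfio_pci_irq_ctx *ctx,
				 unsigned int vector, unsigned int index);
	void (*free_interrupt)(struct vfio_pci_intr_ctx *intr_ctx,
			       struct vfio_pci_irq_ctx *ctx,
			       unsigned int vector);
	char *(*device_name)(struct vfio_pci_intr_ctx *intr_ctx,
			     unsigned int vector, unsigned int index);
};

/* The PCI backend would then provide the functions introduced below: */
static const struct vfio_pci_intr_ops vfio_pci_msi_intr_ops = {
	.request_interrupt	= vfio_msi_request_interrupt,
	.free_interrupt		= vfio_msi_free_interrupt,
	.device_name		= vfio_msi_device_name,
};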

drivers/vfio/pci/vfio_pci_intrs.c | 110 +++++++++++++++++++-----------
1 file changed, 70 insertions(+), 40 deletions(-)

diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index fd0713dc9f81..c1f65b8adfe2 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -416,28 +416,81 @@ static int vfio_msi_alloc_irq(struct vfio_pci_core_device *vdev,
return map.index < 0 ? map.index : map.virq;
}

-static int vfio_msi_set_vector_signal(struct vfio_pci_intr_ctx *intr_ctx,
- unsigned int vector, int fd,
+static void vfio_msi_free_interrupt(struct vfio_pci_intr_ctx *intr_ctx,
+ struct vfio_pci_irq_ctx *ctx,
+ unsigned int vector)
+{
+ struct vfio_pci_core_device *vdev = intr_ctx->priv;
+ u16 cmd;
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ free_irq(ctx->virq, ctx->trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ ctx->virq = 0;
+ /* Interrupt stays allocated, will be freed at MSI-X disable. */
+}
+
+static int vfio_msi_request_interrupt(struct vfio_pci_intr_ctx *intr_ctx,
+ struct vfio_pci_irq_ctx *ctx,
+ unsigned int vector,
unsigned int index)
+{
+ bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;
+ struct vfio_pci_core_device *vdev = intr_ctx->priv;
+ int irq, ret;
+ u16 cmd;
+
+ /* Interrupt stays allocated, will be freed at MSI-X disable. */
+ irq = vfio_msi_alloc_irq(vdev, vector, msix);
+ if (irq < 0)
+ return irq;
+
+ /*
+ * If the vector was previously allocated, refresh the on-device
+ * message data before enabling in case it had been cleared or
+ * corrupted (e.g. due to backdoor resets) since writing.
+ */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
+ if (msix) {
+ struct msi_msg msg;
+
+ get_cached_msi_msg(irq, &msg);
+ pci_write_msi_msg(irq, &msg);
+ }
+
+ ret = request_irq(irq, vfio_msihandler, 0, ctx->name, ctx->trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
+ ctx->virq = irq;
+
+ return ret;
+}
+
+static char *vfio_msi_device_name(struct vfio_pci_intr_ctx *intr_ctx,
+ unsigned int vector,
+ unsigned int index)
{
bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;
struct vfio_pci_core_device *vdev = intr_ctx->priv;
struct pci_dev *pdev = vdev->pdev;
+
+ return kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
+ msix ? "x" : "", vector, pci_name(pdev));
+}
+
+static int vfio_msi_set_vector_signal(struct vfio_pci_intr_ctx *intr_ctx,
+ unsigned int vector, int fd,
+ unsigned int index)
+{
struct vfio_pci_irq_ctx *ctx;
struct eventfd_ctx *trigger;
- int irq = -EINVAL, ret;
- u16 cmd;
+ int ret;

ctx = vfio_irq_ctx_get(intr_ctx, vector);

if (ctx && ctx->trigger) {
irq_bypass_unregister_producer(&ctx->producer);
- irq = ctx->virq;
- cmd = vfio_pci_memory_lock_and_enable(vdev);
- free_irq(ctx->virq, ctx->trigger);
- vfio_pci_memory_unlock_and_restore(vdev, cmd);
- ctx->virq = 0;
- /* Interrupt stays allocated, will be freed at MSI-X disable. */
+ vfio_msi_free_interrupt(intr_ctx, ctx, vector);
kfree(ctx->name);
ctx->name = NULL;
eventfd_ctx_put(ctx->trigger);
@@ -447,13 +500,6 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_intr_ctx *intr_ctx,
if (fd < 0)
return 0;

- if (irq == -EINVAL) {
- /* Interrupt stays allocated, will be freed at MSI-X disable. */
- irq = vfio_msi_alloc_irq(vdev, vector, msix);
- if (irq < 0)
- return irq;
- }
-
/* Per-interrupt context remain allocated. */
if (!ctx) {
ctx = vfio_irq_ctx_alloc(intr_ctx, vector);
@@ -461,8 +507,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_intr_ctx *intr_ctx,
return -ENOMEM;
}

- ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
- msix ? "x" : "", vector, pci_name(pdev));
+ ctx->name = vfio_msi_device_name(intr_ctx, vector, index);
if (!ctx->name)
return -ENOMEM;

@@ -472,42 +517,27 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_intr_ctx *intr_ctx,
goto out_free_name;
}

- /*
- * If the vector was previously allocated, refresh the on-device
- * message data before enabling in case it had been cleared or
- * corrupted (e.g. due to backdoor resets) since writing.
- */
- cmd = vfio_pci_memory_lock_and_enable(vdev);
- if (msix) {
- struct msi_msg msg;
-
- get_cached_msi_msg(irq, &msg);
- pci_write_msi_msg(irq, &msg);
- }
+ ctx->trigger = trigger;

- ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
- vfio_pci_memory_unlock_and_restore(vdev, cmd);
+ ret = vfio_msi_request_interrupt(intr_ctx, ctx, vector, index);
if (ret)
goto out_put_eventfd_ctx;

- ctx->virq = irq;
-
ctx->producer.token = trigger;
ctx->producer.irq = ctx->virq;
ret = irq_bypass_register_producer(&ctx->producer);
if (unlikely(ret)) {
- dev_info(&pdev->dev,
- "irq bypass producer (token %p) registration fails: %d\n",
- ctx->producer.token, ret);
+ pr_info("%s irq bypass producer (token %p) registration fails: %d\n",
+ ctx->name, ctx->producer.token, ret);

ctx->producer.token = NULL;
}
- ctx->trigger = trigger;

return 0;

out_put_eventfd_ctx:
- eventfd_ctx_put(trigger);
+ eventfd_ctx_put(ctx->trigger);
+ ctx->trigger = NULL;
out_free_name:
kfree(ctx->name);
ctx->name = NULL;
--
2.34.1