Re: [PATCH v5 06/19] irqdomain: Fix mapping-creation race
From: Johan Hovold
Date: Fri Feb 10 2023 - 04:10:25 EST

On Thu, Feb 09, 2023 at 02:03:19PM +0000, Marc Zyngier wrote:
> On Thu, 09 Feb 2023 13:23:10 +0000,
> Johan Hovold <johan+linaro@xxxxxxxxxx> wrote:
> >
> > Parallel probing of devices that share interrupts (e.g. when a driver
> > uses asynchronous probing) can currently result in two mappings being
> > created for the same hardware interrupt due to missing serialisation.
> >
> > Make sure to hold the irq_domain_mutex when creating mappings so that
> > looking for an existing mapping before creating a new one is done
> > atomically.
> >
> > Fixes: 765230b5f084 ("driver-core: add asynchronous probing support for drivers")
> > Fixes: b62b2cf5759b ("irqdomain: Fix handling of type settings for existing mappings")
> > Link: https://lore.kernel.org/r/YuJXMHoT4ijUxnRb@xxxxxxxxxxxxxxxxxxxx
> > Cc: stable@xxxxxxxxxxxxxxx # 4.8
> > Cc: Dmitry Torokhov <dtor@xxxxxxxxxxxx>
> > Cc: Jon Hunter <jonathanh@xxxxxxxxxx>
> > Tested-by: Hsin-Yi Wang <hsinyi@xxxxxxxxxxxx>
> > Tested-by: Mark-PK Tsai <mark-pk.tsai@xxxxxxxxxxxx>
> > Signed-off-by: Johan Hovold <johan+linaro@xxxxxxxxxx>
> > ---
> > kernel/irq/irqdomain.c | 55 ++++++++++++++++++++++++++++++------------
> > 1 file changed, 40 insertions(+), 15 deletions(-)
> >
> > diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
> > index 7b57949bc79c..1ddb01bd49a4 100644
> > --- a/kernel/irq/irqdomain.c
> > +++ b/kernel/irq/irqdomain.c
> > @@ -25,6 +25,9 @@ static DEFINE_MUTEX(irq_domain_mutex);
> >
> > static struct irq_domain *irq_default_domain;
> >
> > +static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
> > + unsigned int nr_irqs, int node, void *arg,
> > + bool realloc, const struct irq_affinity_desc *affinity);
> > static void irq_domain_check_hierarchy(struct irq_domain *domain);
> >
> > struct irqchip_fwid {
> > @@ -682,9 +685,9 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
> > EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
> > #endif
> >
> > -static unsigned int __irq_create_mapping_affinity(struct irq_domain *domain,
> > - irq_hw_number_t hwirq,
> > - const struct irq_affinity_desc *affinity)
> > +static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
> > + irq_hw_number_t hwirq,
> > + const struct irq_affinity_desc *affinity)
> > {
> > struct device_node *of_node = irq_domain_get_of_node(domain);
> > int virq;
> > @@ -699,7 +702,7 @@ static unsigned int __irq_create_mapping_affinity(struct irq_domain *domain,
> > return 0;
> > }
> >
> > - if (irq_domain_associate(domain, virq, hwirq)) {
> > + if (irq_domain_associate_locked(domain, virq, hwirq)) {
> > irq_free_desc(virq);
> > return 0;
> > }
> > @@ -735,14 +738,20 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
> > return 0;
> > }
> >
> > + mutex_lock(&irq_domain_mutex);
> > +
> > /* Check if mapping already exists */
> > virq = irq_find_mapping(domain, hwirq);
> > if (virq) {
> > pr_debug("existing mapping on virq %d\n", virq);
> > - return virq;
> > + goto out;
> > }
> >
> > - return __irq_create_mapping_affinity(domain, hwirq, affinity);
> > + virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
> > +out:
> > + mutex_unlock(&irq_domain_mutex);
> > +
> > + return virq;
> > }
> > EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
> >
> > @@ -809,6 +818,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
> > if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
> > type &= IRQ_TYPE_SENSE_MASK;
> >
> > + mutex_lock(&irq_domain_mutex);
> > +
> > /*
> > * If we've already configured this interrupt,
> > * don't do it again, or hell will break loose.
> > @@ -821,7 +832,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
> > * interrupt number.
> > */
> > if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
> > - return virq;
> > + goto out;
> >
> > /*
> > * If the trigger type has not been set yet, then set
> > @@ -830,36 +841,43 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
> > if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
> > irq_data = irq_get_irq_data(virq);
> > if (!irq_data)
> > - return 0;
> > + goto err;
> >
> > irqd_set_trigger_type(irq_data, type);
> > - return virq;
> > + goto out;
> > }
> >
> > pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
> > hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
> > - return 0;
> > + goto err;
> > }
> >
> > if (irq_domain_is_hierarchy(domain)) {
> > - virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
> > + virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
> > + fwspec, false, NULL);
> > if (virq <= 0)
> > - return 0;
> > + goto err;
> > } else {
> > /* Create mapping */
> > - virq = __irq_create_mapping_affinity(domain, hwirq, NULL);
> > + virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
> > if (!virq)
> > - return virq;
> > + goto err;
> > }
> >
> > irq_data = irq_get_irq_data(virq);
> > if (WARN_ON(!irq_data))
> > - return 0;
> > + goto err;
> >
> > /* Store trigger type */
> > irqd_set_trigger_type(irq_data, type);
> > +out:
> > + mutex_unlock(&irq_domain_mutex);
> >
> > return virq;
> > +err:
> > + mutex_unlock(&irq_domain_mutex);
> > +
> > + return 0;
>
> nit: it'd look better if we had a single exit path with the unlock,
> setting virq to 0 on failure. Not a big deal, as this can be tidied up
> when applied.

Using a single exit path would result in a slightly bigger diff (5
lines) and would not separate the success and error paths as clearly,
but yeah, it's possibly still preferred (see result below).
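
As an aside, the difference between the two styles boils down to
something like the following standalone sketch (plain C with made-up
names; do_create() and "mapping" are just stand-ins for the real
find/associate logic, not the irqdomain code itself):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int mapping;                     /* 0 = no mapping yet */

static int do_create(void)
{
        mapping = 100;                  /* pretend creation always succeeds */
        return mapping;
}

/* Style 1: separate success ("out") and error ("err") exits. */
static int create_two_labels(void)
{
        int virq;

        pthread_mutex_lock(&lock);

        virq = mapping;
        if (virq)                       /* existing mapping */
                goto out;

        virq = do_create();
        if (virq <= 0)
                goto err;
out:
        pthread_mutex_unlock(&lock);
        return virq;
err:
        pthread_mutex_unlock(&lock);
        return 0;
}

/* Style 2: single exit path, clearing the return value on failure. */
static int create_single_label(void)
{
        int virq;

        pthread_mutex_lock(&lock);

        virq = mapping;
        if (virq)                       /* existing mapping */
                goto out;

        virq = do_create();
        if (virq <= 0)
                virq = 0;
out:
        pthread_mutex_unlock(&lock);
        return virq;
}

int main(void)
{
        printf("two labels:   virq %d\n", create_two_labels());
        mapping = 0;
        printf("single label: virq %d\n", create_single_label());
        return 0;
}

Style 1 needs the duplicated unlock and return in the error path, while
style 2 needs an explicit "virq = 0" before jumping to the common label;
which of the two reads better is largely a matter of taste.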
Johan

diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 7b57949bc79c..bfda4adc05c0 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -25,6 +25,9 @@ static DEFINE_MUTEX(irq_domain_mutex);
static struct irq_domain *irq_default_domain;
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+ unsigned int nr_irqs, int node, void *arg,
+ bool realloc, const struct irq_affinity_desc *affinity);
static void irq_domain_check_hierarchy(struct irq_domain *domain);
struct irqchip_fwid {
@@ -682,9 +685,9 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
#endif
-static unsigned int __irq_create_mapping_affinity(struct irq_domain *domain,
- irq_hw_number_t hwirq,
- const struct irq_affinity_desc *affinity)
+static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity)
{
struct device_node *of_node = irq_domain_get_of_node(domain);
int virq;
@@ -699,7 +702,7 @@ static unsigned int __irq_create_mapping_affinity(struct irq_domain *domain,
return 0;
}
- if (irq_domain_associate(domain, virq, hwirq)) {
+ if (irq_domain_associate_locked(domain, virq, hwirq)) {
irq_free_desc(virq);
return 0;
}
@@ -735,14 +738,20 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
return 0;
}
+ mutex_lock(&irq_domain_mutex);
+
/* Check if mapping already exists */
virq = irq_find_mapping(domain, hwirq);
if (virq) {
pr_debug("existing mapping on virq %d\n", virq);
- return virq;
+ goto out;
}
- return __irq_create_mapping_affinity(domain, hwirq, affinity);
+ virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
+out:
+ mutex_unlock(&irq_domain_mutex);
+
+ return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
@@ -809,6 +818,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
type &= IRQ_TYPE_SENSE_MASK;
+ mutex_lock(&irq_domain_mutex);
+
/*
* If we've already configured this interrupt,
* don't do it again, or hell will break loose.
@@ -821,7 +832,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
* interrupt number.
*/
if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
- return virq;
+ goto out;
/*
* If the trigger type has not been set yet, then set
@@ -829,35 +840,45 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
*/
if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
irq_data = irq_get_irq_data(virq);
- if (!irq_data)
- return 0;
+ if (!irq_data) {
+ virq = 0;
+ goto out;
+ }
irqd_set_trigger_type(irq_data, type);
- return virq;
+ goto out;
}
pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
- return 0;
+ virq = 0;
+ goto out;
}
if (irq_domain_is_hierarchy(domain)) {
- virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
- if (virq <= 0)
- return 0;
+ virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
+ fwspec, false, NULL);
+ if (virq <= 0) {
+ virq = 0;
+ goto out;
+ }
} else {
/* Create mapping */
- virq = __irq_create_mapping_affinity(domain, hwirq, NULL);
+ virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
if (!virq)
- return virq;
+ goto out;
}
irq_data = irq_get_irq_data(virq);
- if (WARN_ON(!irq_data))
- return 0;
+ if (WARN_ON(!irq_data)) {
+ virq = 0;
+ goto out;
+ }
/* Store trigger type */
irqd_set_trigger_type(irq_data, type);
+out:
+ mutex_unlock(&irq_domain_mutex);
return virq;
}
@@ -1888,6 +1909,13 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
irq_set_handler_data(virq, handler_data);
}
+static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
+ unsigned int nr_irqs, int node, void *arg,
+ bool realloc, const struct irq_affinity_desc *affinity)
+{
+ return -EINVAL;
+}
+
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
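
For completeness, the race that the mutex closes can also be shown with
a small standalone program (again plain C with made-up names, not the
irqdomain API): if the existing-mapping lookup and the creation are two
separate critical sections, two parallel probers can both miss the
lookup and each create a mapping, whereas holding a single lock across
both steps guarantees that the second prober sees the first one's
mapping. Depending on scheduling, the racy variant below reports one or
two created mappings; the locked variant always reports one.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static int mapping;                     /* 0 = no mapping yet */
static int next_virq = 100;
static int created;                     /* number of mappings created */

/* Racy: lookup and creation are two separate critical sections. */
static int create_mapping_racy(void)
{
        int virq;

        pthread_mutex_lock(&map_lock);
        virq = mapping;                 /* check if mapping already exists */
        pthread_mutex_unlock(&map_lock);

        if (virq)
                return virq;

        pthread_mutex_lock(&map_lock);
        mapping = next_virq++;          /* create mapping (may happen twice) */
        created++;
        virq = mapping;
        pthread_mutex_unlock(&map_lock);

        return virq;
}

/* Fixed: lookup and creation are done atomically under one lock. */
static int create_mapping_locked(void)
{
        int virq;

        pthread_mutex_lock(&map_lock);
        virq = mapping;
        if (!virq) {
                mapping = next_virq++;
                created++;
                virq = mapping;
        }
        pthread_mutex_unlock(&map_lock);

        return virq;
}

static void *prober_racy(void *arg)
{
        (void)arg;
        create_mapping_racy();
        return NULL;
}

static void *prober_locked(void *arg)
{
        (void)arg;
        create_mapping_locked();
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, prober_racy, NULL);
        pthread_create(&t2, NULL, prober_racy, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("racy variant:   %d mapping(s) created\n", created);

        mapping = 0;
        created = 0;
        pthread_create(&t1, NULL, prober_locked, NULL);
        pthread_create(&t2, NULL, prober_locked, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("locked variant: %d mapping(s) created\n", created);

        return 0;
}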