[PATCH 4.19 01/78] irq/matrix: Split out the CPU selection code into a helper

From: Greg Kroah-Hartman
Date: Mon Mar 04 2019 - 03:27:59 EST


4.19-stable review patch. If anyone has any objections, please let me know.

------------------

[ Upstream commit 8ffe4e61c06a48324cfd97f1199bb9838acce2f2 ]

Linux finds the CPU which has the lowest vector allocation count to spread
out the non-managed interrupts across the possible target CPUs, but does
not do so for managed interrupts.

Split out the CPU selection code into a helper function for reuse. No
functional change.
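
For readers following along, here is a minimal, self-contained user-space
sketch of the selection policy being factored out: scan a candidate set and
pick the online CPU with the most free vectors (i.e. the lowest allocation
count). The names fake_cpumap and find_best_cpu() and the sample numbers are
illustrative stand-ins, not the kernel's irq_matrix types:

/*
 * Simplified model of the per-CPU bookkeeping; NOT the kernel's
 * struct cpumap, just enough state to show the selection loop.
 */
#include <limits.h>
#include <stdio.h>

struct fake_cpumap {
	int online;             /* CPU is usable as a target          */
	unsigned int available; /* free vectors left on this CPU      */
};

static unsigned int find_best_cpu(const struct fake_cpumap *maps,
				  unsigned int nr_cpus)
{
	unsigned int cpu, best_cpu = UINT_MAX, maxavl = 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		/* Skip offline CPUs and CPUs that are not strictly better */
		if (!maps[cpu].online || maps[cpu].available <= maxavl)
			continue;
		best_cpu = cpu;
		maxavl = maps[cpu].available;
	}
	return best_cpu;        /* UINT_MAX means no usable CPU found */
}

int main(void)
{
	struct fake_cpumap maps[] = {
		{ 1, 12 }, { 0, 200 }, { 1, 57 }, { 1, 57 },
	};

	/* CPU 1 is offline, CPU 3 ties CPU 2, so this prints "best CPU: 2" */
	printf("best CPU: %u\n", find_best_cpu(maps, 4));
	return 0;
}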

Signed-off-by: Dou Liyang <douly.fnst@xxxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: hpa@xxxxxxxxx
Link: https://lkml.kernel.org/r/20180908175838.14450-1-dou_liyang@xxxxxxx
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
kernel/irq/matrix.c | 65 ++++++++++++++++++++++++++++++----------------------
1 file changed, 38 insertions(+), 27 deletions(-)

--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(st
 	return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+					const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, maxavl = 0;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->available <= maxavl)
+			continue;
+
+		best_cpu = cpu;
+		maxavl = cm->available;
+	}
+	return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:		Matrix pointer
@@ -322,37 +343,27 @@ void irq_matrix_remove_reserved(struct i
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 		     bool reserved, unsigned int *mapped_cpu)
 {
-	unsigned int cpu, best_cpu, maxavl = 0;
+	unsigned int cpu, bit;
 	struct cpumap *cm;
-	unsigned int bit;
 
-	best_cpu = UINT_MAX;
-	for_each_cpu(cpu, msk) {
-		cm = per_cpu_ptr(m->maps, cpu);
-
-		if (!cm->online || cm->available <= maxavl)
-			continue;
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
-		best_cpu = cpu;
-		maxavl = cm->available;
-	}
+	cm = per_cpu_ptr(m->maps, cpu);
+	bit = matrix_alloc_area(m, cm, 1, false);
+	if (bit >= m->alloc_end)
+		return -ENOSPC;
+	cm->allocated++;
+	cm->available--;
+	m->total_allocated++;
+	m->global_available--;
+	if (reserved)
+		m->global_reserved--;
+	*mapped_cpu = cpu;
+	trace_irq_matrix_alloc(bit, cpu, m, cm);
+	return bit;
 
-	if (maxavl) {
-		cm = per_cpu_ptr(m->maps, best_cpu);
-		bit = matrix_alloc_area(m, cm, 1, false);
-		if (bit < m->alloc_end) {
-			cm->allocated++;
-			cm->available--;
-			m->total_allocated++;
-			m->global_available--;
-			if (reserved)
-				m->global_reserved--;
-			*mapped_cpu = best_cpu;
-			trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-			return bit;
-		}
-	}
-	return -ENOSPC;
 }
 
 /**