[PATCH V2 2/7] genirq/affinity: pass affinity managed mask array to irq_build_affinity_masks

From: Ming Lei
Date: Wed Aug 18 2021 - 05:30:07 EST


Pass the affinity-managed mask array to irq_build_affinity_masks() so
that the index of the first affinity-managed vector is always zero;
this lets us simplify the implementation a bit.
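
For illustration, a minimal standalone C sketch of the idea (not
kernel code; fill_vectors() and the array shape here are made up):
instead of passing the whole array plus a start index, the caller
passes a pointer offset into the array, so the callee always indexes
from zero and no longer needs a 'firstvec' parameter:

	/* Sketch only: a stand-in for __irq_build_affinity_masks() */
	#include <stdio.h>

	struct desc { int id; };

	/* No 'firstvec' argument: index 0 is the first managed vector. */
	static void fill_vectors(unsigned int numvecs, struct desc *masks)
	{
		unsigned int i;

		for (i = 0; i < numvecs; i++)
			masks[i].id = i;
	}

	int main(void)
	{
		struct desc masks[8] = { { 0 } };
		unsigned int curvec = 2, this_vecs = 4;

		/* mirrors irq_build_affinity_masks(this_vecs, &masks[curvec]) */
		fill_vectors(this_vecs, &masks[curvec]);
		printf("masks[%u].id = %d\n", curvec, masks[curvec].id);
		return 0;
	}

This is the same shape as the caller change in the last hunk below,
where irq_create_affinity_masks() now passes &masks[curvec].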

Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
kernel/irq/affinity.c | 28 ++++++++++++----------------
1 file changed, 12 insertions(+), 16 deletions(-)

diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 856ab6d39c05..0bc83d57cb34 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -246,14 +246,13 @@ static void alloc_nodes_vectors(unsigned int numvecs,

static int __irq_build_affinity_masks(unsigned int startvec,
unsigned int numvecs,
- unsigned int firstvec,
cpumask_var_t *node_to_cpumask,
const struct cpumask *cpu_mask,
struct cpumask *nmsk,
struct irq_affinity_desc *masks)
{
unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
- unsigned int last_affv = firstvec + numvecs;
+ unsigned int last_affv = numvecs;
unsigned int curvec = startvec;
nodemask_t nodemsk = NODE_MASK_NONE;
struct node_vectors *node_vectors;
@@ -272,7 +271,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
node_to_cpumask[n]);
if (++curvec == last_affv)
- curvec = firstvec;
+ curvec = 0;
}
return numvecs;
}
@@ -320,7 +319,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
* may start anywhere
*/
if (curvec >= last_affv)
- curvec = firstvec;
+ curvec = 0;
irq_spread_init_one(&masks[curvec].mask, nmsk,
cpus_per_vec);
}
@@ -335,11 +334,10 @@ static int __irq_build_affinity_masks(unsigned int startvec,
* 1) spread present CPU on these vectors
* 2) spread other possible CPUs on these vectors
*/
-static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
+static int irq_build_affinity_masks(unsigned int numvecs,
struct irq_affinity_desc *masks)
{
- unsigned int curvec = startvec, nr_present = 0, nr_others = 0;
- unsigned int firstvec = startvec;
+ unsigned int curvec = 0, nr_present = 0, nr_others = 0;
cpumask_var_t *node_to_cpumask;
cpumask_var_t nmsk, npresmsk;
int ret = -ENOMEM;
@@ -359,9 +357,8 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
build_node_to_cpumask(node_to_cpumask);

/* Spread on present CPUs starting from affd->pre_vectors */
- ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
- node_to_cpumask, cpu_present_mask,
- nmsk, masks);
+ ret = __irq_build_affinity_masks(curvec, numvecs, node_to_cpumask,
+ cpu_present_mask, nmsk, masks);
if (ret < 0)
goto fail_build_affinity;
nr_present = ret;
@@ -373,13 +370,12 @@ static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
* out vectors.
*/
if (nr_present >= numvecs)
- curvec = firstvec;
+ curvec = 0;
else
- curvec = firstvec + nr_present;
+ curvec = nr_present;
cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
- ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
- node_to_cpumask, npresmsk, nmsk,
- masks);
+ ret = __irq_build_affinity_masks(curvec, numvecs, node_to_cpumask,
+ npresmsk, nmsk, masks);
if (ret >= 0)
nr_others = ret;

@@ -462,7 +458,7 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
unsigned int this_vecs = affd->set_size[i];
int ret;

- ret = irq_build_affinity_masks(curvec, this_vecs, masks);
+ ret = irq_build_affinity_masks(this_vecs, &masks[curvec]);
if (ret) {
kfree(masks);
return NULL;
--
2.31.1