[RFC PATCH 2/8] sched/topology: Introduce sg->shared
From: K Prateek Nayak
Date: Thu Mar 13 2025 - 05:39:28 EST
The sched_group(s) of a particular sched_domain are created from the
sched_domain struct of its child domain. Attach the sched_domain_shared
struct of that child domain to the sched_group.
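For reference, the struct being attached is the existing sched_domain_shared
(currently declared in include/linux/sched/topology.h; the exact field set
may differ between kernel versions):

	struct sched_domain_shared {
		atomic_t	ref;
		atomic_t	nr_busy_cpus;
		int		has_idle_cores;
		int		nr_idle_scan;
	};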
Subsequent commits will use this shared struct to propagate sched_group
stats up the sched_domain hierarchy and optimize load balancing.
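As a purely illustrative sketch (not something this patch adds), the new
sg->shared pointer lets code iterating a parent domain's groups read stats
maintained at the child level directly off the group, e.g. the existing
nr_busy_cpus counter; the helper name below is made up:

	/* Hypothetical helper, for illustration only */
	static inline int sg_busy_cpus(struct sched_group *sg)
	{
		/* nr_busy_cpus lives in the child domain's shared struct */
		return sg->shared ? atomic_read(&sg->shared->nr_busy_cpus) : -1;
	}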
Signed-off-by: K Prateek Nayak <kprateek.nayak@xxxxxxx>
---
kernel/sched/sched.h | 3 +++
kernel/sched/topology.c | 27 +++++++++++++++++++++++++++
2 files changed, 30 insertions(+)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 023b844159c9..38aa4cba5d1f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2089,6 +2089,9 @@ struct sched_group {
int asym_prefer_cpu; /* CPU of highest priority in group */
int flags;
+ /* sd->shared of the domain from which this group was created */
+ struct sched_domain_shared *shared;
+
/*
* The CPUs this group covers.
*
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 815474823b3f..508ee8aa492b 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -612,6 +612,23 @@ static struct root_domain *alloc_rootdomain(void)
return rd;
}
+static void link_sg_shared(struct sched_group *sg, struct sched_domain_shared *sds)
+{
+ if (!sds)
+ return;
+
+ sg->shared = sds;
+ atomic_inc(&sds->ref);
+}
+
+static void free_sg_shared(struct sched_group *sg)
+{
+ if (sg->shared && atomic_dec_and_test(&sg->shared->ref))
+ kfree(sg->shared);
+
+ sg->shared = NULL;
+}
+
static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;
struct sched_group *tmp, *first;
@@ -626,6 +643,8 @@ static void free_sched_groups(struct sched_group *sg, int free_sgc)
if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
kfree(sg->sgc);
+ free_sg_shared(sg);
+
if (atomic_dec_and_test(&sg->ref))
kfree(sg);
sg = tmp;
@@ -746,6 +765,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
if (parent->parent) {
parent->parent->child = tmp;
parent->parent->groups->flags = tmp->flags;
+
+ free_sg_shared(parent->parent->groups);
+ link_sg_shared(parent->parent->groups, tmp->shared);
}
/*
@@ -773,6 +795,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
* the child is being destroyed.
*/
do {
+ free_sg_shared(sg);
sg->flags = 0;
} while (sg != sd->groups);
@@ -972,10 +995,12 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
if (!sg)
return NULL;
+ sg->shared = NULL;
sg_span = sched_group_span(sg);
if (sd->child) {
cpumask_copy(sg_span, sched_domain_span(sd->child));
sg->flags = sd->child->flags;
+ link_sg_shared(sg, sd->child->shared);
} else {
cpumask_copy(sg_span, sched_domain_span(sd));
}
@@ -1225,9 +1250,11 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
if (already_visited)
return sg;
+ sg->shared = NULL;
if (child) {
cpumask_copy(sched_group_span(sg), sched_domain_span(child));
cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
+ link_sg_shared(sg, child->shared);
sg->flags = child->flags;
} else {
cpumask_set_cpu(cpu, sched_group_span(sg));
--
2.43.0