On Fri, Dec 08, 2017 at 12:07:54PM -0800, subhra mazumdar wrote:
This patch solves the scalability problem of potentially searching all
+static inline void
+sd_context_switch(struct sched_domain *sd, struct rq *rq, int util)
+{
+ struct sched_group *sg_cpu;
+
+ /* atomically add/subtract the util */
+ sg_cpu = sd->sg_cpu;
+ if (util > 0)
+ atomic_inc(
+ (atomic_t *)(&(sg_cpu->utilization)));
+ else
+ atomic_dec(
+ (atomic_t *)(&(sg_cpu->utilization)));

Whahah, lol, no!

Ok

+}
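
Presumably the objection is (at least) the cast: sg_cpu->utilization is a
plain integer that is force-cast to atomic_t at every use site. A minimal
sketch of the same update with the counter declared atomic from the start
(the struct and function names below are made up for illustration, not
taken from the patch):

#include <linux/atomic.h>

/* Illustration only: the counter is atomic by declaration. */
struct smt_util_sketch {
	atomic_t	utilization;
};

static inline void smt_util_adjust(struct smt_util_sketch *sg, int util)
{
	/* a single signed add replaces the inc/dec pair and the casts */
	atomic_add(util, &sg->utilization);
}
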
+
/*
* context_switch - switch to the new MM and the new thread's register state.
*/
@@ -2751,6 +2766,51 @@ context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next, struct rq_flags *rf)
{
struct mm_struct *mm, *oldmm;
+ int this_cpu = rq->cpu;
+ struct sched_domain *sd;
+ unsigned int cond;
+
+ cond = ((prev != rq->idle) << 1) | (next != rq->idle);
+ sd = rcu_dereference(per_cpu(sd_llc, this_cpu));

That one is RCU, not RCU-sched protected..

I will clean this part up.
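
For reference, a minimal sketch of that dereference under an explicit RCU
read-side section rather than relying on preemption being disabled
(fragment only, to illustrate the locking, not a proposed fix):

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_llc, this_cpu));
	if (sd) {
		/* ... walk the lower domains and update utilization ... */
	}
	rcu_read_unlock();
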
+ /*
+ * From sd_llc downward update the SMT utilization.
+ * Skip the lowest level 0.
+ */
+ for_each_lower_domain(sd) {
+ if (sd->level == 0)
+ break;
+ if (rq->initial_util == UTIL_UNINITIALIZED) {
+ switch (cond) {
+ case PREV_IDLE_NEXT_NIDLE:
+ case PREV_NIDLE_NEXT_NIDLE:
+ sd_context_switch(sd, rq, SMT_THREAD_UTIL);
+ break;
+ case PREV_NIDLE_NEXT_IDLE:
+ case PREV_IDLE_NEXT_IDLE:
+ break;
+ }
+ } else {
+ switch (cond) {
+ case PREV_IDLE_NEXT_NIDLE:
+ sd_context_switch(sd, rq, SMT_THREAD_UTIL);
+ break;
+ case PREV_NIDLE_NEXT_IDLE:
+ sd_context_switch(sd, rq, -SMT_THREAD_UTIL);
+ break;
+ case PREV_IDLE_NEXT_IDLE:
+ case PREV_NIDLE_NEXT_NIDLE:
+ break;
+ }
+ }
+ }
+
+ if (sd) {
+ if (next == rq->idle)
+ rq->initial_util = UTIL_IDLE;
+ else
+ rq->initial_util = UTIL_BUSY;
+ }

WTH do you even think this is reasonable?

 	prepare_task_switch(rq, prev, next);

And I still have no idea what the patch does, but I can't be bothered to
reverse engineer it just now.

I will improve the changelog to explain the logic better in v2.
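
For what it is worth, the encoding in the quoted hunk is: bit 1 of cond is
"prev was not idle", bit 0 is "next is not idle". The numeric values below
are inferred from that expression, not copied from the patch's headers, and
the comments describe the already-initialized branch:

enum {
	PREV_IDLE_NEXT_IDLE	= 0,	/* idle -> idle: no change */
	PREV_IDLE_NEXT_NIDLE	= 1,	/* idle -> busy: util += SMT_THREAD_UTIL */
	PREV_NIDLE_NEXT_IDLE	= 2,	/* busy -> idle: util -= SMT_THREAD_UTIL */
	PREV_NIDLE_NEXT_NIDLE	= 3,	/* busy -> busy: no change */
};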