[RFC 2/3] sched/idle: make cpu_idle_force_poll per-cpu

From: Luiz Capitulino
Date: Tue Sep 22 2015 - 16:35:25 EST


In preparation for supporting run-time control of idle polling
behavior, this commit turns the cpu_idle_force_poll global counter
into per-CPU data.

The new per-CPU data is a struct, and new helper functions are
added to preserve the semantics that cpu_idle_force_poll used to
have.

This change should not be visible to arch code calling
cpu_idle_poll_ctrl().
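
As an illustration (only cpu_idle_poll_ctrl() is real; the arch
helper names below are made up for the example), a caller that
today does something like

	void arch_enable_idle_poll(void)	/* hypothetical arch helper */
	{
		cpu_idle_poll_ctrl(true);	/* was: cpu_idle_force_poll++ */
	}

	void arch_disable_idle_poll(void)	/* hypothetical arch helper */
	{
		cpu_idle_poll_ctrl(false);	/* was: cpu_idle_force_poll--, WARN if < 0 */
	}

keeps working unchanged; the only difference is that each call now
increments or decrements the force_poll counter of every possible
CPU instead of a single global.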

Signed-off-by: Luiz Capitulino <lcapitulino@xxxxxxxxxx>
---
kernel/sched/idle.c | 42 +++++++++++++++++++++++++++++++-----------
1 file changed, 31 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 93d0657..3060977 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -24,29 +24,49 @@ void sched_idle_set_state(struct cpuidle_state *idle_state)
 	idle_set_state(this_rq(), idle_state);
 }
 
-static int __read_mostly cpu_idle_force_poll;
+struct idle_poll {
+	int force_poll;
+};
+
+static DEFINE_PER_CPU(struct idle_poll, idle_poll) = {
+	.force_poll = 0,
+};
+
+static bool this_cpu_idle_poll(void)
+{
+	return per_cpu(idle_poll, smp_processor_id()).force_poll > 0;
+}
+
+static void cpu_idle_poll_set_all(int v)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(idle_poll, cpu).force_poll = v;
+}
 
 void cpu_idle_poll_ctrl(bool enable)
 {
-	if (enable) {
-		cpu_idle_force_poll++;
-	} else {
-		cpu_idle_force_poll--;
-		WARN_ON_ONCE(cpu_idle_force_poll < 0);
+	int *p, cpu;
+
+	for_each_possible_cpu(cpu) {
+		p = &per_cpu(idle_poll, cpu).force_poll;
+		enable == true ? ++*p : --*p;
+		WARN_ON_ONCE(*p < 0);
 	}
 }
 
 #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
 static int __init cpu_idle_poll_setup(char *__unused)
 {
-	cpu_idle_force_poll = 1;
+	cpu_idle_poll_set_all(1);
 	return 1;
 }
 __setup("nohlt", cpu_idle_poll_setup);
 
 static int __init cpu_idle_nopoll_setup(char *__unused)
 {
-	cpu_idle_force_poll = 0;
+	cpu_idle_poll_set_all(0);
 	return 1;
 }
 __setup("hlt", cpu_idle_nopoll_setup);
@@ -58,7 +78,7 @@ static inline void cpu_idle_poll(void)
 	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
 	while (!tif_need_resched() &&
-		(cpu_idle_force_poll || tick_check_broadcast_expired()))
+		(this_cpu_idle_poll() || tick_check_broadcast_expired()))
 		cpu_relax();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	rcu_idle_exit();
@@ -71,7 +91,7 @@ void __weak arch_cpu_idle_exit(void) { }
 void __weak arch_cpu_idle_dead(void) { }
 void __weak arch_cpu_idle(void)
 {
-	cpu_idle_force_poll = 1;
+	cpu_idle_poll_set_all(1);
 	local_irq_enable();
 }

@@ -242,7 +262,7 @@ static void cpu_idle_loop(void)
 			 * know that the IPI is going to arrive right
			 * away
			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired())
+			if (this_cpu_idle_poll() || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();
--
2.1.0
