[PATCH 5/9] perf: Migrate perf to use new tick dependency mask model

From: Frederic Weisbecker
Date: Thu Feb 04 2016 - 12:04:01 EST


Instead of providing asynchronous checks for the nohz subsystem to verify
the perf event tick dependency, migrate perf to the new dependency mask.
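
In short, nohz used to poll perf from the tick-stop path; now perf
pushes its state into the dependency mask and nohz only tests the mask
bits. An illustrative sketch using the names from this series (not a
literal excerpt from the diff):

/* Old model: nohz asks perf on every tick-stop attempt. */
if (!perf_event_can_stop_tick())
        return false;

/* New model: perf flips mask bits whenever its state changes. */
tick_nohz_set_dep(TICK_PERF_EVENTS_BIT);      /* global dependency  */
tick_set_dep_cpu(cpu, TICK_PERF_EVENTS_BIT);  /* per-CPU dependency */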

Perf needs the tick for two situations:

1) Freq events. We could set the tick dependency when those are
installed on a CPU context. But setting a global dependency on top of
the global freq events accounting is much easier. If people want this
optimized, we can still refine it later at the per-CPU tick dependency
level. This patch doesn't change the current behaviour anyway.

2) Throttled events: this is a per-CPU dependency. Both patterns are
sketched right below.
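
For reference, the two resulting set-side patterns, condensed from the
diff below (sketch only, not a literal excerpt):

/* 1) Freq events: global dependency. nr_freq_lock serializes the
 * 0 <-> 1 transitions of nr_freq_events against a concurrent
 * unaccount, so a dependency set and clear can't be reordered. */
spin_lock(&nr_freq_lock);
if (atomic_inc_return(&nr_freq_events) == 1)
        tick_nohz_set_dep(TICK_PERF_EVENTS_BIT);
spin_unlock(&nr_freq_lock);

/* 2) Throttled events: per-CPU dependency, set from the overflow
 * handler and cleared again from perf_event_task_tick(). */
__this_cpu_inc(perf_throttled_count);
tick_set_dep_cpu(smp_processor_id(), TICK_PERF_EVENTS_BIT);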

Reviewed-by: Chris Metcalf <cmetcalf@xxxxxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Chris Metcalf <cmetcalf@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Luiz Capitulino <lcapitulino@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Viresh Kumar <viresh.kumar@xxxxxxxxxx>
Signed-off-by: Frederic Weisbecker <fweisbec@xxxxxxxxx>
---
 include/linux/perf_event.h |  6 -----
 include/linux/tick.h       |  2 --
 kernel/events/core.c       | 65 ++++++++++++++++++++++++++++++++++------------
 kernel/time/tick-sched.c   |  8 +-----
 4 files changed, 49 insertions(+), 32 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f9828a4..15bc5a6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1111,12 +1111,6 @@ static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif

-#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
-extern bool perf_event_can_stop_tick(void);
-#else
-static inline bool perf_event_can_stop_tick(void) { return true; }
-#endif
-
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9ae7ebf..994c5be 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -233,7 +233,6 @@ static inline void tick_clear_dep_signal(struct signal_struct *signal,
tick_nohz_clear_dep_signal(signal, bit);
}

-extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(void);
@@ -260,7 +259,6 @@ static inline void tick_clear_dep_signal(struct signal_struct *signal,
enum tick_dependency_bit bit) { }

static inline void tick_nohz_full_kick_cpu(int cpu) { }
-static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
static inline void __tick_nohz_task_switch(void) { }
#endif
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 06ae52e..cedfbfe 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3051,17 +3051,6 @@ done:
return rotate;
}

-#ifdef CONFIG_NO_HZ_FULL
-bool perf_event_can_stop_tick(void)
-{
- if (atomic_read(&nr_freq_events) ||
- __this_cpu_read(perf_throttled_count))
- return false;
- else
- return true;
-}
-#endif
-
void perf_event_task_tick(void)
{
struct list_head *head = this_cpu_ptr(&active_ctx_list);
@@ -3072,6 +3061,7 @@ void perf_event_task_tick(void)

__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
+ tick_clear_dep_cpu(smp_processor_id(), TICK_PERF_EVENTS_BIT);

list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
perf_adjust_freq_unthr_context(ctx, throttled);
@@ -3519,6 +3509,28 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
atomic_dec(&per_cpu(perf_cgroup_events, cpu));
}

+#ifdef CONFIG_NO_HZ_FULL
+static DEFINE_SPINLOCK(nr_freq_lock);
+#endif
+
+static void unaccount_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+ spin_lock(&nr_freq_lock);
+ if (atomic_dec_and_test(&nr_freq_events))
+ tick_nohz_clear_dep(TICK_PERF_EVENTS_BIT);
+ spin_unlock(&nr_freq_lock);
+#endif
+}
+
+static void unaccount_freq_event(void)
+{
+ if (tick_nohz_full_enabled())
+ unaccount_freq_event_nohz();
+ else
+ atomic_dec(&nr_freq_events);
+}
+
static void unaccount_event(struct perf_event *event)
{
if (event->parent)
@@ -3533,7 +3545,7 @@ static void unaccount_event(struct perf_event *event)
if (event->attr.task)
atomic_dec(&nr_task_events);
if (event->attr.freq)
- atomic_dec(&nr_freq_events);
+ unaccount_freq_event();
if (event->attr.context_switch) {
static_key_slow_dec_deferred(&perf_sched_events);
atomic_dec(&nr_switch_events);
@@ -6359,9 +6371,9 @@ static int __perf_event_overflow(struct perf_event *event,
if (unlikely(throttle
&& hwc->interrupts >= max_samples_per_tick)) {
__this_cpu_inc(perf_throttled_count);
+ tick_set_dep_cpu(smp_processor_id(), TICK_PERF_EVENTS_BIT);
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
- tick_nohz_full_kick();
ret = 1;
}
}
@@ -7751,6 +7763,27 @@ static void account_event_cpu(struct perf_event *event, int cpu)
atomic_inc(&per_cpu(perf_cgroup_events, cpu));
}

+/* Freq events need the tick to stay alive (see perf_event_task_tick). */
+static void account_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+ /* Lock so we don't race with concurrent unaccount */
+ spin_lock(&nr_freq_lock);
+ if (atomic_inc_return(&nr_freq_events) == 1)
+ tick_nohz_set_dep(TICK_PERF_EVENTS_BIT);
+ spin_unlock(&nr_freq_lock);
+#endif
+}
+
+static void account_freq_event(void)
+{
+ if (tick_nohz_full_enabled())
+ account_freq_event_nohz();
+ else
+ atomic_inc(&nr_freq_events);
+}
+
+
static void account_event(struct perf_event *event)
{
if (event->parent)
@@ -7764,10 +7797,8 @@ static void account_event(struct perf_event *event)
atomic_inc(&nr_comm_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
- if (event->attr.freq) {
- if (atomic_inc_return(&nr_freq_events) == 1)
- tick_nohz_full_kick_all();
- }
+ if (event->attr.freq)
+ account_freq_event();
if (event->attr.context_switch) {
atomic_inc(&nr_switch_events);
static_key_slow_inc(&perf_sched_events.key);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f258381..6fdb55d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
-#include <linux/perf_event.h>
#include <linux/context_tracking.h>

#include <asm/irq_regs.h>
@@ -213,11 +212,6 @@ static bool can_stop_full_tick(struct tick_sched *ts)
return false;
}

- if (!perf_event_can_stop_tick()) {
- trace_tick_stop(0, TICK_PERF_EVENTS_MASK);
- return false;
- }
-
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
* sched_clock_tick() needs us?
@@ -255,7 +249,7 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
* This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
* is NMI safe.
*/
-void tick_nohz_full_kick(void)
+static void tick_nohz_full_kick(void)
{
if (!tick_nohz_full_cpu(smp_processor_id()))
return;
--
2.7.0