[PATCH 11/35] clockevents: Cleanup dead cpu explicitly

From: Peter Zijlstra
Date: Mon Feb 16 2015 - 08:19:19 EST


From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

clockevents_notify() is a leftover from the early design of the
clockevents facility. It is not really a notification mechanism; it is
a multiplex call. We are far better off with explicit calls instead of
this monstrosity.

Split out the cleanup function for a dead cpu and invoke it directly
from the cpu down code. Make it conditional on CONFIG_HOTPLUG_CPU as
well.

Temporary change, will be refined in later patches.
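
For illustration only, a minimal userspace sketch (not kernel code) of
the shape of this change: the multiplexed clockevents_notify(reason, arg)
call becomes an explicit, type-safe tick_cleanup_dead_cpu(cpu) call on
the cpu down path. All bodies below are stubs that merely mirror the
kernel function names:

#include <stdio.h>

/* Old style: one multiplex entry point, argument meaning depends on 'reason'. */
enum notify_reason { NOTIFY_BROADCAST_ENTER, NOTIFY_CPU_DEAD };

static void old_clockevents_notify(enum notify_reason reason, void *arg)
{
	switch (reason) {
	case NOTIFY_CPU_DEAD:
		printf("old: cleanup of cpu %d via notify multiplexer\n",
		       *(int *)arg);
		break;
	default:
		break;
	}
}

/* New style: a dedicated call for the dead-cpu cleanup. */
static void tick_cleanup_dead_cpu(int cpu)
{
	printf("new: cleanup of cpu %d via explicit call\n", cpu);
}

/* Caller side, mirroring _cpu_down() after __cpu_die(cpu). */
int main(void)
{
	int cpu = 3;

	old_clockevents_notify(NOTIFY_CPU_DEAD, &cpu);	/* before the patch */
	tick_cleanup_dead_cpu(cpu);			/* after the patch  */
	return 0;
}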

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
include/linux/clockchips.h | 1
include/linux/tick.h | 2 +
kernel/cpu.c | 1
kernel/time/clockevents.c | 61 +++++++++++++++++++++++++------------------
kernel/time/hrtimer.c | 3 --
kernel/time/tick-broadcast.c | 29 ++++++++++----------
kernel/time/tick-common.c | 6 ++--
kernel/time/tick-internal.h | 10 +++----
8 files changed, 62 insertions(+), 51 deletions(-)

Index: linux/include/linux/clockchips.h
===================================================================
--- linux.orig/include/linux/clockchips.h
+++ linux/include/linux/clockchips.h
@@ -16,7 +16,6 @@ enum clock_event_nofitiers {
CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
- CLOCK_EVT_NOTIFY_CPU_DEAD,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS
Index: linux/include/linux/tick.h
===================================================================
--- linux.orig/include/linux/tick.h
+++ linux/include/linux/tick.h
@@ -30,10 +30,12 @@ extern void __init tick_init(void);
/* Should be core only, but XEN resume magic requires this */
extern void tick_resume_local(void);
extern void tick_handover_do_timer(void);
+extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_resume_local(void) { }
static inline void tick_handover_do_timer(void) { }
+static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

#ifdef CONFIG_TICK_ONESHOT
Index: linux/kernel/cpu.c
===================================================================
--- linux.orig/kernel/cpu.c
+++ linux/kernel/cpu.c
@@ -428,6 +428,7 @@ static int __ref _cpu_down(unsigned int
__cpu_die(cpu);

/* CPU is completely dead: tell everyone. Too late to complain. */
+ tick_cleanup_dead_cpu(cpu);
cpu_notify_nofail(CPU_DEAD | mod, hcpu);

check_for_tasks(cpu);
Index: linux/kernel/time/clockevents.c
===================================================================
--- linux.orig/kernel/time/clockevents.c
+++ linux/kernel/time/clockevents.c
@@ -538,15 +538,49 @@ void clockevents_resume(void)
dev->resume(dev);
}

+#ifdef CONFIG_HOTPLUG_CPU
+/**
+ * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
+ */
+void tick_cleanup_dead_cpu(int cpu)
+{
+ struct clock_event_device *dev, *tmp;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&clockevents_lock, flags);
+
+ tick_shutdown_broadcast_oneshot(cpu);
+ tick_shutdown_broadcast(cpu);
+ tick_shutdown(cpu);
+ /*
+ * Unregister the clock event devices which were
+ * released from the users in the notify chain.
+ */
+ list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
+ list_del(&dev->list);
+ /*
+ * Now check whether the CPU has left unused per cpu devices
+ */
+ list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
+ if (cpumask_test_cpu(cpu, dev->cpumask) &&
+ cpumask_weight(dev->cpumask) == 1 &&
+ !tick_is_broadcast_device(dev)) {
+ BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+ list_del(&dev->list);
+ }
+ }
+ raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+}
+#endif
+
/**
* clockevents_notify - notification about relevant events
* Returns 0 on success, any other value on error
*/
int clockevents_notify(unsigned long reason, void *arg)
{
- struct clock_event_device *dev, *tmp;
unsigned long flags;
- int cpu, ret = 0;
+ int ret = 0;

raw_spin_lock_irqsave(&clockevents_lock, flags);

@@ -562,29 +596,6 @@ int clockevents_notify(unsigned long rea
ret = tick_broadcast_oneshot_control(reason);
break;

- case CLOCK_EVT_NOTIFY_CPU_DEAD:
- tick_shutdown_broadcast_oneshot(arg);
- tick_shutdown_broadcast(arg);
- tick_shutdown(arg);
- /*
- * Unregister the clock event devices which were
- * released from the users in the notify chain.
- */
- list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
- list_del(&dev->list);
- /*
- * Now check whether the CPU has left unused per cpu devices
- */
- cpu = *((int *)arg);
- list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
- if (cpumask_test_cpu(cpu, dev->cpumask) &&
- cpumask_weight(dev->cpumask) == 1 &&
- !tick_is_broadcast_device(dev)) {
- BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
- list_del(&dev->list);
- }
- }
- break;
default:
break;
}
Index: linux/kernel/time/hrtimer.c
===================================================================
--- linux.orig/kernel/time/hrtimer.c
+++ linux/kernel/time/hrtimer.c
@@ -1717,11 +1717,8 @@ static int hrtimer_cpu_notify(struct not
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- {
- clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
migrate_hrtimers(scpu);
break;
- }
#endif

default:
Index: linux/kernel/time/tick-broadcast.c
===================================================================
--- linux.orig/kernel/time/tick-broadcast.c
+++ linux/kernel/time/tick-broadcast.c
@@ -418,14 +418,14 @@ void tick_set_periodic_handler(struct cl
dev->event_handler = tick_handle_periodic_broadcast;
}

+#ifdef CONFIG_HOTPLUG_CPU
/*
* Remove a CPU from broadcasting
*/
-void tick_shutdown_broadcast(unsigned int *cpup)
+void tick_shutdown_broadcast(unsigned int cpu)
{
struct clock_event_device *bc;
unsigned long flags;
- unsigned int cpu = *cpup;

raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

@@ -440,6 +440,7 @@ void tick_shutdown_broadcast(unsigned in

raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
+#endif

void tick_suspend_broadcast(void)
{
@@ -683,16 +684,6 @@ static void broadcast_shutdown_local(str
clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
}

-static void broadcast_move_bc(int deadcpu)
-{
- struct clock_event_device *bc = tick_broadcast_device.evtdev;
-
- if (!bc || !broadcast_needs_cpu(bc, deadcpu))
- return;
- /* This moves the broadcast assignment to this cpu */
- clockevents_program_event(bc, bc->next_event, 1);
-}
-
/*
* Powerstate information: The system enters/leaves a state, where
* affected devices might stop
@@ -908,14 +899,23 @@ void tick_broadcast_switch_to_oneshot(vo
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

+#ifdef CONFIG_HOTPLUG_CPU
+static void broadcast_move_bc(int deadcpu)
+{
+ struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+ if (!bc || !broadcast_needs_cpu(bc, deadcpu))
+ return;
+ /* This moves the broadcast assignment to this cpu */
+ clockevents_program_event(bc, bc->next_event, 1);
+}

/*
* Remove a dead CPU from broadcasting
*/
-void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
+void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
unsigned long flags;
- unsigned int cpu = *cpup;

raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

@@ -931,6 +931,7 @@ void tick_shutdown_broadcast_oneshot(uns

raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
+#endif

/*
* Check, whether the broadcast device is in one shot mode
Index: linux/kernel/time/tick-common.c
===================================================================
--- linux.orig/kernel/time/tick-common.c
+++ linux/kernel/time/tick-common.c
@@ -348,7 +348,6 @@ void tick_handover_do_timer(void)
TICK_DO_TIMER_NONE;
}
}
-#endif

/*
* Shutdown an event device on a given cpu:
@@ -357,9 +356,9 @@ void tick_handover_do_timer(void)
* access the hardware device itself.
* We just set the mode and remove it from the lists.
*/
-void tick_shutdown(unsigned int *cpup)
+void tick_shutdown(unsigned int cpu)
{
- struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
+ struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
struct clock_event_device *dev = td->evtdev;

td->mode = TICKDEV_MODE_PERIODIC;
@@ -374,6 +373,7 @@ void tick_shutdown(unsigned int *cpup)
td->evtdev = NULL;
}
}
+#endif

/**
* tick_suspend - Suspend the tick and the broadcast device
Index: linux/kernel/time/tick-internal.h
===================================================================
--- linux.orig/kernel/time/tick-internal.h
+++ linux/kernel/time/tick-internal.h
@@ -20,7 +20,7 @@ extern int tick_do_timer_cpu __read_most
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev);
extern void tick_check_new_device(struct clock_event_device *dev);
-extern void tick_shutdown(unsigned int *cpup);
+extern void tick_shutdown(unsigned int cpu);
extern void tick_suspend(void);
extern void tick_resume(void);
extern bool tick_check_replacement(struct clock_event_device *curdev,
@@ -79,7 +79,7 @@ extern int tick_device_uses_broadcast(st
extern void tick_install_broadcast_device(struct clock_event_device *dev);
extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
-extern void tick_shutdown_broadcast(unsigned int *cpup);
+extern void tick_shutdown_broadcast(unsigned int cpu);
extern void tick_suspend_broadcast(void);
extern void tick_resume_broadcast(void);
extern bool tick_resume_check_broadcast(void);
@@ -94,7 +94,7 @@ static inline int tick_is_broadcast_devi
static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
static inline void tick_broadcast_on_off(unsigned long reason, int *oncpu) { }
-static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
+static inline void tick_shutdown_broadcast(unsigned int cpu) { }
static inline void tick_suspend_broadcast(void) { }
static inline void tick_resume_broadcast(void) { }
static inline bool tick_resume_check_broadcast(void) { return false; }
@@ -113,7 +113,7 @@ static inline void tick_set_periodic_han
extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
extern int tick_broadcast_oneshot_control(unsigned long reason);
extern void tick_broadcast_switch_to_oneshot(void);
-extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
+extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
extern int tick_broadcast_oneshot_active(void);
extern void tick_check_oneshot_broadcast_this_cpu(void);
bool tick_broadcast_oneshot_available(void);
@@ -122,7 +122,7 @@ extern struct cpumask *tick_get_broadcas
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
static inline void tick_broadcast_switch_to_oneshot(void) { }
-static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }

