[patch 12/29] lockup_detector: Cleanup stub functions
From: Thomas Gleixner
Date: Thu Aug 31 2017 - 03:38:21 EST
Having stub functions which take a full page is not helping the
readability of the code.
Condense them and move the doubled #ifdef variant into the SYSCTL section.
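For illustration only, a minimal userspace sketch of the two patterns used
below (one-line stubs, and a single IS_ENABLED() based function instead of a
doubled #ifdef). The IS_ENABLED()/CONFIG_* definitions here are mocked stand-ins,
not the kernel implementation:

#include <stdio.h>

#define CONFIG_SOFTLOCKUP_DETECTOR 1
#define IS_ENABLED(option) (option)

/* One-line stubs instead of multi-line empty bodies */
static int watchdog_nmi_enable(unsigned int cpu) { (void)cpu; return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { (void)cpu; }

/* Compile-time switch folded into one function body */
static int watchdog_update_cpus(void)
{
	if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR))
		return 1;	/* the real code would update the smpboot cpumask here */
	return 0;
}

int main(void)
{
	watchdog_nmi_enable(0);
	watchdog_nmi_disable(0);
	printf("watchdog_update_cpus() -> %d\n", watchdog_update_cpus());
	return 0;
}

The IS_ENABLED() form has the side benefit that both branches are always seen by
the compiler, so the disabled variant cannot bit-rot behind an #ifdef.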
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
kernel/watchdog.c | 77 ++++++++++++++++--------------------------------------
1 file changed, 24 insertions(+), 53 deletions(-)
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -105,13 +105,8 @@ static int __read_mostly watchdog_runnin
* softlockup watchdog threads start and stop. The arch must select the
* SOFTLOCKUP_DETECTOR Kconfig.
*/
-int __weak watchdog_nmi_enable(unsigned int cpu)
-{
- return 0;
-}
-void __weak watchdog_nmi_disable(unsigned int cpu)
-{
-}
+int __weak watchdog_nmi_enable(unsigned int cpu) { return 0; }
+void __weak watchdog_nmi_disable(unsigned int cpu) { }
/*
* watchdog_nmi_reconfigure can be implemented to be notified after any
@@ -123,10 +118,7 @@ void __weak watchdog_nmi_disable(unsigne
* - sysctl_hardlockup_all_cpu_backtrace
* - hardlockup_panic
*/
-void __weak watchdog_nmi_reconfigure(void)
-{
-}
-
+void __weak watchdog_nmi_reconfigure(void) { }
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
@@ -134,6 +126,11 @@ void __weak watchdog_nmi_reconfigure(voi
#define for_each_watchdog_cpu(cpu) \
for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
+/* Global variables, exported for sysctl */
+unsigned int __read_mostly softlockup_panic =
+ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
+int __read_mostly soft_watchdog_enabled;
+
static u64 __read_mostly sample_period;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -147,13 +144,9 @@ static DEFINE_PER_CPU(struct task_struct
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
-unsigned int __read_mostly softlockup_panic =
- CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
-
static int __init softlockup_panic_setup(char *str)
{
softlockup_panic = simple_strtoul(str, NULL, 0);
-
return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
@@ -592,44 +585,13 @@ static void watchdog_disable_all_cpus(vo
}
}
-#ifdef CONFIG_SYSCTL
-static int watchdog_update_cpus(void)
-{
- return smpboot_update_cpumask_percpu_thread(
- &watchdog_threads, &watchdog_cpumask);
-}
-#endif
-
-#else /* SOFTLOCKUP */
-static int watchdog_park_threads(void)
-{
- return 0;
-}
-
-static void watchdog_unpark_threads(void)
-{
-}
-
-static int watchdog_enable_all_cpus(void)
-{
- return 0;
-}
-
-static void watchdog_disable_all_cpus(void)
-{
-}
-
-#ifdef CONFIG_SYSCTL
-static int watchdog_update_cpus(void)
-{
- return 0;
-}
-#endif
-
-static void set_sample_period(void)
-{
-}
-#endif /* SOFTLOCKUP */
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
+static inline int watchdog_park_threads(void) { return 0; }
+static inline void watchdog_unpark_threads(void) { }
+static inline int watchdog_enable_all_cpus(void) { return 0; }
+static inline void watchdog_disable_all_cpus(void) { }
+static inline void set_sample_period(void) { }
+#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_cleanup(void)
{
@@ -826,6 +788,15 @@ int proc_watchdog_thresh(struct ctl_tabl
return err;
}
+static int watchdog_update_cpus(void)
+{
+ if (IS_ENABLED(CONFIG_SOFTLOCKUP_DETECTOR)) {
+ return smpboot_update_cpumask_percpu_thread(&watchdog_threads,
+ &watchdog_cpumask);
+ }
+ return 0;
+}
+
/*
* The cpumask is the mask of possible cpus that the watchdog can run
* on, not the mask of cpus it is actually running on. This allows the