schedstats-sysctl.patch
Add a sysctl option for controlling schedstats collection
dynamically at runtime. Delay accounting leverages schedstats
for its CPU delay statistics.
Index: linux-2.6.16-rc4/kernel/sched.c
===================================================================
--- linux-2.6.16-rc4.orig/kernel/sched.c 2006-02-27 01:20:04.000000000 -0500
+++ linux-2.6.16-rc4/kernel/sched.c 2006-02-27 01:52:52.000000000 -0500
@@ -382,11 +382,56 @@ static inline void task_rq_unlock(runque
}
#ifdef CONFIG_SCHEDSTATS
+
+int schedstats_sysctl = 0; /* schedstats turned off by default */
+static DEFINE_PER_CPU(int, schedstats) = 0;
+
+static void schedstats_set(int val)
+{
+ int i;
+ static spinlock_t schedstats_lock = SPIN_LOCK_UNLOCKED;
+
+ spin_lock(&schedstats_lock);
+ schedstats_sysctl = val;
+ for (i = 0; i < NR_CPUS; i++)
+ per_cpu(schedstats, i) = val;
+ spin_unlock(&schedstats_lock);
+}
+
+static int __init schedstats_setup_enable(char *str)
+{
+ schedstats_sysctl = 1;
+ schedstats_set(schedstats_sysctl);
+ return 1;
+}
+
+__setup("schedstats", schedstats_setup_enable);
+
+int schedstats_sysctl_handler(ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, prev = schedstats_sysctl;
+ struct task_struct *g, *t;
+
+ ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ if ((ret != 0) || (prev == schedstats_sysctl))
+ return ret;
+ if (schedstats_sysctl) {
+ read_lock(&tasklist_lock);
+ do_each_thread(g, t) {
+ memset(&t->sched_info, 0, sizeof(t->sched_info));
+ } while_each_thread(g, t);
+ read_unlock(&tasklist_lock);
+ }
+ schedstats_set(schedstats_sysctl);