[PATCH 2/3] sched/rt: add a tuning knob to allow changing SCHED_RR timeslice

From: Clark Williams
Date: Thu Feb 07 2013 - 10:47:25 EST



Add a /proc/sys/kernel scheduler knob named sched_rr_timeslice_ms that
allows the SCHED_RR timeslice value to be changed globally. The
user-visible value is in milliseconds, but it is stored internally as
jiffies. Writing 0 (zero) resets the timeslice to the default
(currently 100ms).
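
A minimal usage sketch (not part of this patch, values only
illustrative): a privileged userspace program could set the knob to
50ms and read the stored value back. Note that proc_dointvec() reports
the internally stored jiffies value on read.

  #include <stdio.h>

  int main(void)
  {
          FILE *f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "w");

          if (!f)
                  return 1;
          /* the written value is interpreted as milliseconds */
          fprintf(f, "50\n");
          fclose(f);

          f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "r");
          if (f) {
                  int val;

                  /* with this handler, the readback is in jiffies */
                  if (fscanf(f, "%d", &val) == 1)
                          printf("stored timeslice: %d jiffies\n", val);
                  fclose(f);
          }
          return 0;
  }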

Signed-off-by: Clark Williams <williams@xxxxxxxxxx>
---
 include/linux/sched/sysctl.h | 15 ++++++++++++++-
 kernel/sched/core.c          | 19 +++++++++++++++++++
 kernel/sched/rt.c            |  6 ++++--
 kernel/sysctl.c              |  7 +++++++
 4 files changed, 44 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index bac914e..d2bb0ae 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -73,6 +73,13 @@ static inline unsigned int get_sysctl_timer_migration(void)
 	return 1;
 }
 #endif
+
+/*
+ * control realtime throttling:
+ *
+ * /proc/sys/kernel/sched_rt_period_us
+ * /proc/sys/kernel/sched_rt_runtime_us
+ */
 extern unsigned int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 
@@ -90,7 +97,13 @@ extern unsigned int sysctl_sched_autogroup_enabled;
  */
 #define RR_TIMESLICE		(100 * HZ / 1000)
 
-int sched_rt_handler(struct ctl_table *table, int write,
+extern int sched_rr_timeslice;
+
+extern int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
+extern int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 26058d0..1c39c33 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7508,6 +7508,25 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	/* make sure that internally we keep jiffies */
+	/* also, writing zero resets timeslice to default */
+	if (!ret && write) {
+		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+	}
+	mutex_unlock(&mutex);
+	return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 839718d..127a2c4 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,8 @@
 
 #include <linux/slab.h>
 
+int sched_rr_timeslice = RR_TIMESLICE;
+
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
 struct rt_bandwidth def_rt_bandwidth;
@@ -2016,7 +2018,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = RR_TIMESLICE;
+	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
 	 * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2047,7 +2049,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return RR_TIMESLICE;
+		return sched_rr_timeslice;
 	else
 		return 0;
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7357e23..4fc9be9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -404,6 +404,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= sched_rt_handler,
 	},
+	{
+		.procname	= "sched_rr_timeslice_ms",
+		.data		= &sched_rr_timeslice,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= sched_rr_handler,
+	},
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",
--
1.8.1
