[PATCH] [2/4] SCHED: Mark all frequently used sysctls __read_mostly
From: Andi Kleen
Date: Wed Nov 12 2008 - 07:28:32 EST
These sysctls are read in scheduler fast paths but written essentially
never after boot, so mark them __read_mostly. That moves them into the
read-mostly data section and keeps them off cache lines that are dirtied
by frequently written data.

There are more optimizations possible here, but that's for another
patch.
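For reference, a minimal sketch of what the annotation does, assuming
the x86 definition (the exact section name differs between architectures
and kernel versions, and architectures without support make it a no-op
via include/linux/cache.h):

/* arch/x86/include/asm/cache.h (approximate) */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/* The linker script collects everything marked this way into one
 * read-mostly region, so a store to an unrelated hot variable can no
 * longer invalidate the cache line holding these sysctls, e.g.: */
const_debug unsigned int sysctl_sched_nr_migrate __read_mostly = 32;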
Signed-off-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
---
kernel/sched.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
Index: linux-2.6.28-rc4-test/kernel/sched.c
===================================================================
--- linux-2.6.28-rc4-test.orig/kernel/sched.c 2008-11-12 12:32:52.000000000 +0100
+++ linux-2.6.28-rc4-test/kernel/sched.c 2008-11-12 12:52:52.000000000 +0100
@@ -686,7 +686,7 @@
 #define SCHED_FEAT(name, enabled)	\
 	(1UL << __SCHED_FEAT_##name) * enabled |

-const_debug unsigned int sysctl_sched_features =
+const_debug unsigned int sysctl_sched_features __read_mostly =
 #include "sched_features.h"
 	0;

@@ -809,26 +809,26 @@
  * Number of tasks to iterate in a single balance run.
  * Limited because this is done with IRQs disabled.
  */
-const_debug unsigned int sysctl_sched_nr_migrate = 32;
+const_debug unsigned int sysctl_sched_nr_migrate __read_mostly = 32;

 /*
  * ratelimit for updating the group shares.
  * default: 0.25ms
  */
-unsigned int sysctl_sched_shares_ratelimit = 250000;
+unsigned int sysctl_sched_shares_ratelimit __read_mostly = 250000;

 /*
  * Inject some fuzzyness into changing the per-cpu group shares
  * this avoids remote rq-locks at the expense of fairness.
  * default: 4
  */
-unsigned int sysctl_sched_shares_thresh = 4;
+unsigned int sysctl_sched_shares_thresh __read_mostly = 4;

 /*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
-unsigned int sysctl_sched_rt_period = 1000000;
+unsigned int sysctl_sched_rt_period __read_mostly = 1000000;

 static __read_mostly int scheduler_running;

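(Illustrative only, not part of the patch: without the annotation, a
layout like the hypothetical one below can put a read-mostly sysctl on
the same cache line as a hot counter, so every remote write bounces the
line that readers on other CPUs need.)

/* Hypothetical adjacent placement in .data: */
unsigned int sysctl_sched_nr_migrate = 32;	/* read in every balance run */
atomic_t some_hot_counter;			/* written constantly */

/* With __read_mostly, the sysctl is emitted into the read-mostly
 * section instead, away from some_hot_counter. */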