--- 3.3-sched-bfs-420.patch +++ rifs-v3-kernel3.3.x @@ -1,61 +1,7 @@ -The Brain Fuck Scheduler v0.420 AKA smoking by Con Kolivas. - -A single shared runqueue O(n) strict fairness earliest deadline first design. - -Excellent throughput and latency for 1 to many CPUs on desktop and server -commodity hardware. -Not recommended for 4096 cpus. - -Scalability is optimal when your workload is equal to the number of CPUs on -bfs. ie you should ONLY do make -j4 on quad core, -j2 on dual core and so on. - -Features SCHED_IDLEPRIO and SCHED_ISO scheduling policies as well. -You do NOT need to use these policies for good performance, they are purely -optional for even better performance in extreme conditions. - -To run something idleprio, use schedtool like so: - -schedtool -D -e make -j4 - -To run something isoprio, use schedtool like so: - -schedtool -I -e amarok - -Includes accurate sub-tick accounting of tasks so userspace reported -cpu usage may be very different if you have very short lived tasks. 
- --ck - - ---- - Documentation/scheduler/sched-BFS.txt | 347 + - Documentation/sysctl/kernel.txt | 26 - arch/powerpc/platforms/cell/spufs/sched.c | 5 - arch/x86/Kconfig | 10 - drivers/cpufreq/cpufreq.c | 7 - drivers/cpufreq/cpufreq_conservative.c | 4 - drivers/cpufreq/cpufreq_ondemand.c | 8 - fs/proc/base.c | 2 - include/linux/init_task.h | 64 - include/linux/ioprio.h | 2 - include/linux/jiffies.h | 2 - include/linux/sched.h | 110 - init/Kconfig | 16 - init/main.c | 1 - kernel/delayacct.c | 2 - kernel/exit.c | 2 - kernel/posix-cpu-timers.c | 12 - kernel/sched/Makefile | 8 - kernel/sched/bfs.c | 7251 ++++++++++++++++++++++++++++++ - kernel/sysctl.c | 31 - lib/Kconfig.debug | 2 - 21 files changed, 7865 insertions(+), 47 deletions(-) - -Index: linux-3.3-ck1/arch/powerpc/platforms/cell/spufs/sched.c -=================================================================== ---- linux-3.3-ck1.orig/arch/powerpc/platforms/cell/spufs/sched.c 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/arch/powerpc/platforms/cell/spufs/sched.c 2012-03-24 19:30:29.038925740 +1100 -@@ -63,11 +63,6 @@ static struct timer_list spusched_timer; +diff -ruN linux-3.3.5/arch/powerpc/platforms/cell/spufs/sched.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/powerpc/platforms/cell/spufs/sched.c +--- linux-3.3.5/arch/powerpc/platforms/cell/spufs/sched.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/powerpc/platforms/cell/spufs/sched.c 2012-05-19 22:04:37.000000000 +0800 +@@ -63,11 +63,6 @@ static struct timer_list spuloadavg_timer; /* @@ -67,363 +13,90 @@ * Frequency of the spu scheduler tick. By default we do one SPU scheduler * tick for every 10 CPU scheduler ticks. 
*/ -Index: linux-3.3-ck1/Documentation/scheduler/sched-BFS.txt -=================================================================== ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ linux-3.3-ck1/Documentation/scheduler/sched-BFS.txt 2012-03-24 19:30:29.038925740 +1100 -@@ -0,0 +1,347 @@ -+BFS - The Brain Fuck Scheduler by Con Kolivas. -+ -+Goals. -+ -+The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to -+completely do away with the complex designs of the past for the cpu process -+scheduler and instead implement one that is very simple in basic design. -+The main focus of BFS is to achieve excellent desktop interactivity and -+responsiveness without heuristics and tuning knobs that are difficult to -+understand, impossible to model and predict the effect of, and when tuned to -+one workload cause massive detriment to another. -+ -+ -+Design summary. -+ -+BFS is best described as a single runqueue, O(n) lookup, earliest effective -+virtual deadline first design, loosely based on EEVDF (earliest eligible virtual -+deadline first) and my previous Staircase Deadline scheduler. Each component -+shall be described in order to understand the significance of, and reasoning for -+it. The codebase when the first stable version was released was approximately -+9000 lines less code than the existing mainline linux kernel scheduler (in -+2.6.31). This does not even take into account the removal of documentation and -+the cgroups code that is not used. -+ -+Design reasoning. -+ -+The single runqueue refers to the queued but not running processes for the -+entire system, regardless of the number of CPUs. 
The reason for going back to -+a single runqueue design is that once multiple runqueues are introduced, -+per-CPU or otherwise, there will be complex interactions as each runqueue will -+be responsible for the scheduling latency and fairness of the tasks only on its -+own runqueue, and to achieve fairness and low latency across multiple CPUs, any -+advantage in throughput of having CPU local tasks causes other disadvantages. -+This is due to requiring a very complex balancing system to at best achieve some -+semblance of fairness across CPUs and can only maintain relatively low latency -+for tasks bound to the same CPUs, not across them. To increase said fairness -+and latency across CPUs, the advantage of local runqueue locking, which makes -+for better scalability, is lost due to having to grab multiple locks. -+ -+A significant feature of BFS is that all accounting is done purely based on CPU -+used and nowhere is sleep time used in any way to determine entitlement or -+interactivity. Interactivity "estimators" that use some kind of sleep/run -+algorithm are doomed to fail to detect all interactive tasks, and to falsely tag -+tasks that aren't interactive as being so. The reason for this is that it is -+close to impossible to determine that when a task is sleeping, whether it is -+doing it voluntarily, as in a userspace application waiting for input in the -+form of a mouse click or otherwise, or involuntarily, because it is waiting for -+another thread, process, I/O, kernel activity or whatever. Thus, such an -+estimator will introduce corner cases, and more heuristics will be required to -+cope with those corner cases, introducing more corner cases and failed -+interactivity detection and so on. 
Interactivity in BFS is built into the design -+by virtue of the fact that tasks that are waking up have not used up their quota -+of CPU time, and have earlier effective deadlines, thereby making it very likely -+they will preempt any CPU bound task of equivalent nice level. See below for -+more information on the virtual deadline mechanism. Even if they do not preempt -+a running task, because the rr interval is guaranteed to have a bound upper -+limit on how long a task will wait for, it will be scheduled within a timeframe -+that will not cause visible interface jitter. -+ -+ -+Design details. -+ -+Task insertion. -+ -+BFS inserts tasks into each relevant queue as an O(1) insertion into a double -+linked list. On insertion, *every* running queue is checked to see if the newly -+queued task can run on any idle queue, or preempt the lowest running task on the -+system. This is how the cross-CPU scheduling of BFS achieves significantly lower -+latency per extra CPU the system has. In this case the lookup is, in the worst -+case scenario, O(n) where n is the number of CPUs on the system. -+ -+Data protection. -+ -+BFS has one single lock protecting the process local data of every task in the -+global queue. Thus every insertion, removal and modification of task data in the -+global runqueue needs to grab the global lock. However, once a task is taken by -+a CPU, the CPU has its own local data copy of the running process' accounting -+information which only that CPU accesses and modifies (such as during a -+timer tick) thus allowing the accounting data to be updated lockless. Once a -+CPU has taken a task to run, it removes it from the global queue. Thus the -+global queue only ever has, at most, -+ -+ (number of tasks requesting cpu time) - (number of logical CPUs) + 1 -+ -+tasks in the global queue. This value is relevant for the time taken to look up -+tasks during scheduling. 
This will increase if many tasks with CPU affinity set -+in their policy to limit which CPUs they're allowed to run on if they outnumber -+the number of CPUs. The +1 is because when rescheduling a task, the CPU's -+currently running task is put back on the queue. Lookup will be described after -+the virtual deadline mechanism is explained. -+ -+Virtual deadline. -+ -+The key to achieving low latency, scheduling fairness, and "nice level" -+distribution in BFS is entirely in the virtual deadline mechanism. The one -+tunable in BFS is the rr_interval, or "round robin interval". This is the -+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy) -+tasks of the same nice level will be running for, or looking at it the other -+way around, the longest duration two tasks of the same nice level will be -+delayed for. When a task requests cpu time, it is given a quota (time_slice) -+equal to the rr_interval and a virtual deadline. The virtual deadline is -+offset from the current time in jiffies by this equation: -+ -+ jiffies + (prio_ratio * rr_interval) -+ -+The prio_ratio is determined as a ratio compared to the baseline of nice -20 -+and increases by 10% per nice level. The deadline is a virtual one only in that -+no guarantee is placed that a task will actually be scheduled by this time, but -+it is used to compare which task should go next. There are three components to -+how a task is next chosen. First is time_slice expiration. If a task runs out -+of its time_slice, it is descheduled, the time_slice is refilled, and the -+deadline reset to that formula above. Second is sleep, where a task no longer -+is requesting CPU for whatever reason. The time_slice and deadline are _not_ -+adjusted in this case and are just carried over for when the task is next -+scheduled. 
Third is preemption, and that is when a newly waking task is deemed -+higher priority than a currently running task on any cpu by virtue of the fact -+that it has an earlier virtual deadline than the currently running task. The -+earlier deadline is the key to which task is next chosen for the first and -+second cases. Once a task is descheduled, it is put back on the queue, and an -+O(n) lookup of all queued-but-not-running tasks is done to determine which has -+the earliest deadline and that task is chosen to receive CPU next. -+ -+The CPU proportion of different nice tasks works out to be approximately the -+ -+ (prio_ratio difference)^2 -+ -+The reason it is squared is that a task's deadline does not change while it is -+running unless it runs out of time_slice. Thus, even if the time actually -+passes the deadline of another task that is queued, it will not get CPU time -+unless the current running task deschedules, and the time "base" (jiffies) is -+constantly moving. -+ -+Task lookup. -+ -+BFS has 103 priority queues. 100 of these are dedicated to the static priority -+of realtime tasks, and the remaining 3 are, in order of best to worst priority, -+SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority -+scheduling). When a task of these priorities is queued, a bitmap of running -+priorities is set showing which of these priorities has tasks waiting for CPU -+time. When a CPU is made to reschedule, the lookup for the next task to get -+CPU time is performed in the following way: -+ -+First the bitmap is checked to see what static priority tasks are queued. If -+any realtime priorities are found, the corresponding queue is checked and the -+first task listed there is taken (provided CPU affinity is suitable) and lookup -+is complete. If the priority corresponds to a SCHED_ISO task, they are also -+taken in FIFO order (as they behave like SCHED_RR). 
If the priority corresponds -+to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this -+stage, every task in the runlist that corresponds to that priority is checked -+to see which has the earliest set deadline, and (provided it has suitable CPU -+affinity) it is taken off the runqueue and given the CPU. If a task has an -+expired deadline, it is taken and the rest of the lookup aborted (as they are -+chosen in FIFO order). -+ -+Thus, the lookup is O(n) in the worst case only, where n is as described -+earlier, as tasks may be chosen before the whole task list is looked over. -+ -+ -+Scalability. -+ -+The major limitations of BFS will be that of scalability, as the separate -+runqueue designs will have less lock contention as the number of CPUs rises. -+However they do not scale linearly even with separate runqueues as multiple -+runqueues will need to be locked concurrently on such designs to be able to -+achieve fair CPU balancing, to try and achieve some sort of nice-level fairness -+across CPUs, and to achieve low enough latency for tasks on a busy CPU when -+other CPUs would be more suited. BFS has the advantage that it requires no -+balancing algorithm whatsoever, as balancing occurs by proxy simply because -+all CPUs draw off the global runqueue, in priority and deadline order. Despite -+the fact that scalability is _not_ the prime concern of BFS, it both shows very -+good scalability to smaller numbers of CPUs and is likely a more scalable design -+at these numbers of CPUs. -+ -+It also has some very low overhead scalability features built into the design -+when it has been deemed their overhead is so marginal that they're worth adding. -+The first is the local copy of the running process' data to the CPU it's running -+on to allow that data to be updated lockless where possible. 
Then there is -+deference paid to the last CPU a task was running on, by trying that CPU first -+when looking for an idle CPU to use the next time it's scheduled. Finally there -+is the notion of "sticky" tasks that are flagged when they are involuntarily -+descheduled, meaning they still want further CPU time. This sticky flag is -+used to bias heavily against those tasks being scheduled on a different CPU -+unless that CPU would be otherwise idle. When a cpu frequency governor is used -+that scales with CPU load, such as ondemand, sticky tasks are not scheduled -+on a different CPU at all, preferring instead to go idle. This means the CPU -+they were bound to is more likely to increase its speed while the other CPU -+will go idle, thus speeding up total task execution time and likely decreasing -+power usage. This is the only scenario where BFS will allow a CPU to go idle -+in preference to scheduling a task on the earliest available spare CPU. -+ -+The real cost of migrating a task from one CPU to another is entirely dependant -+on the cache footprint of the task, how cache intensive the task is, how long -+it's been running on that CPU to take up the bulk of its cache, how big the CPU -+cache is, how fast and how layered the CPU cache is, how fast a context switch -+is... and so on. In other words, it's close to random in the real world where we -+do more than just one sole workload. The only thing we can be sure of is that -+it's not free. So BFS uses the principle that an idle CPU is a wasted CPU and -+utilising idle CPUs is more important than cache locality, and cache locality -+only plays a part after that. -+ -+When choosing an idle CPU for a waking task, the cache locality is determined -+according to where the task last ran and then idle CPUs are ranked from best -+to worst to choose the most suitable idle CPU based on cache locality, NUMA -+node locality and hyperthread sibling business. 
They are chosen in the -+following preference (if idle): -+ -+* Same core, idle or busy cache, idle threads -+* Other core, same cache, idle or busy cache, idle threads. -+* Same node, other CPU, idle cache, idle threads. -+* Same node, other CPU, busy cache, idle threads. -+* Same core, busy threads. -+* Other core, same cache, busy threads. -+* Same node, other CPU, busy threads. -+* Other node, other CPU, idle cache, idle threads. -+* Other node, other CPU, busy cache, idle threads. -+* Other node, other CPU, busy threads. -+ -+This shows the SMT or "hyperthread" awareness in the design as well which will -+choose a real idle core first before a logical SMT sibling which already has -+tasks on the physical CPU. -+ -+Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark. -+However this benchmarking was performed on an earlier design that was far less -+scalable than the current one so it's hard to know how scalable it is in terms -+of both CPUs (due to the global runqueue) and heavily loaded machines (due to -+O(n) lookup) at this stage. Note that in terms of scalability, the number of -+_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x) -+quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark -+results are very promising indeed, without needing to tweak any knobs, features -+or options. Benchmark contributions are most welcome. -+ -+ -+Features -+ -+As the initial prime target audience for BFS was the average desktop user, it -+was designed to not need tweaking, tuning or have features set to obtain benefit -+from it. Thus the number of knobs and features has been kept to an absolute -+minimum and should not require extra user input for the vast majority of cases. -+There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval -+and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition -+to this, BFS also uses sub-tick accounting. 
What BFS does _not_ now feature is -+support for CGROUPS. The average user should neither need to know what these -+are, nor should they need to be using them to have good desktop behaviour. -+ -+rr_interval -+ -+There is only one "scheduler" tunable, the round robin interval. This can be -+accessed in -+ -+ /proc/sys/kernel/rr_interval -+ -+The value is in milliseconds, and the default value is set to 6ms. Valid values -+are from 1 to 1000. Decreasing the value will decrease latencies at the cost of -+decreasing throughput, while increasing it will improve throughput, but at the -+cost of worsening latencies. The accuracy of the rr interval is limited by HZ -+resolution of the kernel configuration. Thus, the worst case latencies are -+usually slightly higher than this actual value. BFS uses "dithering" to try and -+minimise the effect the Hz limitation has. The default value of 6 is not an -+arbitrary one. It is based on the fact that humans can detect jitter at -+approximately 7ms, so aiming for much lower latencies is pointless under most -+circumstances. It is worth noting this fact when comparing the latency -+performance of BFS to other schedulers. Worst case latencies being higher than -+7ms are far worse than average latencies not being in the microsecond range. -+Experimentation has shown that rr intervals being increased up to 300 can -+improve throughput but beyond that, scheduling noise from elsewhere prevents -+further demonstrable throughput. -+ -+Isochronous scheduling. -+ -+Isochronous scheduling is a unique scheduling policy designed to provide -+near-real-time performance to unprivileged (ie non-root) users without the -+ability to starve the machine indefinitely. Isochronous tasks (which means -+"same time") are set using, for example, the schedtool application like so: -+ -+ schedtool -I -e amarok -+ -+This will start the audio application "amarok" as SCHED_ISO. 
How SCHED_ISO works -+is that it has a priority level between true realtime tasks and SCHED_NORMAL -+which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie, -+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval -+rate). However if ISO tasks run for more than a tunable finite amount of time, -+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of -+time is the percentage of _total CPU_ available across the machine, configurable -+as a percentage in the following "resource handling" tunable (as opposed to a -+scheduler tunable): -+ -+ /proc/sys/kernel/iso_cpu -+ -+and is set to 70% by default. It is calculated over a rolling 5 second average -+Because it is the total CPU available, it means that on a multi CPU machine, it -+is possible to have an ISO task running as realtime scheduling indefinitely on -+just one CPU, as the other CPUs will be available. Setting this to 100 is the -+equivalent of giving all users SCHED_RR access and setting it to 0 removes the -+ability to run any pseudo-realtime tasks. -+ -+A feature of BFS is that it detects when an application tries to obtain a -+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the -+appropriate privileges to use those policies. When it detects this, it will -+give the task SCHED_ISO policy instead. Thus it is transparent to the user. -+Because some applications constantly set their policy as well as their nice -+level, there is potential for them to undo the override specified by the user -+on the command line of setting the policy to SCHED_ISO. To counter this, once -+a task has been set to SCHED_ISO policy, it needs superuser privileges to set -+it back to SCHED_NORMAL. This will ensure the task remains ISO and all child -+processes and threads will also inherit the ISO policy. -+ -+Idleprio scheduling. 
-+ -+Idleprio scheduling is a scheduling policy designed to give out CPU to a task -+_only_ when the CPU would be otherwise idle. The idea behind this is to allow -+ultra low priority tasks to be run in the background that have virtually no -+effect on the foreground tasks. This is ideally suited to distributed computing -+clients (like setiathome, folding, mprime etc) but can also be used to start -+a video encode or so on without any slowdown of other tasks. To avoid this -+policy from grabbing shared resources and holding them indefinitely, if it -+detects a state where the task is waiting on I/O, the machine is about to -+suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As -+per the Isochronous task management, once a task has been scheduled as IDLEPRIO, -+it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can -+be set to start as SCHED_IDLEPRIO with the schedtool command like so: -+ -+ schedtool -D -e ./mprime -+ -+Subtick accounting. -+ -+It is surprisingly difficult to get accurate CPU accounting, and in many cases, -+the accounting is done by simply determining what is happening at the precise -+moment a timer tick fires off. This becomes increasingly inaccurate as the -+timer tick frequency (HZ) is lowered. It is possible to create an application -+which uses almost 100% CPU, yet by being descheduled at the right time, records -+zero CPU usage. While the main problem with this is that there are possible -+security implications, it is also difficult to determine how much CPU a task -+really does use. BFS tries to use the sub-tick accounting from the TSC clock, -+where possible, to determine real CPU usage. This is not entirely reliable, but -+is far more likely to produce accurate CPU usage data than the existing designs -+and will not show tasks as consuming no CPU usage when they actually are. 
Thus, -+the amount of CPU reported as being used by BFS will more accurately represent -+how much CPU the task itself is using (as is shown for example by the 'time' -+application), so the reported values may be quite different to other schedulers. -+Values reported as the 'load' are more prone to problems with this design, but -+per process values are closer to real usage. When comparing throughput of BFS -+to other designs, it is important to compare the actual completed work in terms -+of total wall clock time taken and total work done, rather than the reported -+"cpu usage". -+ -+ -+Con Kolivas Tue, 5 Apr 2011 -Index: linux-3.3-ck1/Documentation/sysctl/kernel.txt -=================================================================== ---- linux-3.3-ck1.orig/Documentation/sysctl/kernel.txt 2012-03-24 19:30:00.012420362 +1100 -+++ linux-3.3-ck1/Documentation/sysctl/kernel.txt 2012-03-24 19:30:29.039925758 +1100 -@@ -33,6 +33,7 @@ show up in /proc/sys/kernel: +diff -ruN linux-3.3.5/arch/x86/Kconfig linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/Kconfig +--- linux-3.3.5/arch/x86/Kconfig 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/Kconfig 2012-05-19 22:04:37.000000000 +0800 +@@ -806,15 +806,7 @@ + increased overhead in some places. If unsure say N here. + + config IRQ_TIME_ACCOUNTING +- bool "Fine granularity task level IRQ time accounting" +- default n +- ---help--- +- Select this option to enable fine granularity task irq time +- accounting. This is done by reading a timestamp on each +- transitions between softirq and hardirq state, so there can be a +- small performance impact. +- +- If in doubt, say N here. ++ def_bool y + + source "kernel/Kconfig.preempt" + +@@ -1112,7 +1104,7 @@ + + choice + depends on EXPERIMENTAL +- prompt "Memory split" if EXPERT ++ prompt "Memory split" + default VMSPLIT_3G + depends on X86_32 + ---help--- +@@ -1132,17 +1124,17 @@ + option alone! 
+ + config VMSPLIT_3G +- bool "3G/1G user/kernel split" ++ bool "Default 896MB lowmem (3G/1G user/kernel split)" + config VMSPLIT_3G_OPT + depends on !X86_PAE +- bool "3G/1G user/kernel split (for full 1G low memory)" ++ bool "1GB lowmem (3G/1G user/kernel split)" + config VMSPLIT_2G +- bool "2G/2G user/kernel split" ++ bool "2GB lowmem (2G/2G user/kernel split)" + config VMSPLIT_2G_OPT + depends on !X86_PAE +- bool "2G/2G user/kernel split (for full 2G low memory)" ++ bool "2GB lowmem (2G/2G user/kernel split)" + config VMSPLIT_1G +- bool "1G/3G user/kernel split" ++ bool "3GB lowmem (1G/3G user/kernel split)" + endchoice + + config PAGE_OFFSET +Binary files linux-3.3.5/arch/x86/kernel/acpi/realmode/video-mode.o.localhost.localdomain.8045.gUNab9 and linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/kernel/acpi/realmode/video-mode.o.localhost.localdomain.8045.gUNab9 differ +Binary files linux-3.3.5/arch/x86/kernel/acpi/realmode/video-vga.o.localhost.localdomain.8047.qBICNe and linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/kernel/acpi/realmode/video-vga.o.localhost.localdomain.8047.qBICNe differ +diff -ruN linux-3.3.5/arch/x86/kernel/cpu/proc.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/kernel/cpu/proc.c +--- linux-3.3.5/arch/x86/kernel/cpu/proc.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/kernel/cpu/proc.c 2012-05-19 22:04:37.000000000 +0800 +@@ -109,7 +109,7 @@ + + seq_printf(m, "\nbogomips\t: %lu.%02lu\n", + c->loops_per_jiffy/(500000/HZ), +- (c->loops_per_jiffy/(5000/HZ)) % 100); ++ (c->loops_per_jiffy * 10 /(50000/HZ)) % 100); + + #ifdef CONFIG_X86_64 + if (c->x86_tlbsize > 0) +diff -ruN linux-3.3.5/arch/x86/kernel/smpboot.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/kernel/smpboot.c +--- linux-3.3.5/arch/x86/kernel/smpboot.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/kernel/smpboot.c 2012-05-19 22:04:37.000000000 +0800 +@@ -436,7 +436,7 @@ + "Total of %d processors activated (%lu.%02lu 
BogoMIPS).\n", + num_online_cpus(), + bogosum/(500000/HZ), +- (bogosum/(5000/HZ))%100); ++ (bogosum * 10/(50000/HZ))%100); + + pr_debug("Before bogocount - setting activated=1.\n"); + } +Binary files linux-3.3.5/arch/x86/tools/insn_sanity and linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/tools/insn_sanity differ +Binary files linux-3.3.5/arch/x86/tools/test_get_len and linux-3.3.5-RIFS-RC3-BRAIN-EATING/arch/x86/tools/test_get_len differ +diff -ruN linux-3.3.5/Documentation/sysctl/kernel.txt linux-3.3.5-RIFS-RC3-BRAIN-EATING/Documentation/sysctl/kernel.txt +--- linux-3.3.5/Documentation/sysctl/kernel.txt 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/Documentation/sysctl/kernel.txt 2012-05-19 22:04:37.000000000 +0800 +@@ -33,6 +33,7 @@ - domainname - hostname - hotplug @@ -431,7 +104,7 @@ - kptr_restrict - kstack_depth_to_print [ X86 only ] - l2cr [ PPC only ] -@@ -59,6 +60,7 @@ show up in /proc/sys/kernel: +@@ -59,6 +60,7 @@ - randomize_va_space - real-root-dev ==> Documentation/initrd.txt - reboot-cmd [ SPARC only ] @@ -439,7 +112,7 @@ - rtsig-max - rtsig-nr - sem -@@ -301,6 +303,16 @@ kernel stack. +@@ -301,6 +303,16 @@ ============================================================== @@ -456,7 +129,7 @@ l2cr: (PPC only) This flag controls the L2 cache of G3 processor boards. If -@@ -517,6 +529,20 @@ rebooting. ??? 
+@@ -517,6 +529,20 @@ ============================================================== @@ -477,11 +150,75 @@ rtsig-max & rtsig-nr: The file rtsig-max can be used to tune the maximum number -Index: linux-3.3-ck1/fs/proc/base.c -=================================================================== ---- linux-3.3-ck1.orig/fs/proc/base.c 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/fs/proc/base.c 2012-03-24 19:30:29.039925758 +1100 -@@ -342,7 +342,7 @@ static int proc_pid_stack(struct seq_fil +diff -ruN linux-3.3.5/drivers/cpufreq/cpufreq.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/drivers/cpufreq/cpufreq.c +--- linux-3.3.5/drivers/cpufreq/cpufreq.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/drivers/cpufreq/cpufreq.c 2012-05-19 22:04:37.000000000 +0800 +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -1445,6 +1446,12 @@ + target_freq, relation); + if (cpu_online(policy->cpu) && cpufreq_driver->target) + retval = cpufreq_driver->target(policy, target_freq, relation); ++ if (likely(retval != -EINVAL)) { ++ if (target_freq == policy->max) ++ cpu_nonscaling(policy->cpu); ++ else ++ cpu_scaling(policy->cpu); ++ } + + return retval; + } +diff -ruN linux-3.3.5/drivers/cpufreq/cpufreq_conservative.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/drivers/cpufreq/cpufreq_conservative.c +--- linux-3.3.5/drivers/cpufreq/cpufreq_conservative.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/drivers/cpufreq/cpufreq_conservative.c 2012-05-19 22:04:37.000000000 +0800 +@@ -29,8 +29,8 @@ + * It helps to keep variable names smaller, simpler + */ + +-#define DEF_FREQUENCY_UP_THRESHOLD (80) +-#define DEF_FREQUENCY_DOWN_THRESHOLD (20) ++#define DEF_FREQUENCY_UP_THRESHOLD (63) ++#define DEF_FREQUENCY_DOWN_THRESHOLD (26) + + /* + * The polling frequency of this governor depends on the capability of +diff -ruN linux-3.3.5/drivers/cpufreq/cpufreq_ondemand.c 
linux-3.3.5-RIFS-RC3-BRAIN-EATING/drivers/cpufreq/cpufreq_ondemand.c +--- linux-3.3.5/drivers/cpufreq/cpufreq_ondemand.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/drivers/cpufreq/cpufreq_ondemand.c 2012-05-19 22:04:37.000000000 +0800 +@@ -28,8 +28,8 @@ + * It helps to keep variable names smaller, simpler + */ + +-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +-#define DEF_FREQUENCY_UP_THRESHOLD (80) ++#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (26) ++#define DEF_FREQUENCY_UP_THRESHOLD (63) + #define DEF_SAMPLING_DOWN_FACTOR (1) + #define MAX_SAMPLING_DOWN_FACTOR (100000) + #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +@@ -416,10 +416,10 @@ + + /* + * Every sampling_rate, we check, if current idle time is less +- * than 20% (default), then we try to increase frequency ++ * than 37% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over +- * 30%. If such a frequency exist, we try to decrease to this frequency. ++ * 63%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of +diff -ruN linux-3.3.5/fs/proc/base.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/fs/proc/base.c +--- linux-3.3.5/fs/proc/base.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/fs/proc/base.c 2012-05-19 22:04:37.000000000 +0800 +@@ -342,7 +342,7 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer) { return sprintf(buffer, "%llu %llu %lu\n", @@ -490,11 +227,10 @@ (unsigned long long)task->sched_info.run_delay, task->sched_info.pcount); } -Index: linux-3.3-ck1/include/linux/init_task.h -=================================================================== ---- linux-3.3-ck1.orig/include/linux/init_task.h 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/include/linux/init_task.h 2012-03-24 19:30:29.039925758 +1100 -@@ -125,12 +125,70 @@ extern struct cred init_cred; +diff -ruN linux-3.3.5/include/linux/init_task.h linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/init_task.h +--- linux-3.3.5/include/linux/init_task.h 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/init_task.h 2012-05-19 22:04:37.000000000 +0800 +@@ -125,12 +125,69 @@ # define INIT_PERF_EVENTS(tsk) #endif @@ -504,8 +240,8 @@ * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) */ -+#ifdef CONFIG_SCHED_BFS -+#define INIT_TASK_COMM "BFS" ++#ifdef CONFIG_SCHED_RIFS ++#define INIT_TASK_COMM "RIFS" +#define INIT_TASK(tsk) \ +{ \ + .state = 0, \ @@ -515,7 +251,6 @@ + .prio = NORMAL_PRIO, \ + .static_prio = MAX_PRIO-20, \ + .normal_prio = NORMAL_PRIO, \ -+ .deadline = 0, \ + .policy = SCHED_NORMAL, \ + .cpus_allowed = CPU_MASK_ALL, \ + .mm = NULL, \ @@ -562,47 +297,57 @@ + INIT_TRACE_RECURSION \ + INIT_TASK_RCU_PREEMPT(tsk) \ +} -+#else /* CONFIG_SCHED_BFS */ ++#else /* CONFIG_SCHED_RIFS */ +#define INIT_TASK_COMM "swapper" #define INIT_TASK(tsk) \ { \ .state = 0, \ -@@ -193,7 +251,7 @@ extern struct cred init_cred; +@@ -193,7 +250,7 @@ INIT_TRACE_RECURSION \ INIT_TASK_RCU_PREEMPT(tsk) \ } - -+#endif /* CONFIG_SCHED_BFS */ ++#endif /* CONFIG_SCHED_RIFS */ #define INIT_CPU_TIMERS(cpu_timers) \ { \ -Index: linux-3.3-ck1/include/linux/ioprio.h -=================================================================== ---- linux-3.3-ck1.orig/include/linux/ioprio.h 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/include/linux/ioprio.h 2012-03-24 19:30:29.039925758 +1100 -@@ -64,6 +64,8 @@ static inline int task_ioprio_class(stru +diff -ruN linux-3.3.5/include/linux/jiffies.h linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/jiffies.h +--- linux-3.3.5/include/linux/jiffies.h 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/jiffies.h 2012-05-19 22:04:37.000000000 +0800 +@@ -164,7 +164,7 @@ + * Have the 32 bit jiffies value wrap 5 minutes after boot + * so jiffies wrap bugs show up earlier. 
+ */ +-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) ++#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ)) - static inline int task_nice_ioprio(struct task_struct *task) - { -+ if (iso_task(task)) -+ return 0; - return (task_nice(task) + 20) / 5; - } + /* + * Change timeval to jiffies, trying to avoid the +diff -ruN linux-3.3.5/include/linux/nfsd/stats.h linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/nfsd/stats.h +--- linux-3.3.5/include/linux/nfsd/stats.h 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/nfsd/stats.h 2012-05-19 22:04:37.000000000 +0800 +@@ -11,8 +11,8 @@ -Index: linux-3.3-ck1/include/linux/sched.h -=================================================================== ---- linux-3.3-ck1.orig/include/linux/sched.h 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/include/linux/sched.h 2012-03-24 19:30:29.040925775 +1100 -@@ -37,8 +37,15 @@ + #include + +-/* thread usage wraps very million seconds (approx one fortnight) */ +-#define NFSD_USAGE_WRAP (HZ*1000000) ++/* thread usage wraps every one hundred thousand seconds (approx one day) */ ++#define NFSD_USAGE_WRAP (HZ*100000) + + #ifdef __KERNEL__ + +diff -ruN linux-3.3.5/include/linux/sched.h linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/sched.h +--- linux-3.3.5/include/linux/sched.h 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/sched.h 2012-05-25 22:43:53.000000000 +0800 +@@ -37,8 +37,13 @@ #define SCHED_FIFO 1 #define SCHED_RR 2 #define SCHED_BATCH 3 -/* SCHED_ISO: reserved but not implemented yet */ -+/* SCHED_ISO: Implemented on BFS only */ #define SCHED_IDLE 5 -+#ifdef CONFIG_SCHED_BFS -+#define SCHED_ISO 4 +#define SCHED_IDLEPRIO SCHED_IDLE ++#ifdef CONFIG_SCHED_RIFS +#define SCHED_MAX (SCHED_IDLEPRIO) +#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX) +#endif @@ -610,7 +355,7 @@ /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ #define 
SCHED_RESET_ON_FORK 0x40000000 -@@ -269,8 +276,6 @@ extern asmlinkage void schedule_tail(str +@@ -269,8 +274,6 @@ extern void init_idle(struct task_struct *idle, int cpu); extern void init_idle_bootup_task(struct task_struct *idle); @@ -619,25 +364,27 @@ #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern void select_nohz_load_balancer(int stop_tick); extern void set_cpu_sd_state_idle(void); -@@ -1243,15 +1248,31 @@ struct task_struct { +@@ -1243,15 +1246,33 @@ #ifdef CONFIG_SMP struct llist_node wake_entry; - int on_cpu; #endif - int on_rq; -+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_BFS) ++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_RIFS) + bool on_cpu; +#endif -+#ifndef CONFIG_SCHED_BFS ++#ifndef CONFIG_SCHED_RIFS + bool on_rq; +#endif int prio, static_prio, normal_prio; unsigned int rt_priority; -+#ifdef CONFIG_SCHED_BFS ++#ifdef CONFIG_SCHED_RIFS + int time_slice; -+ u64 deadline; ++ u64 crt_time; ++ u64 run_time; ++ u64 run_scale; + struct list_head run_list; + u64 last_ran; + u64 sched_time; /* sched_clock time spent running */ @@ -645,7 +392,7 @@ + bool sticky; /* Soft affined flag */ +#endif + unsigned long rt_timeout; -+#else /* CONFIG_SCHED_BFS */ ++#else /* CONFIG_SCHED_RIFS */ const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; @@ -653,21 +400,21 @@ #ifdef CONFIG_PREEMPT_NOTIFIERS /* list of struct preempt_notifier: */ -@@ -1358,6 +1379,9 @@ struct task_struct { +@@ -1358,6 +1379,9 @@ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ cputime_t utime, stime, utimescaled, stimescaled; -+#ifdef CONFIG_SCHED_BFS ++#ifdef CONFIG_SCHED_RIFS + unsigned long utime_pc, stime_pc; +#endif cputime_t gtime; #ifndef CONFIG_VIRT_CPU_ACCOUNTING cputime_t prev_utime, prev_stime; -@@ -1592,6 +1616,64 @@ struct task_struct { +@@ -1592,6 +1616,55 @@ #endif }; -+#ifdef CONFIG_SCHED_BFS ++#ifdef CONFIG_SCHED_RIFS +bool grunqueue_is_locked(void); +void grq_unlock_wait(void); +void cpu_scaling(int cpu); @@ -687,10 
+434,6 @@ + +void print_scheduler_version(void); + -+static inline bool iso_task(struct task_struct *p) -+{ -+ return (p->policy == SCHED_ISO); -+} +#else /* CFS */ +extern int runqueue_is_locked(int cpu); +static inline void cpu_scaling(int cpu) @@ -713,54 +456,49 @@ + printk(KERN_INFO"CFS CPU scheduler.\n"); +} + -+static inline bool iso_task(struct task_struct *p) -+{ -+ return false; -+} -+ +/* Anyone feel like implementing this? */ +static inline bool above_background_load(void) +{ + return false; +} -+#endif /* CONFIG_SCHED_BFS */ ++#endif /* CONFIG_SCHED_RIFS */ + /* Future-safe accessor for struct task_struct's cpus_allowed. */ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) -@@ -1609,10 +1691,20 @@ struct task_struct { +@@ -1609,10 +1682,20 @@ */ #define MAX_USER_RT_PRIO 100 -#define MAX_RT_PRIO MAX_USER_RT_PRIO -+#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1) ++#define MAX_RT_PRIO (MAX_USER_RT_PRIO) +#define DEFAULT_PRIO (MAX_RT_PRIO + 20) -+#ifdef CONFIG_SCHED_BFS ++#ifdef CONFIG_SCHED_RIFS +#define PRIO_RANGE (40) +#define MAX_PRIO (MAX_RT_PRIO + PRIO_RANGE) -+#define ISO_PRIO (MAX_RT_PRIO) ++//#define ISO_PRIO (MAX_RT_PRIO) 已经被我干掉,哈哈 +#define NORMAL_PRIO (MAX_RT_PRIO + 1) -+#define IDLE_PRIO (MAX_RT_PRIO + 2) ++#define IDLE_PRIO (MAX_PRIO + 1) +#define PRIO_LIMIT ((IDLE_PRIO) + 1) -+#else /* CONFIG_SCHED_BFS */ ++#else /* CONFIG_SCHED_RIFS */ #define MAX_PRIO (MAX_RT_PRIO + 40) -#define DEFAULT_PRIO (MAX_RT_PRIO + 20) +#define NORMAL_PRIO DEFAULT_PRIO -+#endif /* CONFIG_SCHED_BFS */ ++#endif /* CONFIG_SCHED_RIFS */ static inline int rt_prio(int prio) { -@@ -1976,7 +2068,7 @@ extern unsigned long long +@@ -1976,7 +2059,7 @@ task_sched_runtime(struct task_struct *task); /* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS) ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_RIFS) extern void sched_exec(void); #else #define sched_exec() {} -@@ -2668,7 +2760,7 @@ static inline 
unsigned int task_cpu(cons +@@ -2668,7 +2751,7 @@ return 0; } @@ -769,20 +507,82 @@ { } -Index: linux-3.3-ck1/init/Kconfig -=================================================================== ---- linux-3.3-ck1.orig/init/Kconfig 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/init/Kconfig 2012-03-24 19:30:29.040925775 +1100 -@@ -29,6 +29,19 @@ config IRQ_WORK +diff -ruN linux-3.3.5/include/linux/swap.h linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/swap.h +--- linux-3.3.5/include/linux/swap.h 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/linux/swap.h 2012-05-19 22:04:37.000000000 +0800 +@@ -201,7 +201,7 @@ + int next; /* swapfile to be used next */ + }; +-/* Swap 50% full? Release swapcache more aggressively.. */ ++/* Swap 50% full? */ + #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) + + /* linux/mm/page_alloc.c */ +@@ -351,9 +351,10 @@ + extern void __put_swap_token(struct mm_struct *); + extern void disable_swap_token(struct mem_cgroup *memcg); + ++/* Only allow swap token to have effect if swap is full */ + static inline int has_swap_token(struct mm_struct *mm) + { +- return (mm == swap_token_mm); ++ return (mm == swap_token_mm && vm_swap_full()); + } + + static inline void put_swap_token(struct mm_struct *mm) +diff -ruN linux-3.3.5/include/net/inet_timewait_sock.h linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/net/inet_timewait_sock.h +--- linux-3.3.5/include/net/inet_timewait_sock.h 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/include/net/inet_timewait_sock.h 2012-05-19 22:04:37.000000000 +0800 +@@ -38,8 +38,8 @@ + * If time > 4sec, it is "slow" path, no recycling is required, + * so that we select tick to get range about 4 seconds. 
+ */ +-#if HZ <= 16 || HZ > 4096 +-# error Unsupported: HZ <= 16 or HZ > 4096 ++#if HZ <= 16 || HZ > 16384 ++# error Unsupported: HZ <= 16 or HZ > 16384 + #elif HZ <= 32 + # define INET_TWDR_RECYCLE_TICK (5 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) + #elif HZ <= 64 +@@ -54,8 +54,12 @@ + # define INET_TWDR_RECYCLE_TICK (10 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) + #elif HZ <= 2048 + # define INET_TWDR_RECYCLE_TICK (11 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) +-#else ++#elif HZ <= 4096 + # define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) ++#elif HZ <= 8192 ++# define INET_TWDR_RECYCLE_TICK (13 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) ++#else ++# define INET_TWDR_RECYCLE_TICK (14 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG) + #endif + + /* TIME_WAIT reaping mechanism. */ +diff -ruN linux-3.3.5/init/calibrate.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/init/calibrate.c +--- linux-3.3.5/init/calibrate.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/init/calibrate.c 2012-05-19 22:04:37.000000000 +0800 +@@ -293,7 +293,7 @@ + if (!printed) + pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", + lpj/(500000/HZ), +- (lpj/(5000/HZ)) % 100, lpj); ++ (lpj * 10 /(50000 / HZ)) % 100, lpj); + + loops_per_jiffy = lpj; + printed = true; +diff -ruN linux-3.3.5/init/Kconfig linux-3.3.5-RIFS-RC3-BRAIN-EATING/init/Kconfig +--- linux-3.3.5/init/Kconfig 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/init/Kconfig 2012-05-19 22:04:37.000000000 +0800 +@@ -29,6 +29,18 @@ + menu "General setup" -+config SCHED_BFS -+ bool "BFS cpu scheduler" ++config SCHED_RIFS ++ bool "RIFS cpu scheduler" + ---help--- -+ The Brain Fuck CPU Scheduler for excellent interactivity and -+ responsiveness on the desktop and solid scalability on normal -+ hardware. Not recommended for 4096 CPUs. ++ The RIFS cpu scheduler is designed for excellent interactivity and ++ responsiveness. + + Currently incompatible with the Group CPU scheduler, and RCU TORTURE + TEST so these options are disabled. 
@@ -793,35 +593,34 @@ config EXPERIMENTAL bool "Prompt for development and/or incomplete code/drivers" ---help--- -@@ -640,6 +653,7 @@ config PROC_PID_CPUSET +@@ -640,6 +652,7 @@ config CGROUP_CPUACCT bool "Simple CPU accounting cgroup subsystem" -+ depends on !SCHED_BFS ++ depends on !SCHED_RIFS help Provides a simple Resource Controller for monitoring the total CPU consumed by the tasks in a cgroup. -@@ -727,6 +741,7 @@ config CGROUP_PERF +@@ -727,6 +740,7 @@ menuconfig CGROUP_SCHED bool "Group CPU scheduler" -+ depends on !SCHED_BFS ++ depends on !SCHED_RIFS default n help This feature lets CPU scheduler recognize task groups and control CPU -@@ -863,6 +878,7 @@ endif # NAMESPACES +@@ -863,6 +877,7 @@ config SCHED_AUTOGROUP bool "Automatic process group scheduling" -+ depends on !SCHED_BFS ++ depends on !SCHED_RIFS select EVENTFD select CGROUPS select CGROUP_SCHED -Index: linux-3.3-ck1/init/main.c -=================================================================== ---- linux-3.3-ck1.orig/init/main.c 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/init/main.c 2012-03-24 19:30:29.041925792 +1100 -@@ -757,6 +757,7 @@ static noinline int init_post(void) +diff -ruN linux-3.3.5/init/main.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/init/main.c +--- linux-3.3.5/init/main.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/init/main.c 2012-05-19 22:04:37.000000000 +0800 +@@ -757,6 +757,7 @@ system_state = SYSTEM_RUNNING; numa_default_policy(); @@ -829,11 +628,10 @@ current->signal->flags |= SIGNAL_UNKILLABLE; -Index: linux-3.3-ck1/kernel/delayacct.c -=================================================================== ---- linux-3.3-ck1.orig/kernel/delayacct.c 2012-03-24 19:30:00.014420399 +1100 -+++ linux-3.3-ck1/kernel/delayacct.c 2012-03-24 19:30:29.041925792 +1100 -@@ -130,7 +130,7 @@ int __delayacct_add_tsk(struct taskstats +diff -ruN linux-3.3.5/kernel/delayacct.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/delayacct.c +--- 
linux-3.3.5/kernel/delayacct.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/delayacct.c 2012-05-19 22:04:37.000000000 +0800 +@@ -130,7 +130,7 @@ */ t1 = tsk->sched_info.pcount; t2 = tsk->sched_info.run_delay; @@ -842,11 +640,10 @@ d->cpu_count += t1; -Index: linux-3.3-ck1/kernel/exit.c -=================================================================== ---- linux-3.3-ck1.orig/kernel/exit.c 2012-03-24 19:30:00.014420399 +1100 -+++ linux-3.3-ck1/kernel/exit.c 2012-03-24 19:30:29.041925792 +1100 -@@ -132,7 +132,7 @@ static void __exit_signal(struct task_st +diff -ruN linux-3.3.5/kernel/exit.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/exit.c +--- linux-3.3.5/kernel/exit.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/exit.c 2012-05-19 22:04:37.000000000 +0800 +@@ -132,7 +132,7 @@ sig->inblock += task_io_get_inblock(tsk); sig->oublock += task_io_get_oublock(tsk); task_io_accounting_add(&sig->ioac, &tsk->ioac); @@ -855,11 +652,158 @@ } sig->nr_threads--; -Index: linux-3.3-ck1/kernel/posix-cpu-timers.c -=================================================================== ---- linux-3.3-ck1.orig/kernel/posix-cpu-timers.c 2012-03-24 19:30:00.014420399 +1100 -+++ linux-3.3-ck1/kernel/posix-cpu-timers.c 2012-03-24 19:30:29.042925809 +1100 -@@ -495,7 +495,7 @@ static void cleanup_timers(struct list_h +diff -ruN linux-3.3.5/kernel/Kconfig.hz linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/Kconfig.hz +--- linux-3.3.5/kernel/Kconfig.hz 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/Kconfig.hz 2012-05-19 22:04:37.000000000 +0800 +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_1000 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -23,13 +23,14 @@ + with lots of processors that may show reduced performance if + too many timer interrupts are occurring. 
+ +- config HZ_250 ++ config HZ_250_NODEFAULT + bool "250 HZ" + help +- 250 Hz is a good compromise choice allowing server performance +- while also showing good interactive responsiveness even +- on SMP and NUMA systems. If you are going to be using NTSC video +- or multimedia, selected 300Hz instead. ++ 250 HZ is a lousy compromise choice allowing server interactivity ++ while also showing desktop throughput and no extra power saving on ++ laptops. No good for anything. ++ ++ Recommend 100 or 1000 instead. + + config HZ_300 + bool "300 HZ" +@@ -43,16 +44,82 @@ + bool "1000 HZ" + help + 1000 Hz is the preferred choice for desktop systems and other +- systems requiring fast interactive responses to events. ++ systems requiring fast interactive responses to events. Laptops ++ can also benefit from this choice without sacrificing battery life ++ if dynticks is also enabled. ++ ++ config HZ_1500 ++ bool "1500 HZ" ++ help ++ 1500 Hz is an insane value to use to run broken software that is Hz ++ limited. ++ ++ Being over 1000, driver breakage is likely. ++ ++ config HZ_2000 ++ bool "2000 HZ" ++ help ++ 2000 Hz is an insane value to use to run broken software that is Hz ++ limited. ++ ++ Being over 1000, driver breakage is likely. ++ ++ config HZ_3000 ++ bool "3000 HZ" ++ help ++ 3000 Hz is an insane value to use to run broken software that is Hz ++ limited. ++ ++ Being over 1000, driver breakage is likely. ++ ++ config HZ_4000 ++ bool "4000 HZ" ++ help ++ 4000 Hz is an insane value to use to run broken software that is Hz ++ limited. ++ ++ Being over 1000, driver breakage is likely. ++ ++ config HZ_5000 ++ bool "5000 HZ" ++ help ++ 5000 Hz is an obscene value to use to run broken software that is Hz ++ limited. ++ ++ Being over 1000, driver breakage is likely. ++ ++ config HZ_7500 ++ bool "7500 HZ" ++ help ++ 7500 Hz is an obscene value to use to run broken software that is Hz ++ limited. ++ ++ Being over 1000, driver breakage is likely. 
++ ++ config HZ_10000 ++ bool "10000 HZ" ++ help ++ 10000 Hz is an obscene value to use to run broken software that is Hz ++ limited. ++ ++ Being over 1000, driver breakage is likely. ++ + + endchoice + + config HZ + int + default 100 if HZ_100 +- default 250 if HZ_250 ++ default 250 if HZ_250_NODEFAULT + default 300 if HZ_300 + default 1000 if HZ_1000 ++ default 1500 if HZ_1500 ++ default 2000 if HZ_2000 ++ default 3000 if HZ_3000 ++ default 4000 if HZ_4000 ++ default 5000 if HZ_5000 ++ default 7500 if HZ_7500 ++ default 10000 if HZ_10000 + + config SCHED_HRTICK + def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS) +diff -ruN linux-3.3.5/kernel/Kconfig.preempt linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/Kconfig.preempt +--- linux-3.3.5/kernel/Kconfig.preempt 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/Kconfig.preempt 2012-05-19 22:04:37.000000000 +0800 +@@ -1,7 +1,7 @@ + + choice + prompt "Preemption Model" +- default PREEMPT_NONE ++ default PREEMPT + + config PREEMPT_NONE + bool "No Forced Preemption (Server)" +@@ -17,7 +17,7 @@ + latencies. + + config PREEMPT_VOLUNTARY +- bool "Voluntary Kernel Preemption (Desktop)" ++ bool "Voluntary Kernel Preemption (Nothing)" + help + This option reduces the latency of the kernel by adding more + "explicit preemption points" to the kernel code. These new +@@ -31,7 +31,8 @@ + applications to run more 'smoothly' even when the system is + under load. + +- Select this if you are building a kernel for a desktop system. ++ Select this for no system in particular (choose Preemptible ++ instead on a desktop if you know what's good for you). 
+ + config PREEMPT + bool "Preemptible Kernel (Low-Latency Desktop)" +diff -ruN linux-3.3.5/kernel/posix-cpu-timers.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/posix-cpu-timers.c +--- linux-3.3.5/kernel/posix-cpu-timers.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/posix-cpu-timers.c 2012-05-19 22:04:37.000000000 +0800 +@@ -495,7 +495,7 @@ void posix_cpu_timers_exit(struct task_struct *tsk) { cleanup_timers(tsk->cpu_timers, @@ -868,7 +812,7 @@ } void posix_cpu_timers_exit_group(struct task_struct *tsk) -@@ -504,7 +504,7 @@ void posix_cpu_timers_exit_group(struct +@@ -504,7 +504,7 @@ cleanup_timers(tsk->signal->cpu_timers, tsk->utime + sig->utime, tsk->stime + sig->stime, @@ -877,7 +821,7 @@ } static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) -@@ -934,7 +934,7 @@ static void check_thread_timers(struct t +@@ -934,7 +934,7 @@ struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); @@ -886,7 +830,7 @@ tsk->cputime_expires.sched_exp = t->expires.sched; break; } -@@ -951,7 +951,7 @@ static void check_thread_timers(struct t +@@ -951,7 +951,7 @@ ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max); if (hard != RLIM_INFINITY && @@ -895,7 +839,7 @@ /* * At the hard limit, we just die. * No need to calculate anything else now. -@@ -959,7 +959,7 @@ static void check_thread_timers(struct t +@@ -959,7 +959,7 @@ __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); return; } @@ -904,7 +848,7 @@ /* * At the soft limit, send a SIGXCPU every second. 
*/ -@@ -1252,7 +1252,7 @@ static inline int fastpath_timer_check(s +@@ -1252,7 +1252,7 @@ struct task_cputime task_sample = { .utime = tsk->utime, .stime = tsk->stime, @@ -913,198 +857,31 @@ }; if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) -Index: linux-3.3-ck1/kernel/sysctl.c -=================================================================== ---- linux-3.3-ck1.orig/kernel/sysctl.c 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/kernel/sysctl.c 2012-03-24 19:30:29.042925809 +1100 -@@ -121,7 +121,12 @@ static int __maybe_unused one = 1; - static int __maybe_unused two = 2; - static int __maybe_unused three = 3; - static unsigned long one_ul = 1; --static int one_hundred = 100; -+static int __maybe_unused one_hundred = 100; -+#ifdef CONFIG_SCHED_BFS -+extern int rr_interval; -+extern int sched_iso_cpu; -+static int __read_mostly one_thousand = 1000; -+#endif - #ifdef CONFIG_PRINTK - static int ten_thousand = 10000; - #endif -@@ -251,7 +256,7 @@ static struct ctl_table root_table[] = { - { } - }; +diff -ruN linux-3.3.5/kernel/sched/Makefile linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sched/Makefile +--- linux-3.3.5/kernel/sched/Makefile 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sched/Makefile 2012-05-19 22:05:35.000000000 +0800 +@@ -11,10 +11,13 @@ + CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer + endif --#ifdef CONFIG_SCHED_DEBUG -+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_BFS) - static int min_sched_granularity_ns = 100000; /* 100 usecs */ - static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ - static int min_wakeup_granularity_ns; /* 0 usecs */ -@@ -266,6 +271,7 @@ static int max_extfrag_threshold = 1000; - #endif ++ifdef CONFIG_SCHED_RIFS ++obj-y += rifs.o clock.o ++else + obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o +-obj-$(CONFIG_SMP) += cpupri.o + obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o +-obj-$(CONFIG_SCHEDSTATS) += stats.o + 
obj-$(CONFIG_SCHED_DEBUG) += debug.o ++endif ++obj-$(CONFIG_SMP) += cpupri.o - static struct ctl_table kern_table[] = { -+#ifndef CONFIG_SCHED_BFS - { - .procname = "sched_child_runs_first", - .data = &sysctl_sched_child_runs_first, -@@ -383,6 +389,7 @@ static struct ctl_table kern_table[] = { - .extra1 = &one, - }, - #endif -+#endif /* !CONFIG_SCHED_BFS */ - #ifdef CONFIG_PROVE_LOCKING - { - .procname = "prove_locking", -@@ -850,6 +857,26 @@ static struct ctl_table kern_table[] = { - .proc_handler = proc_dointvec, - }, - #endif -+#ifdef CONFIG_SCHED_BFS -+ { -+ .procname = "rr_interval", -+ .data = &rr_interval, -+ .maxlen = sizeof (int), -+ .mode = 0644, -+ .proc_handler = &proc_dointvec_minmax, -+ .extra1 = &one, -+ .extra2 = &one_thousand, -+ }, -+ { -+ .procname = "iso_cpu", -+ .data = &sched_iso_cpu, -+ .maxlen = sizeof (int), -+ .mode = 0644, -+ .proc_handler = &proc_dointvec_minmax, -+ .extra1 = &zero, -+ .extra2 = &one_hundred, -+ }, -+#endif - #if defined(CONFIG_S390) && defined(CONFIG_SMP) - { - .procname = "spin_retry", -Index: linux-3.3-ck1/lib/Kconfig.debug -=================================================================== ---- linux-3.3-ck1.orig/lib/Kconfig.debug 2012-03-24 19:30:00.012420362 +1100 -+++ linux-3.3-ck1/lib/Kconfig.debug 2012-03-24 19:30:29.042925809 +1100 -@@ -875,7 +875,7 @@ config BOOT_PRINTK_DELAY - config RCU_TORTURE_TEST - tristate "torture tests for RCU" -- depends on DEBUG_KERNEL -+ depends on DEBUG_KERNEL && !SCHED_BFS - default n - help - This option provides a kernel module that runs torture tests -Index: linux-3.3-ck1/include/linux/jiffies.h -=================================================================== ---- linux-3.3-ck1.orig/include/linux/jiffies.h 2012-03-24 19:30:00.012420362 +1100 -+++ linux-3.3-ck1/include/linux/jiffies.h 2012-03-24 19:30:29.043925827 +1100 -@@ -164,7 +164,7 @@ static inline u64 get_jiffies_64(void) - * Have the 32 bit jiffies value wrap 5 minutes after boot - * so jiffies wrap bugs show up 
earlier. - */ --#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) -+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ)) - - /* - * Change timeval to jiffies, trying to avoid the -Index: linux-3.3-ck1/drivers/cpufreq/cpufreq.c -=================================================================== ---- linux-3.3-ck1.orig/drivers/cpufreq/cpufreq.c 2012-03-24 19:30:00.012420362 +1100 -+++ linux-3.3-ck1/drivers/cpufreq/cpufreq.c 2012-03-24 19:30:29.043925827 +1100 -@@ -28,6 +28,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -1445,6 +1446,12 @@ int __cpufreq_driver_target(struct cpufr - target_freq, relation); - if (cpu_online(policy->cpu) && cpufreq_driver->target) - retval = cpufreq_driver->target(policy, target_freq, relation); -+ if (likely(retval != -EINVAL)) { -+ if (target_freq == policy->max) -+ cpu_nonscaling(policy->cpu); -+ else -+ cpu_scaling(policy->cpu); -+ } - - return retval; - } -Index: linux-3.3-ck1/drivers/cpufreq/cpufreq_ondemand.c -=================================================================== ---- linux-3.3-ck1.orig/drivers/cpufreq/cpufreq_ondemand.c 2012-03-24 19:30:00.012420362 +1100 -+++ linux-3.3-ck1/drivers/cpufreq/cpufreq_ondemand.c 2012-03-24 19:30:29.043925827 +1100 -@@ -28,8 +28,8 @@ - * It helps to keep variable names smaller, simpler - */ - --#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) --#define DEF_FREQUENCY_UP_THRESHOLD (80) -+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (26) -+#define DEF_FREQUENCY_UP_THRESHOLD (63) - #define DEF_SAMPLING_DOWN_FACTOR (1) - #define MAX_SAMPLING_DOWN_FACTOR (100000) - #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) -@@ -416,10 +416,10 @@ static void dbs_check_cpu(struct cpu_dbs - - /* - * Every sampling_rate, we check, if current idle time is less -- * than 20% (default), then we try to increase frequency -+ * than 37% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the 
load while keeping idle time over -- * 30%. If such a frequency exist, we try to decrease to this frequency. -+ * 63%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of -Index: linux-3.3-ck1/drivers/cpufreq/cpufreq_conservative.c -=================================================================== ---- linux-3.3-ck1.orig/drivers/cpufreq/cpufreq_conservative.c 2012-03-24 19:30:00.012420362 +1100 -+++ linux-3.3-ck1/drivers/cpufreq/cpufreq_conservative.c 2012-03-24 19:30:29.043925827 +1100 -@@ -29,8 +29,8 @@ - * It helps to keep variable names smaller, simpler - */ - --#define DEF_FREQUENCY_UP_THRESHOLD (80) --#define DEF_FREQUENCY_DOWN_THRESHOLD (20) -+#define DEF_FREQUENCY_UP_THRESHOLD (63) -+#define DEF_FREQUENCY_DOWN_THRESHOLD (26) - - /* - * The polling frequency of this governor depends on the capability of -Index: linux-3.3-ck1/arch/x86/Kconfig -=================================================================== ---- linux-3.3-ck1.orig/arch/x86/Kconfig 2012-03-24 19:30:00.013420381 +1100 -+++ linux-3.3-ck1/arch/x86/Kconfig 2012-03-24 19:30:29.044925845 +1100 -@@ -806,15 +806,7 @@ config SCHED_MC - increased overhead in some places. If unsure say N here. - - config IRQ_TIME_ACCOUNTING -- bool "Fine granularity task level IRQ time accounting" -- default n -- ---help--- -- Select this option to enable fine granularity task irq time -- accounting. This is done by reading a timestamp on each -- transitions between softirq and hardirq state, so there can be a -- small performance impact. -- -- If in doubt, say N here. 
-+ def_bool y - - source "kernel/Kconfig.preempt" - -Index: linux-3.3-ck1/kernel/sched/bfs.c -=================================================================== ---- /dev/null 1970-01-01 00:00:00.000000000 +0000 -+++ linux-3.3-ck1/kernel/sched/bfs.c 2012-03-24 19:30:29.047925897 +1100 -@@ -0,0 +1,7251 @@ +diff -ruN linux-3.3.5/kernel/sched/rifs.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sched/rifs.c +--- linux-3.3.5/kernel/sched/rifs.c 1970-01-01 08:00:00.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sched/rifs.c 2012-05-24 14:31:27.000000000 +0800 +@@ -0,0 +1,6675 @@ +/* -+ * kernel/sched/bfs.c, was kernel/sched.c ++ * kernel/sched/rifs.c + * + * Kernel scheduler and related syscalls + * @@ -1122,15 +899,13 @@ + * 2003-09-03 Interactivity tuning by Con Kolivas. + * 2004-04-02 Scheduler domains code by Nick Piggin + * 2007-04-15 Work begun on replacing all interactivity tuning with a -+ * fair scheduling design by Con Kolivas. ++ * fair scheduling design by Con Kolivas. + * 2007-05-05 Load balancing (smp-nice) and other improvements -+ * by Peter Williams ++ * by Peter Williams + * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith + * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri + * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, -+ * Thomas Gleixner, Mike Kravetz -+ * now Brainfuck deadline scheduling policy by Con Kolivas deletes -+ * a whole lot of those previous things. 
++ * Thomas Gleixner, Mike Kravetz + */ + +#include @@ -1177,9 +952,6 @@ +#include +#include +#include -+#ifdef CONFIG_PARAVIRT -+#include -+#endif + +#include "cpupri.h" +#include "../workqueue_sched.h" @@ -1195,12 +967,7 @@ + (policy) == SCHED_RR) +#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy)) +#define idleprio_task(p) unlikely((p)->policy == SCHED_IDLEPRIO) -+#define iso_task(p) unlikely((p)->policy == SCHED_ISO) -+#define iso_queue(rq) unlikely((rq)->rq_policy == SCHED_ISO) -+#define rq_running_iso(rq) ((rq)->rq_prio == ISO_PRIO) + -+#define ISO_PERIOD ((5 * HZ * grq.noc) + 1) -+ +/* + * Convert user-nice values [ -20 ... 0 ... 19 ] + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], @@ -1238,7 +1005,7 @@ + +void print_scheduler_version(void) +{ -+ printk(KERN_INFO "BFS CPU scheduler v0.420 by Con Kolivas.\n"); ++ printk(KERN_INFO "Rotary Interactivity Favor Scheduler - RIFS By QQ:3766691.\n"); +} + +/* @@ -1249,24 +1016,17 @@ +int rr_interval __read_mostly = 6; + +/* -+ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks -+ * are allowed to run five seconds as real time tasks. This is the total over -+ * all online cpus. ++ * 兼容用设置。 + */ -+int sched_iso_cpu __read_mostly = 70; ++int sched_iso_cpu __read_mostly = 0; + +/* -+ * The relative length of deadline for each priority(nice) level. ++ * time_slice for each process + */ -+static int prio_ratios[PRIO_RANGE] __read_mostly; ++#define timeslice() MS_TO_US(rr_interval) + -+/* -+ * The quota handed out to tasks of all priority levels when refilling their -+ * time_slice. 
-+ */ -+static inline int timeslice(void) -+{ -+ return MS_TO_US(rr_interval); ++#define get_time_slice(p) { \ ++ p->time_slice = timeslice(); \ +} + +/* @@ -1287,12 +1047,6 @@ + bool idle_cpus; +#endif + int noc; /* num_online_cpus stored and updated when it changes */ -+ u64 niffies; /* Nanosecond jiffies */ -+ unsigned long last_jiffy; /* Last jiffy we updated niffies */ -+ -+ raw_spinlock_t iso_lock; -+ int iso_ticks; -+ bool iso_refractory; +}; + +#ifdef CONFIG_SMP @@ -1336,20 +1090,10 @@ + * This data should only be modified by the local cpu. + */ +struct rq { -+#ifdef CONFIG_SMP -+#ifdef CONFIG_NO_HZ -+ u64 nohz_stamp; -+ unsigned char in_nohz_recently; -+#endif -+#endif -+ + struct task_struct *curr, *idle, *stop; + struct mm_struct *prev_mm; + -+ /* Stored data about rq->curr to work outside grq lock */ -+ u64 rq_deadline; + unsigned int rq_policy; -+ int rq_time_slice; + u64 rq_last_ran; + int rq_prio; + bool rq_running; /* There is a task running */ @@ -1380,41 +1124,13 @@ + /* See if all cache siblings are idle */ + cpumask_t cache_siblings; +#endif -+ u64 last_niffy; /* Last time this RQ updated grq.niffies */ +#endif +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + u64 prev_irq_time; +#endif -+#ifdef CONFIG_PARAVIRT -+ u64 prev_steal_time; -+#endif -+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING -+ u64 prev_steal_time_rq; -+#endif + -+ u64 clock, old_clock, last_tick; ++ u64 clock; + u64 clock_task; -+ bool dither; -+ -+#ifdef CONFIG_SCHEDSTATS -+ -+ /* latency stats */ -+ struct sched_info rq_sched_info; -+ unsigned long long rq_cpu_time; -+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ -+ -+ /* sys_sched_yield() stats */ -+ unsigned int yld_count; -+ -+ /* schedule() stats */ -+ unsigned int sched_switch; -+ unsigned int sched_count; -+ unsigned int sched_goidle; -+ -+ /* try_to_wake_up() stats */ -+ unsigned int ttwu_count; -+ unsigned int ttwu_local; -+#endif +}; + +DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); @@ -1435,13 +1151,13 @@ + +int __weak arch_sd_sibling_asym_packing(void) +{ -+ return 0*SD_ASYM_PACKING; ++ return 0*SD_ASYM_PACKING; +} +#endif + +#define rcu_dereference_check_sched_domain(p) \ + rcu_dereference_check((p), \ -+ lockdep_is_held(&sched_domains_mutex)) ++ lockdep_is_held(&sched_domains_mutex)) + +/* + * The domain tree (rq->sd) is protected by RCU's quiescent state transition. @@ -1486,29 +1202,6 @@ + return rq->cpu; +} + -+/* -+ * Niffies are a globally increasing nanosecond counter. Whenever a runqueue -+ * clock is updated with the grq.lock held, it is an opportunity to update the -+ * niffies value. Any CPU can update it by adding how much its clock has -+ * increased since it last updated niffies, minus any added niffies by other -+ * CPUs. 
-+ */ -+static inline void update_clocks(struct rq *rq) -+{ -+ s64 ndiff; -+ long jdiff; -+ -+ update_rq_clock(rq); -+ ndiff = rq->clock - rq->old_clock; -+ /* old_clock is only updated when we are updating niffies */ -+ rq->old_clock = rq->clock; -+ ndiff -= grq.niffies - rq->last_niffy; -+ jdiff = jiffies - grq.last_jiffy; -+ niffy_diff(&ndiff, jdiff); -+ grq.last_jiffy += jdiff; -+ grq.niffies += ndiff; -+ rq->last_niffy = grq.niffies; -+} +#else /* CONFIG_SMP */ +static struct rq *uprq; +#define cpu_rq(cpu) (uprq) @@ -1520,19 +1213,6 @@ + return 0; +} + -+static inline void update_clocks(struct rq *rq) -+{ -+ s64 ndiff; -+ long jdiff; -+ -+ update_rq_clock(rq); -+ ndiff = rq->clock - rq->old_clock; -+ rq->old_clock = rq->clock; -+ jdiff = jiffies - grq.last_jiffy; -+ niffy_diff(&ndiff, jdiff); -+ grq.last_jiffy += jdiff; -+ grq.niffies += ndiff; -+} +#endif +#define raw_rq() (&__raw_get_cpu_var(runqueues)) + @@ -1548,7 +1228,7 @@ +/* + * All common locking functions performed on grq.lock. rq->clock is local to + * the CPU accessing it so it can be modified just with interrupts disabled -+ * when we're not updating niffies. ++ * when we're not updating the time. + * Looking up task_rq must be done under grq.lock to be safe. 
+ */ +static void update_rq_clock_task(struct rq *rq, s64 delta); @@ -1588,7 +1268,6 @@ + __acquires(grq.lock) +{ + grq_lock(); -+ update_clocks(rq); +} + +static inline void grq_unlock_irq(void) @@ -1622,7 +1301,6 @@ + __acquires(grq.lock) +{ + struct rq *rq = task_grq_lock(p, flags); -+ update_clocks(rq); + return rq; +} + @@ -1633,13 +1311,6 @@ + return task_rq(p); +} + -+static inline void time_task_grq_lock_irq(struct task_struct *p) -+ __acquires(grq.lock) -+{ -+ struct rq *rq = task_grq_lock_irq(p); -+ update_clocks(rq); -+} -+ +static inline void task_grq_unlock_irq(void) + __releases(grq.lock) +{ @@ -1753,16 +1424,6 @@ +} +#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ + -+static inline bool deadline_before(u64 deadline, u64 time) -+{ -+ return (deadline < time); -+} -+ -+static inline bool deadline_after(u64 deadline, u64 time) -+{ -+ return (deadline > time); -+} -+ +/* + * A task that is queued but not running will be on the grq run list. + * A task that is not running or queued will not be on the grq run list. @@ -1785,40 +1446,12 @@ +} + +/* -+ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as -+ * an idle task, we ensure none of the following conditions are met. -+ */ -+static bool idleprio_suitable(struct task_struct *p) -+{ -+ return (!freezing(p) && !signal_pending(p) && -+ !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING))); -+} -+ -+/* -+ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check -+ * that the iso_refractory flag is not set. -+ */ -+static bool isoprio_suitable(void) -+{ -+ return !grq.iso_refractory; -+} -+ -+/* + * Adding to the global runqueue. Enter with grq locked. 
+ */ +static void enqueue_task(struct task_struct *p) +{ -+ if (!rt_task(p)) { -+ /* Check it hasn't gotten rt from PI */ -+ if ((idleprio_task(p) && idleprio_suitable(p)) || -+ (iso_task(p) && isoprio_suitable())) -+ p->prio = p->normal_prio; -+ else -+ p->prio = NORMAL_PRIO; -+ } + __set_bit(p->prio, grq.prio_bitmap); + list_add_tail(&p->run_list, grq.queue + p->prio); -+ sched_info_queued(p); +} + +/* Only idle task does this as a real time task*/ @@ -1826,33 +1459,12 @@ +{ + __set_bit(p->prio, grq.prio_bitmap); + list_add(&p->run_list, grq.queue + p->prio); -+ sched_info_queued(p); +} + +static inline void requeue_task(struct task_struct *p) +{ -+ sched_info_queued(p); +} + -+/* -+ * Returns the relative length of deadline all compared to the shortest -+ * deadline which is that of nice -20. -+ */ -+static inline int task_prio_ratio(struct task_struct *p) -+{ -+ return prio_ratios[TASK_USER_PRIO(p)]; -+} -+ -+/* -+ * task_timeslice - all tasks of all priorities get the exact same timeslice -+ * length. CPU distribution is handled by giving different deadlines to -+ * tasks of different priorities. Use 128 as the base value for fast shifts. -+ */ -+static inline int task_timeslice(struct task_struct *p) -+{ -+ return (rr_interval * task_prio_ratio(p) / 128); -+} -+ +#ifdef CONFIG_SMP +/* + * qnr is the "queued but not running" count which is the total number of @@ -2033,7 +1645,7 @@ + +static inline bool suitable_idle_cpus(struct task_struct *p) +{ -+ return uprq->curr == uprq->idle; ++ return current == uprq->idle; +} + +static inline void resched_suitable_idle(struct task_struct *p) @@ -2075,43 +1687,11 @@ + inc_qnr(); +} + -+static inline int normal_prio(struct task_struct *p) -+{ -+ if (has_rt_policy(p)) -+ return MAX_RT_PRIO - 1 - p->rt_priority; -+ if (idleprio_task(p)) -+ return IDLE_PRIO; -+ if (iso_task(p)) -+ return ISO_PRIO; -+ return NORMAL_PRIO; -+} -+ +/* -+ * Calculate the current priority, i.e. 
the priority -+ * taken into account by the scheduler. This value might -+ * be boosted by RT tasks as it will be RT if the task got -+ * RT-boosted. If not then it returns p->normal_prio. -+ */ -+static int effective_prio(struct task_struct *p) -+{ -+ p->normal_prio = normal_prio(p); -+ /* -+ * If we are RT tasks or we were boosted to RT priority, -+ * keep the priority unchanged. Otherwise, update priority -+ * to the normal priority: -+ */ -+ if (!rt_prio(p->prio)) -+ return p->normal_prio; -+ return p->prio; -+} -+ -+/* + * activate_task - move a task to the runqueue. Enter with grq locked. + */ +static void activate_task(struct task_struct *p, struct rq *rq) +{ -+ update_clocks(rq); -+ + /* + * Sleep time is in units of nanosecs, so shift by 20 to get a + * milliseconds-range estimation of the amount of time that the task @@ -2120,10 +1700,9 @@ + if (unlikely(prof_on == SLEEP_PROFILING)) { + if (p->state == TASK_UNINTERRUPTIBLE) + profile_hits(SLEEP_PROFILING, (void *)get_wchan(p), -+ (rq->clock - p->last_ran) >> 20); ++ (rq->clock - p->last_ran) >> 20); + } + -+ p->prio = effective_prio(p); + if (task_contributes_to_load(p)) + grq.nr_uninterruptible--; + enqueue_task(p); @@ -2244,33 +1823,8 @@ +} +#endif + -+/* -+ * Move a task off the global queue and take it to a cpu for it will -+ * become the running task. -+ */ -+static inline void take_task(int cpu, struct task_struct *p) -+{ -+ set_task_cpu(p, cpu); -+ dequeue_task(p); -+ clear_sticky(p); -+ dec_qnr(); -+} + +/* -+ * Returns a descheduling task to the grq runqueue unless it is being -+ * deactivated. -+ */ -+static inline void return_task(struct task_struct *p, bool deactivate) -+{ -+ if (deactivate) -+ deactivate_task(p); -+ else { -+ inc_qnr(); -+ enqueue_task(p); -+ } -+} -+ -+/* + * resched_task - mark a task 'to be rescheduled now'. 
+ * + * On UP this means the setting of the need_resched flag, on SMP it @@ -2471,25 +2025,32 @@ +#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT) + +/* -+ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the -+ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or ++ * RT tasks and NORMAL tasks preempt purely on priority. ++ * SCHED_IDLEPRIO don't preempt anything else or + * between themselves, they cooperatively multitask. An idle rq scores as + * prio PRIO_LIMIT so it is always preempted. + */ +static inline bool -+can_preempt(struct task_struct *p, int prio, u64 deadline) ++can_preempt(struct task_struct *p, int prio) +{ + /* Better static priority RT task or better policy preemption */ -+ if (p->prio < prio) ++ if (p->prio <= prio) + return true; + if (p->prio > prio) + return false; -+ /* SCHED_NORMAL, BATCH and ISO will preempt based on deadline */ -+ if (!deadline_before(p->deadline, deadline)) -+ return false; + return true; +} + ++static inline void requeue_task_head(struct task_struct *p) ++{ ++ if(task_queued(p)) { ++ dequeue_task(p); ++ enqueue_task_head(p); ++ }else { ++ enqueue_task_head(p); ++ } ++} ++ +#ifdef CONFIG_SMP +#ifdef CONFIG_HOTPLUG_CPU +/* @@ -2526,8 +2087,7 @@ +static void try_preempt(struct task_struct *p, struct rq *this_rq) +{ + struct rq *highest_prio_rq = NULL; -+ int cpu, highest_prio; -+ u64 latest_deadline; ++ int cpu, highest_prio = 0; + cpumask_t tmp; + + /* @@ -2551,7 +2111,7 @@ + else + return; + -+ highest_prio = latest_deadline = 0; ++ requeue_task_head(p); + + for_each_cpu_mask(cpu, tmp) { + struct rq *rq; @@ -2562,17 +2122,16 @@ + if (rq_prio < highest_prio) + continue; + -+ if (rq_prio > highest_prio || -+ deadline_after(rq->rq_deadline, latest_deadline)) { -+ latest_deadline = rq->rq_deadline; ++ if (rq_prio > highest_prio) { + highest_prio = rq_prio; + highest_prio_rq = rq; + } + } + + if (likely(highest_prio_rq)) { -+ if (can_preempt(p, highest_prio, 
highest_prio_rq->rq_deadline)) ++ if (can_preempt(p, highest_prio)) { + resched_task(highest_prio_rq->curr); ++ } + } +} +#else /* CONFIG_SMP */ @@ -2585,39 +2144,16 @@ +{ + if (p->policy == SCHED_IDLEPRIO) + return; -+ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline)) -+ resched_task(uprq->curr); ++ requeue_task_head(p); ++ if (can_preempt(p, uprq->rq_prio)) { ++ resched_task(current); ++ } +} +#endif /* CONFIG_SMP */ + +static void +ttwu_stat(struct task_struct *p, int cpu, int wake_flags) +{ -+#ifdef CONFIG_SCHEDSTATS -+ struct rq *rq = this_rq(); -+ -+#ifdef CONFIG_SMP -+ int this_cpu = smp_processor_id(); -+ -+ if (cpu == this_cpu) -+ schedstat_inc(rq, ttwu_local); -+ else { -+ struct sched_domain *sd; -+ -+ rcu_read_lock(); -+ for_each_domain(this_cpu, sd) { -+ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { -+ schedstat_inc(sd, ttwu_wake_remote); -+ break; -+ } -+ } -+ rcu_read_unlock(); -+ } -+ -+#endif /* CONFIG_SMP */ -+ -+ schedstat_inc(rq, ttwu_count); -+#endif /* CONFIG_SCHEDSTATS */ +} + +static inline void ttwu_activate(struct task_struct *p, struct rq *rq, @@ -2631,8 +2167,9 @@ + * don't trigger a preemption if there are no idle cpus, + * instead waiting for current to deschedule. + */ -+ if (!is_sync || suitable_idle_cpus(p)) ++ if (!is_sync || suitable_idle_cpus(p)) { + try_preempt(p, rq); ++ } +} + +static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, @@ -2732,10 +2269,6 @@ + return; + + if (!task_queued(p)) { -+ if (likely(!task_running(p))) { -+ schedstat_inc(rq, ttwu_count); -+ schedstat_inc(rq, ttwu_local); -+ } + ttwu_activate(p, rq, false); + ttwu_stat(p, smp_processor_id(), 0); + success = true; @@ -2765,8 +2298,6 @@ + return try_to_wake_up(p, state, 0); +} + -+static void time_slice_expired(struct task_struct *p); -+ +/* + * Perform scheduler related setup for a newly forked process p. + * p is forked by current. 
@@ -2788,7 +2319,6 @@ + p->state = TASK_RUNNING; + set_task_cpu(p, cpu); + -+ /* Should be reset in fork.c but done here for ease of bfs patching */ + p->sched_time = p->stime_pc = p->utime_pc = 0; + + /* @@ -2797,12 +2327,10 @@ + if (unlikely(p->sched_reset_on_fork)) { + if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { + p->policy = SCHED_NORMAL; -+ p->normal_prio = normal_prio(p); + } + + if (PRIO_TO_NICE(p->static_prio) < 0) { + p->static_prio = NICE_TO_PRIO(0); -+ p->normal_prio = p->static_prio; + } + + /* @@ -2816,7 +2344,7 @@ + /* + * Make sure we do not leak PI boosting priority to the child. + */ -+ p->prio = curr->normal_prio; ++ p->prio = curr->static_prio; + + INIT_LIST_HEAD(&p->run_list); +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) @@ -2838,24 +2366,20 @@ + * total amount of pending timeslices in the system doesn't change, + * resulting in more scheduling fairness. If it's negative, it won't + * matter since that's the same as being 0. current's time_slice is -+ * actually in rq_time_slice when it's running, as is its last_ran -+ * value. rq->rq_deadline is only modified within schedule() so it -+ * is always equal to current->deadline. ++ * actually in timeslice when it's running, as is its last_ran ++ * value. + */ + rq = task_grq_lock_irq(curr); -+ if (likely(rq->rq_time_slice >= RESCHED_US * 2)) { -+ rq->rq_time_slice /= 2; -+ p->time_slice = rq->rq_time_slice; ++ if (likely(curr->time_slice >= RESCHED_US * 2)) { ++ curr->time_slice /= 2; ++ p->time_slice = curr->time_slice; + } else { + /* -+ * Forking task has run out of timeslice. Reschedule it and -+ * start its child with a new time slice and deadline. The -+ * child will end up running first because its deadline will -+ * be slightly earlier. ++ * Forking task has run out of timeslice. Reschedule it. 
+ */ -+ rq->rq_time_slice = 0; ++ curr->time_slice = 0; + set_tsk_need_resched(curr); -+ time_slice_expired(p); ++ get_time_slice(p); + } + p->last_ran = rq->rq_last_ran; + task_grq_unlock_irq(); @@ -2967,9 +2491,8 @@ + */ +static inline void +prepare_task_switch(struct rq *rq, struct task_struct *prev, -+ struct task_struct *next) ++ struct task_struct *next) +{ -+ sched_info_switch(prev, next); + perf_event_task_sched_out(prev, next); + fire_sched_out_preempt_notifiers(prev, next); + prepare_lock_switch(rq, next); @@ -3059,10 +2582,10 @@ + */ +static inline void +context_switch(struct rq *rq, struct task_struct *prev, -+ struct task_struct *next) ++ struct task_struct *next) +{ + struct mm_struct *mm, *oldmm; -+ ++ + prepare_task_switch(rq, prev, next); + + mm = next->mm; @@ -3334,16 +2857,6 @@ + +#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ + -+#ifdef CONFIG_PARAVIRT -+static inline u64 steal_ticks(u64 steal) -+{ -+ if (unlikely(steal > NSEC_PER_SEC)) -+ return div_u64(steal, TICK_NSEC); -+ -+ return __iter_div_u64_rem(steal, TICK_NSEC, &steal); -+} -+#endif -+ +static void update_rq_clock_task(struct rq *rq, s64 delta) +{ +#ifdef CONFIG_IRQ_TIME_ACCOUNTING @@ -3370,24 +2883,6 @@ + rq->prev_irq_time += irq_delta; + delta -= irq_delta; +#endif -+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING -+ if (static_branch((¶virt_steal_rq_enabled))) { -+ u64 st, steal = paravirt_steal_clock(cpu_of(rq)); -+ -+ steal -= rq->prev_steal_time_rq; -+ -+ if (unlikely(steal > delta)) -+ steal = delta; -+ -+ st = steal_ticks(steal); -+ steal = st * TICK_NSEC; -+ -+ rq->prev_steal_time_rq += steal; -+ -+ delta -= steal; -+ } -+#endif -+ + rq->clock_task += delta; +} + @@ -3418,25 +2913,6 @@ +} +#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ + -+static __always_inline bool steal_account_process_tick(void) -+{ -+#ifdef CONFIG_PARAVIRT -+ if (static_branch(¶virt_steal_enabled)) { -+ u64 steal, st = 0; -+ -+ steal = paravirt_steal_clock(smp_processor_id()); -+ steal -= this_rq()->prev_steal_time; -+ -+ st = 
steal_ticks(steal); -+ this_rq()->prev_steal_time += st * TICK_NSEC; -+ -+ account_steal_time(st); -+ return st; -+ } -+#endif -+ return false; -+} -+ +/* + * On each tick, see what percentage of that tick was attributed to each + * component and add the percentage to the _pc values. Once a _pc value has @@ -3464,7 +2940,7 @@ + +static void +pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset, -+ unsigned long pc, unsigned long ns) ++ unsigned long pc, unsigned long ns) +{ + u64 *cpustat = kcpustat_this_cpu->cpustat; + cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); @@ -3474,7 +2950,6 @@ + p->stime_pc %= 128; + p->stime += (__force u64)cputime_one_jiffy; + p->stimescaled += one_jiffy_scaled; -+ account_group_system_time(p, cputime_one_jiffy); + acct_update_integrals(p); + } + p->sched_time += ns; @@ -3511,7 +2986,6 @@ + p->utime_pc %= 128; + p->utime += (__force u64)cputime_one_jiffy; + p->utimescaled += one_jiffy_scaled; -+ account_group_user_time(p, cputime_one_jiffy); + acct_update_integrals(p); + } + p->sched_time += ns; @@ -3555,62 +3029,51 @@ + * CPU scheduler quota accounting is also performed here in microseconds. 
+ */ +static void -+update_cpu_clock(struct rq *rq, struct task_struct *p, bool tick) ++update_cpu_clock(struct rq *rq, struct task_struct *p) +{ + long account_ns = rq->clock - rq->timekeep_clock; + struct task_struct *idle = rq->idle; + unsigned long account_pc; ++ int user_tick; + ++ p->last_ran = rq->clock; ++ + if (unlikely(account_ns < 0)) + account_ns = 0; + + account_pc = NS_TO_PC(account_ns); + -+ if (tick) { -+ int user_tick; ++ /* Accurate tick timekeeping */ ++ rq->account_pc += account_pc - 128; ++ if (rq->account_pc < 0) { ++ /* ++ * Small errors in micro accounting may not make the ++ * accounting add up to 128 each tick so we keep track ++ * of the percentage and round it up when less than 128 ++ */ ++ account_pc += -rq->account_pc; ++ rq->account_pc = 0; ++ } + -+ /* Accurate tick timekeeping */ -+ rq->account_pc += account_pc - 128; -+ if (rq->account_pc < 0) { -+ /* -+ * Small errors in micro accounting may not make the -+ * accounting add up to 128 each tick so we keep track -+ * of the percentage and round it up when less than 128 -+ */ -+ account_pc += -rq->account_pc; -+ rq->account_pc = 0; -+ } -+ if (steal_account_process_tick()) -+ goto ts_account; ++ user_tick = user_mode(get_irq_regs()); + -+ user_tick = user_mode(get_irq_regs()); ++ if (user_tick) ++ pc_user_time(rq, p, account_pc, account_ns); ++ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) ++ pc_system_time(rq, p, HARDIRQ_OFFSET, ++ account_pc, account_ns); ++ else ++ pc_idle_time(rq, account_pc); + -+ if (user_tick) -+ pc_user_time(rq, p, account_pc, account_ns); -+ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) -+ pc_system_time(rq, p, HARDIRQ_OFFSET, -+ account_pc, account_ns); -+ else -+ pc_idle_time(rq, account_pc); ++ if (sched_clock_irqtime) ++ irqtime_account_hi_si(); + -+ if (sched_clock_irqtime) -+ irqtime_account_hi_si(); -+ } else { -+ /* Accurate subtick timekeeping */ -+ rq->account_pc += account_pc; -+ if (p == idle) -+ pc_idle_time(rq, account_pc); -+ 
else -+ pc_user_time(rq, p, account_pc, account_ns); -+ } -+ -+ts_account: + /* time_slice accounting is done in usecs to avoid overflow on 32bit */ + if (rq->rq_policy != SCHED_FIFO && p != idle) { + s64 time_diff = rq->clock - rq->rq_last_ran; + + niffy_diff(&time_diff, 1); -+ rq->rq_time_slice -= NS_TO_US(time_diff); ++ p->time_slice -= NS_TO_US(time_diff); + } + rq->rq_last_ran = rq->timekeep_clock = rq->clock; +} @@ -3626,7 +3089,6 @@ + u64 ns = 0; + + if (p == rq->curr) { -+ update_clocks(rq); + ns = rq->clock_task - rq->rq_last_ran; + if (unlikely((s64)ns < 0)) + ns = 0; @@ -3668,7 +3130,7 @@ + +/* Compatibility crap for removal */ +void account_user_time(struct task_struct *p, cputime_t cputime, -+ cputime_t cputime_scaled) ++ cputime_t cputime_scaled) +{ +} + @@ -3683,14 +3145,13 @@ + * @cputime_scaled: cputime scaled by cpu frequency + */ +static void account_guest_time(struct task_struct *p, cputime_t cputime, -+ cputime_t cputime_scaled) ++ cputime_t cputime_scaled) +{ + u64 *cpustat = kcpustat_this_cpu->cpustat; + + /* Add guest time to process. */ + p->utime += (__force u64)cputime; + p->utimescaled += (__force u64)cputime_scaled; -+ account_group_user_time(p, cputime); + p->gtime += (__force u64)cputime; + + /* Add guest time to cpustat. */ @@ -3717,7 +3178,6 @@ + /* Add system time to process. */ + p->stime += (__force u64)cputime; + p->stimescaled += (__force u64)cputime_scaled; -+ account_group_system_time(p, cputime); + + /* Add system time to cpustat. */ + *target_cputime64 += (__force u64)cputime; @@ -3794,124 +3254,21 @@ +} +#endif + -+static inline void grq_iso_lock(void) -+ __acquires(grq.iso_lock) -+{ -+ raw_spin_lock(&grq.iso_lock); -+} -+ -+static inline void grq_iso_unlock(void) -+ __releases(grq.iso_lock) -+{ -+ raw_spin_unlock(&grq.iso_lock); -+} -+ -+/* -+ * Functions to test for when SCHED_ISO tasks have used their allocated -+ * quota as real time scheduling and convert them back to SCHED_NORMAL. 
-+ * Where possible, the data is tested lockless, to avoid grabbing iso_lock -+ * because the occasional inaccurate result won't matter. However the -+ * tick data is only ever modified under lock. iso_refractory is only simply -+ * set to 0 or 1 so it's not worth grabbing the lock yet again for that. -+ */ -+static bool set_iso_refractory(void) -+{ -+ grq.iso_refractory = true; -+ return grq.iso_refractory; -+} -+ -+static bool clear_iso_refractory(void) -+{ -+ grq.iso_refractory = false; -+ return grq.iso_refractory; -+} -+ -+/* -+ * Test if SCHED_ISO tasks have run longer than their alloted period as RT -+ * tasks and set the refractory flag if necessary. There is 10% hysteresis -+ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a -+ * slow division. -+ */ -+static bool test_ret_isorefractory(struct rq *rq) -+{ -+ if (likely(!grq.iso_refractory)) { -+ if (grq.iso_ticks > ISO_PERIOD * sched_iso_cpu) -+ return set_iso_refractory(); -+ } else { -+ if (grq.iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) -+ return clear_iso_refractory(); -+ } -+ return grq.iso_refractory; -+} -+ -+static void iso_tick(void) -+{ -+ grq_iso_lock(); -+ grq.iso_ticks += 100; -+ grq_iso_unlock(); -+} -+ -+/* No SCHED_ISO task was running so decrease rq->iso_ticks */ -+static inline void no_iso_tick(void) -+{ -+ if (grq.iso_ticks) { -+ grq_iso_lock(); -+ grq.iso_ticks -= grq.iso_ticks / ISO_PERIOD + 1; -+ if (unlikely(grq.iso_refractory && grq.iso_ticks < -+ ISO_PERIOD * (sched_iso_cpu * 115 / 128))) -+ clear_iso_refractory(); -+ grq_iso_unlock(); -+ } -+} -+ +/* This manages tasks that have run out of timeslice during a scheduler_tick */ ++/* 当前队列时钟的控制 */ +static void task_running_tick(struct rq *rq) +{ + struct task_struct *p; ++ p = rq->curr; + -+ /* -+ * If a SCHED_ISO task is running we increment the iso_ticks. In -+ * order to prevent SCHED_ISO tasks from causing starvation in the -+ * presence of true RT tasks we account those as iso_ticks as well. 
-+ */ -+ if ((rt_queue(rq) || (iso_queue(rq) && !grq.iso_refractory))) { -+ if (grq.iso_ticks <= (ISO_PERIOD * 128) - 128) -+ iso_tick(); -+ } else -+ no_iso_tick(); -+ -+ if (iso_queue(rq)) { -+ if (unlikely(test_ret_isorefractory(rq))) { -+ if (rq_running_iso(rq)) { -+ /* -+ * SCHED_ISO task is running as RT and limit -+ * has been hit. Force it to reschedule as -+ * SCHED_NORMAL by zeroing its time_slice -+ */ -+ rq->rq_time_slice = 0; -+ } -+ } -+ } -+ + /* SCHED_FIFO tasks never run out of timeslice. */ + if (rq->rq_policy == SCHED_FIFO) + return; -+ /* -+ * Tasks that were scheduled in the first half of a tick are not -+ * allowed to run into the 2nd half of the next tick if they will -+ * run out of time slice in the interim. Otherwise, if they have -+ * less than RESCHED_US μs of time slice left they will be rescheduled. -+ */ -+ if (rq->dither) { -+ if (rq->rq_time_slice > HALF_JIFFY_US) -+ return; -+ else -+ rq->rq_time_slice = 0; -+ } else if (rq->rq_time_slice >= RESCHED_US) -+ return; + -+ /* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */ -+ p = rq->curr; ++ if (p->time_slice > RESCHED_US) ++ return; ++ ++ /* time_slice expired. 
Grq locked */ + grq_lock(); + requeue_task(p); + set_tsk_need_resched(p); @@ -3933,12 +3290,9 @@ + sched_clock_tick(); + /* grq lock not grabbed, so only update rq clock */ + update_rq_clock(rq); -+ update_cpu_clock(rq, rq->curr, true); ++ update_cpu_clock(rq, rq->curr); + if (!rq_idle(rq)) + task_running_tick(rq); -+ else -+ no_iso_tick(); -+ rq->last_tick = rq->clock; + perf_event_task_tick(); +} + @@ -3999,134 +3353,177 @@ +EXPORT_SYMBOL(sub_preempt_count); +#endif + ++static inline int priority_decrement(struct rq *rq, struct task_struct *p) ++{ ++ if(p->prio < NORMAL_PRIO) ++ return 1; ++ p->prio ++; ++ if(p->prio < p->static_prio) ++ p->prio = p->static_prio; ++ if(p->prio >= IDLE_PRIO) { ++ p->prio = p->static_prio + 1; ++ if(p->prio >= IDLE_PRIO) ++ p->prio = p->static_prio; ++ } ++ return 1; ++} ++ +/* -+ * Deadline is "now" in niffies + (offset by priority). Setting the deadline -+ * is the key to everything. It distributes cpu fairly amongst tasks of the -+ * same nice value, it proportions cpu according to nice level, it means the -+ * task that last woke up the longest ago has the earliest deadline, thus -+ * ensuring that interactive tasks get low latency on wake up. The CPU -+ * proportion works out to the square of the virtual deadline difference, so -+ * this equation will give nice 19 3% CPU compared to nice 0. ++ * Timeslices below RESCHED_US are considered as good as expired as there's no ++ * point rescheduling when there's so little time left. SCHED_BATCH tasks ++ * have been flagged be not latency sensitive and likely to be fully CPU ++ * bound so every time they're rescheduled they have their time_slice ++ * refilled. 
+ */ -+static inline u64 prio_deadline_diff(int user_prio) ++static inline void check_timeslice_end(struct rq *rq, struct task_struct *p) +{ -+ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128)); ++ if(p->policy == SCHED_FIFO) ++ goto out; ++ if(p->time_slice < RESCHED_US || batch_task(p)) { ++ if(p->prio >= NORMAL_PRIO) { ++ p->prio ++; ++ if(p->prio < p->static_prio) ++ p->prio = p->static_prio; ++ if(p->prio >= IDLE_PRIO) { ++ p->prio = p->static_prio + 1; ++ if(p->prio >= IDLE_PRIO) ++ p->prio = p->static_prio; ++ } ++ } ++ }else { ++ if(p->time_slice >= MS_TO_US(rr_interval / 2)) { ++ if(p->state != TASK_RUNNING) ++ p->prio --; ++ else ++ p->preempt = 0; ++ if(p->prio < NORMAL_PRIO) ++ p->prio = NORMAL_PRIO; ++ if(p->prio <= 0) ++ p->prio = 0; ++ } ++ goto out; ++ } ++ get_time_slice(p); ++out: ++ return; +} + -+static inline u64 task_deadline_diff(struct task_struct *p) ++#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) ++ ++ ++/* ++ * 最低位查找,查找最高优先级开始。 ++ * Find the lowest bit set in the bitmap.We would find the highest priority first/ ++ */ ++static inline unsigned long ++get_prio_bit(unsigned long *addr, unsigned long offset) +{ -+ return prio_deadline_diff(TASK_USER_PRIO(p)); ++ unsigned long *from = addr + (offset / BITS_PER_LONG); ++ unsigned long *limit = addr + PRIO_LIMIT / BITS_PER_LONG; ++ int i = offset % BITS_PER_LONG; ++ ++ if (offset >= PRIO_LIMIT) ++ return PRIO_LIMIT; ++ ++ for(;from != (limit);from++) { ++ for(;i < BITS_PER_LONG;i++, offset++) { ++ if(((*from >> i) & 0x1)) { ++ goto out; ++ } ++ } ++ ++ /* ++ * This can make sure to generate the best machine code. ++ */ ++ i = 0; ++ } ++out: ++ return offset; +} + -+static inline u64 static_deadline_diff(int static_prio) ++/* ++ * The currently running task's information is all stored in rq local data ++ * which is only modified by the local CPU, thereby allowing the data to be ++ * changed without grabbing the grq lock. 
++ */ ++static inline void set_rq_task(struct rq *rq, struct task_struct *p) +{ -+ return prio_deadline_diff(USER_PRIO(static_prio)); ++ rq->rq_last_ran = p->last_ran = rq->clock; ++ rq->rq_policy = p->policy; ++ rq->rq_prio = p->prio; ++ if (p != rq->idle) ++ rq->rq_running = true; ++ else ++ rq->rq_running = false; +} + -+static inline int longest_deadline_diff(void) ++static void reset_rq_task(struct rq *rq, struct task_struct *p) +{ -+ return prio_deadline_diff(39); ++ rq->rq_policy = p->policy; ++ rq->rq_prio = p->prio; +} + -+static inline int ms_longest_deadline_diff(void) ++static inline void operate_blk_needs_flush_plug(struct task_struct *p) +{ -+ return NS_TO_MS(longest_deadline_diff()); ++ grq_unlock_irq(); ++ preempt_enable_no_resched(); ++ blk_schedule_flush_plug(p); +} + -+/* -+ * The time_slice is only refilled when it is empty and that is when we set a -+ * new deadline. -+ */ -+static void time_slice_expired(struct task_struct *p) ++static inline void task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) +{ -+ p->time_slice = timeslice(); -+ p->deadline = grq.niffies + task_deadline_diff(p); ++ /* ++ * Don't stick tasks when a real time task is going to run as ++ * they may literally get stuck. ++ */ ++ if (rt_task(next)) ++ unstick_task(rq, prev); ++ set_rq_task(rq, next); ++ grq.nr_switches++; ++ prev->on_cpu = false; ++ next->on_cpu = true; ++ rq->curr = next; ++ ++ /* ++ * The context switch have flipped the stack from under us ++ * and restored the local variables which were saved when ++ * this task called schedule() in the past. prev == current ++ * is still correct, but it can be moved to another cpu/rq. ++ */ ++ context_switch(rq, prev, next); /* unlocks the grq */ +} + -+/* -+ * Timeslices below RESCHED_US are considered as good as expired as there's no -+ * point rescheduling when there's so little time left. 
SCHED_BATCH tasks -+ * have been flagged be not latency sensitive and likely to be fully CPU -+ * bound so every time they're rescheduled they have their time_slice -+ * refilled, but get a new later deadline to have little effect on -+ * SCHED_NORMAL tasks. + ++/* ++ * Move a task off the global queue and take it to a cpu for it will ++ * become the running task. + */ -+static inline void check_deadline(struct task_struct *p) ++static inline void take_task(int cpu, struct task_struct *p) +{ -+ if (p->time_slice < RESCHED_US || batch_task(p)) -+ time_slice_expired(p); ++ set_task_cpu(p, cpu); ++ dequeue_task(p); ++ clear_sticky(p); ++ dec_qnr(); +} + -+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) -+ +/* -+ * Scheduler queue bitmap specific find next bit. ++ * Put the descheduling task back to grq. + */ -+static inline unsigned long -+next_sched_bit(const unsigned long *addr, unsigned long offset) ++static inline void put_prev_task(struct rq *rq, int cpu, struct task_struct *p, bool deactivate) +{ -+ const unsigned long *p; -+ unsigned long result; -+ unsigned long size; -+ unsigned long tmp; -+ -+ size = PRIO_LIMIT; -+ if (offset >= size) -+ return size; -+ -+ p = addr + BITOP_WORD(offset); -+ result = offset & ~(BITS_PER_LONG-1); -+ size -= result; -+ offset %= BITS_PER_LONG; -+ if (offset) { -+ tmp = *(p++); -+ tmp &= (~0UL << offset); -+ if (size < BITS_PER_LONG) -+ goto found_first; -+ if (tmp) -+ goto found_middle; -+ size -= BITS_PER_LONG; -+ result += BITS_PER_LONG; ++ if(deactivate) ++ deactivate_task(p); ++ else { ++ inc_qnr(); ++ enqueue_task(p); + } -+ while (size & ~(BITS_PER_LONG-1)) { -+ if ((tmp = *(p++))) -+ goto found_middle; -+ result += BITS_PER_LONG; -+ size -= BITS_PER_LONG; -+ } -+ if (!size) -+ return result; -+ tmp = *p; -+ -+found_first: -+ tmp &= (~0UL >> (BITS_PER_LONG - size)); -+ if (tmp == 0UL) /* Are any bits set? */ -+ return result + size; /* Nope. 
*/ -+found_middle: -+ return result + __ffs(tmp); +} + +/* -+ * O(n) lookup of all tasks in the global runqueue. The real brainfuck -+ * of lock contention and O(n). It's not really O(n) as only the queued, -+ * but not running tasks are scanned, and is O(n) queued in the worst case -+ * scenario only because the right task can be found before scanning all of -+ * them. -+ * Tasks are selected in this order: -+ * Real time tasks are selected purely by their static priority and in the -+ * order they were queued, so the lowest value idx, and the first queued task -+ * of that priority value is chosen. -+ * If no real time tasks are found, the SCHED_ISO priority is checked, and -+ * all SCHED_ISO tasks have the same priority value, so they're selected by -+ * the earliest deadline value. -+ * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the -+ * earliest deadline. -+ * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are -+ * selected by the earliest deadline. ++ * Task picking for next time to run. + */ +static inline struct -+task_struct *earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle) ++task_struct *get_runnable_task(struct rq *rq, int cpu, struct task_struct *idle) +{ + struct task_struct *edt = NULL; + unsigned long idx = -1; @@ -4134,59 +3531,18 @@ + do { + struct list_head *queue; + struct task_struct *p; -+ u64 earliest_deadline; + -+ idx = next_sched_bit(grq.prio_bitmap, ++idx); ++ idx = get_prio_bit(grq.prio_bitmap, ++idx); + if (idx >= PRIO_LIMIT) + return idle; + queue = grq.queue + idx; + -+ if (idx < MAX_RT_PRIO) { -+ /* We found an rt task */ -+ list_for_each_entry(p, queue, run_list) { -+ /* Make sure cpu affinity is ok */ -+ if (needs_other_cpu(p, cpu)) -+ continue; -+ edt = p; -+ goto out_take; -+ } -+ /* -+ * None of the RT tasks at this priority can run on -+ * this cpu -+ */ -+ continue; -+ } -+ -+ /* -+ * No rt tasks. Find the earliest deadline task. 
Now we're in -+ * O(n) territory. -+ */ -+ earliest_deadline = ~0ULL; + list_for_each_entry(p, queue, run_list) { -+ u64 dl; -+ + /* Make sure cpu affinity is ok */ + if (needs_other_cpu(p, cpu)) + continue; -+ -+ /* -+ * Soft affinity happens here by not scheduling a task -+ * with its sticky flag set that ran on a different CPU -+ * last when the CPU is scaling, or by greatly biasing -+ * against its deadline when not, based on cpu cache -+ * locality. -+ */ -+ if (task_sticky(p) && task_rq(p) != rq) { -+ if (scaling_rq(rq)) -+ continue; -+ dl = p->deadline << locality_diff(p, rq); -+ } else -+ dl = p->deadline; -+ -+ if (deadline_before(dl, earliest_deadline)) { -+ earliest_deadline = dl; -+ edt = p; -+ } ++ edt = p; ++ goto out_take; + } + } while (!edt); + @@ -4195,150 +3551,77 @@ + return edt; +} + ++#define SCHED_RESCHED -1 + +/* -+ * Print scheduling while atomic bug: -+ */ -+static noinline void __schedule_bug(struct task_struct *prev) -+{ -+ struct pt_regs *regs = get_irq_regs(); -+ -+ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", -+ prev->comm, prev->pid, preempt_count()); -+ -+ debug_show_held_locks(prev); -+ print_modules(); -+ if (irqs_disabled()) -+ print_irqtrace_events(prev); -+ -+ if (regs) -+ show_regs(regs); -+ else -+ dump_stack(); -+} -+ -+/* -+ * Various schedule()-time debugging checks and statistics: -+ */ -+static inline void schedule_debug(struct task_struct *prev) -+{ -+ /* -+ * Test if we are atomic. Since do_exit() needs to call into -+ * schedule() atomically, we ignore that path for now. -+ * Otherwise, whine if we are scheduling when we should not be. 
-+ */ -+ if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) -+ __schedule_bug(prev); -+ rcu_sleep_check(); -+ -+ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); -+ -+ schedstat_inc(this_rq(), sched_count); -+} -+ -+/* -+ * The currently running task's information is all stored in rq local data -+ * which is only modified by the local CPU, thereby allowing the data to be -+ * changed without grabbing the grq lock. -+ */ -+static inline void set_rq_task(struct rq *rq, struct task_struct *p) -+{ -+ rq->rq_time_slice = p->time_slice; -+ rq->rq_deadline = p->deadline; -+ rq->rq_last_ran = p->last_ran = rq->clock; -+ rq->rq_policy = p->policy; -+ rq->rq_prio = p->prio; -+ if (p != rq->idle) -+ rq->rq_running = true; -+ else -+ rq->rq_running = false; -+} -+ -+static void reset_rq_task(struct rq *rq, struct task_struct *p) -+{ -+ rq->rq_policy = p->policy; -+ rq->rq_prio = p->prio; -+} -+ -+/* + * schedule() is the main scheduler function. + */ -+asmlinkage void __sched schedule(void) ++static inline int check_sleep_on_wq(int cpu, struct task_struct *p) +{ -+ struct task_struct *prev, *next, *idle; -+ unsigned long *switch_count; -+ bool deactivate; -+ struct rq *rq; -+ int cpu; -+ -+need_resched: -+ preempt_disable(); -+ -+ cpu = smp_processor_id(); -+ rq = cpu_rq(cpu); -+ rcu_note_context_switch(cpu); -+ prev = rq->curr; -+ -+ deactivate = false; -+ schedule_debug(prev); -+ -+ grq_lock_irq(); -+ -+ switch_count = &prev->nivcsw; -+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { -+ if (unlikely(signal_pending_state(prev->state, prev))) { -+ prev->state = TASK_RUNNING; ++ int deactivate; ++ deactivate = 0; ++ if (p->state && !(preempt_count() & PREEMPT_ACTIVE)) { ++ if (unlikely(signal_pending_state(p->state, p))) { ++ p->state = TASK_RUNNING; + } else { -+ deactivate = true; ++ deactivate = 1; + /* + * If a worker is going to sleep, notify and + * ask workqueue whether it wants to wake up a + * task to maintain concurrency. 
If so, wake + * up the task. + */ -+ if (prev->flags & PF_WQ_WORKER) { ++ if (p->flags & PF_WQ_WORKER) { + struct task_struct *to_wakeup; + -+ to_wakeup = wq_worker_sleeping(prev, cpu); ++ to_wakeup = wq_worker_sleeping(p, cpu); + if (to_wakeup) { + /* This shouldn't happen, but does */ -+ if (unlikely(to_wakeup == prev)) -+ deactivate = false; ++ if (unlikely(to_wakeup == p)) ++ deactivate = 0; + else + try_to_wake_up_local(to_wakeup); + } + } ++ ++ /* ++ * If we are going to sleep and we have plugged IO queued, make ++ * sure to submit it to avoid deadlocks. ++ */ ++ if (unlikely(deactivate && blk_needs_flush_plug(p))) { ++ operate_blk_needs_flush_plug(p); ++ deactivate = SCHED_RESCHED; ++ goto out; ++ } + } -+ switch_count = &prev->nvcsw; + } ++out: ++ return deactivate; ++} + -+ /* -+ * If we are going to sleep and we have plugged IO queued, make -+ * sure to submit it to avoid deadlocks. -+ */ -+ if (unlikely(deactivate && blk_needs_flush_plug(prev))) { -+ grq_unlock_irq(); -+ preempt_enable_no_resched(); -+ blk_schedule_flush_plug(prev); -+ goto need_resched; -+ } ++static inline int do_schedule(void) ++{ ++ struct task_struct *prev, *next, *idle; ++ struct rq *rq; ++ int cpu; ++ int deactivate; + -+ update_clocks(rq); -+ update_cpu_clock(rq, prev, false); -+ if (rq->clock - rq->last_tick > HALF_JIFFY_NS) -+ rq->dither = false; -+ else -+ rq->dither = true; ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ rcu_note_context_switch(cpu); ++ prev = rq->curr; + ++ grq_lock_irq(); ++ ++ if((deactivate = check_sleep_on_wq(cpu, prev)) == SCHED_RESCHED) { ++ goto out; ++ } ++ + clear_tsk_need_resched(prev); + + idle = rq->idle; + if (idle != prev) { -+ /* Update all the information stored on struct rq */ -+ prev->time_slice = rq->rq_time_slice; -+ prev->deadline = rq->rq_deadline; -+ check_deadline(prev); -+ prev->last_ran = rq->clock; ++ check_timeslice_end(rq, prev); + + /* Task changed affinity off this CPU */ + if (needs_other_cpu(prev, cpu)) @@ -4346,18 
+3629,16 @@ + else if (!deactivate) { + if (!queued_notrunning()) { + /* -+ * We now know prev is the only thing that is -+ * awaiting CPU so we can bypass rechecking for -+ * the earliest deadline task and just run it -+ * again. ++ * Rerun the prev task again. + */ + set_rq_task(rq, prev); + grq_unlock_irq(); -+ goto rerun_prev_unlocked; ++ goto out; + } else + swap_sticky(rq, cpu, prev); + } -+ return_task(prev, deactivate); ++ ++ put_prev_task(rq, cpu, prev, deactivate); + } + + if (unlikely(!queued_notrunning())) { @@ -4366,48 +3647,36 @@ + * scheduled as a high priority task in its own right. + */ + next = idle; -+ schedstat_inc(rq, sched_goidle); + set_cpuidle_map(cpu); + } else { -+ next = earliest_deadline_task(rq, cpu, idle); -+ if (likely(next->prio != PRIO_LIMIT)) -+ clear_cpuidle_map(cpu); -+ else -+ set_cpuidle_map(cpu); ++ next = get_runnable_task(rq, cpu, idle); + } + + if (likely(prev != next)) { -+ /* -+ * Don't stick tasks when a real time task is going to run as -+ * they may literally get stuck. -+ */ -+ if (rt_task(next)) -+ unstick_task(rq, prev); -+ set_rq_task(rq, next); ++ prev->nvcsw++; + grq.nr_switches++; -+ prev->on_cpu = false; -+ next->on_cpu = true; -+ rq->curr = next; -+ ++*switch_count; + -+ context_switch(rq, prev, next); /* unlocks the grq */ -+ /* -+ * The context switch have flipped the stack from under us -+ * and restored the local variables which were saved when -+ * this task called schedule() in the past. prev == current -+ * is still correct, but it can be moved to another cpu/rq. 
-+ */
-+ cpu = smp_processor_id();
-+ rq = cpu_rq(cpu);
++ task_switch(rq, prev, next);
+ idle = rq->idle;
+ } else
+ grq_unlock_irq();
+
-+rerun_prev_unlocked:
-+ preempt_enable_no_resched();
-+ if (unlikely(need_resched()))
-+ goto need_resched;
++out:
++ if (deactivate == SCHED_RESCHED) grq_unlock_irq(); return deactivate;
+}
++
++asmlinkage void __sched schedule(void)
++{
++reschedule:
++ preempt_disable();
++
++ if(do_schedule() == SCHED_RESCHED)
++ { preempt_enable_no_resched(); goto reschedule; }
++
++ preempt_enable_no_resched();
++ if(unlikely(need_resched()))
++ goto reschedule;
++}
+EXPORT_SYMBOL(schedule);
+
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4826,7 +4095,7 @@
+ */
+long __sched
+wait_for_completion_killable_timeout(struct completion *x,
-+ unsigned long timeout)
++ unsigned long timeout)
+{
+ return wait_for_common(x, timeout, TASK_KILLABLE);
+}
@@ -4934,7 +4203,7 @@
+ * @prio: prio value (kernel-internal form)
+ *
+ * This function changes the 'effective' priority of a task. It does
-+ * not touch ->normal_prio like __setscheduler().
++ * not touch ->prio like __setscheduler().
+ *
+ * Used by the rt_mutex code to implement priority inheritance logic.
+ */
@@ -4951,13 +4220,10 @@
+ trace_sched_pi_setprio(p, prio);
+ oldprio = p->prio;
+ queued = task_queued(p);
-+ if (queued)
-+ dequeue_task(p);
+ p->prio = prio;
+ if (task_running(p) && prio > oldprio)
+ resched_task(p);
+ if (queued) {
-+ enqueue_task(p);
+ try_preempt(p, rq);
+ }
+
@@ -4966,15 +4232,6 @@
+
+#endif
+
-+/*
-+ * Adjust the deadline for when the priority is to change, before it's
-+ * changed. 
-+ */ -+static inline void adjust_deadline(struct task_struct *p, int new_prio) -+{ -+ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p); -+} -+ +void set_user_nice(struct task_struct *p, long nice) +{ + int queued, new_static, old_static; @@ -5000,16 +4257,12 @@ + goto out_unlock; + } + queued = task_queued(p); -+ if (queued) -+ dequeue_task(p); + -+ adjust_deadline(p, new_static); + old_static = p->static_prio; + p->static_prio = new_static; -+ p->prio = effective_prio(p); ++ p->prio = p->static_prio; + + if (queued) { -+ enqueue_task(p); + if (new_static < old_static) + try_preempt(p, rq); + } else if (task_running(p)) { @@ -5083,26 +4336,11 @@ + * @p: the task in question. + * + * This is the priority value as seen by users in /proc. -+ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes -+ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO). ++ * RT tasks are offset by -100. Normal tasks are centered around 1. + */ +int task_prio(const struct task_struct *p) +{ -+ int delta, prio = p->prio - MAX_RT_PRIO; -+ -+ /* rt tasks and iso tasks */ -+ if (prio <= 0) -+ goto out; -+ -+ /* Convert to ms to avoid overflows */ -+ delta = NS_TO_MS(p->deadline - grq.niffies); -+ delta = delta * 40 / ms_longest_deadline_diff(); -+ if (delta > 0 && delta <= 80) -+ prio += delta; -+ if (idleprio_task(p)) -+ prio += 40; -+out: -+ return prio; ++ return p->prio; +} + +/** @@ -5151,7 +4389,6 @@ + p->policy = policy; + oldrtprio = p->rt_priority; + p->rt_priority = prio; -+ p->normal_prio = normal_prio(p); + oldprio = p->prio; + /* we are holding p->pi_lock already */ + p->prio = rt_mutex_getprio(p); @@ -5203,12 +4440,6 @@ + unlock_task_sighand(p, &lflags); + if (rlim_rtprio) + goto recheck; -+ /* -+ * If the caller requested an RT policy without having the -+ * necessary rights, we downgrade the policy to SCHED_ISO. -+ * We also set the parameter to zero to pass the checks. 
-+ */ -+ policy = SCHED_ISO; + param = &zero_param; + } +recheck: @@ -5230,8 +4461,8 @@ + * SCHED_BATCH is 0. + */ + if (param->sched_priority < 0 || -+ (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) || -+ (!p->mm && param->sched_priority > MAX_RT_PRIO - 1)) ++ (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) || ++ (!p->mm && param->sched_priority > MAX_RT_PRIO - 1)) + return -EINVAL; + if (is_rt_policy(policy) != (param->sched_priority != 0)) + return -EINVAL; @@ -5250,20 +4481,10 @@ + + /* can't increase priority */ + if (param->sched_priority > p->rt_priority && -+ param->sched_priority > rlim_rtprio) ++ param->sched_priority > rlim_rtprio) + return -EPERM; + } else { + switch (p->policy) { -+ /* -+ * Can only downgrade policies but not back to -+ * SCHED_NORMAL -+ */ -+ case SCHED_ISO: -+ if (policy == SCHED_ISO) -+ goto out; -+ if (policy == SCHED_NORMAL) -+ return -EPERM; -+ break; + case SCHED_BATCH: + if (policy == SCHED_BATCH) + goto out; @@ -5332,15 +4553,11 @@ + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + goto recheck; + } -+ update_clocks(rq); + p->sched_reset_on_fork = reset_on_fork; + + queued = task_queued(p); -+ if (queued) -+ dequeue_task(p); + __setscheduler(p, rq, policy, param->sched_priority); + if (queued) { -+ enqueue_task(p); + try_preempt(p, rq); + } + __task_grq_unlock(); @@ -5360,7 +4577,7 @@ + * NOTE that the task may be already dead. + */ +int sched_setscheduler(struct task_struct *p, int policy, -+ const struct sched_param *param) ++ const struct sched_param *param) +{ + return __sched_setscheduler(p, policy, param, true); +} @@ -5379,7 +4596,7 @@ + * but our caller might not have that capability. + */ +int sched_setscheduler_nocheck(struct task_struct *p, int policy, -+ const struct sched_param *param) ++ const struct sched_param *param) +{ + return __sched_setscheduler(p, policy, param, false); +} @@ -5413,7 +4630,7 @@ + * @param: structure containing the new RT priority. 
+ */ +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, -+ struct sched_param __user *param) ++ struct sched_param __user *param) +{ + /* negative values for policy are not valid */ + if (policy < 0) @@ -5562,7 +4779,7 @@ +} + +static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, -+ cpumask_t *new_mask) ++ cpumask_t *new_mask) +{ + if (len < sizeof(cpumask_t)) { + memset(new_mask, 0, sizeof(cpumask_t)); @@ -5662,8 +4879,7 @@ + * sys_sched_yield - yield the current processor to other threads. + * + * This function yields the current CPU to other tasks. It does this by -+ * scheduling away the current task. If it still has the earliest deadline -+ * it will be scheduled again as the next task. ++ * scheduling away the current task. + */ +SYSCALL_DEFINE0(sched_yield) +{ @@ -5671,7 +4887,6 @@ + + p = current; + grq_lock_irq(); -+ schedstat_inc(task_rq(p), yld_count); + requeue_task(p); + + /* @@ -5786,19 +5001,19 @@ + unsigned long flags; + bool yielded = 0; + struct rq *rq; ++ struct task_struct *curr; + + rq = this_rq(); + grq_lock_irqsave(&flags); + if (task_running(p) || p->state) + goto out_unlock; + yielded = 1; -+ if (p->deadline > rq->rq_deadline) -+ p->deadline = rq->rq_deadline; -+ p->time_slice += rq->rq_time_slice; -+ rq->rq_time_slice = 0; ++ curr = rq->curr; ++ p->time_slice += curr->time_slice; ++ curr->time_slice = 0; + if (p->time_slice > timeslice()) + p->time_slice = timeslice(); -+ set_tsk_need_resched(rq->curr); ++ set_tsk_need_resched(curr); +out_unlock: + grq_unlock_irqrestore(&flags); + @@ -5864,7 +5079,6 @@ + break; + case SCHED_NORMAL: + case SCHED_BATCH: -+ case SCHED_ISO: + case SCHED_IDLEPRIO: + ret = 0; + break; @@ -5890,7 +5104,6 @@ + break; + case SCHED_NORMAL: + case SCHED_BATCH: -+ case SCHED_ISO: + case SCHED_IDLEPRIO: + ret = 0; + break; @@ -5929,7 +5142,7 @@ + goto out_unlock; + + grq_lock_irqsave(&flags); -+ time_slice = p->policy == SCHED_FIFO ? 
0 : MS_TO_NS(task_timeslice(p)); ++ time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(rr_interval); + grq_unlock_irqrestore(&flags); + + rcu_read_unlock(); @@ -5959,7 +5172,7 @@ + printk(KERN_CONT " %08lx ", thread_saved_pc(p)); +#else + if (state == TASK_RUNNING) -+ printk(KERN_CONT " running task "); ++ printk(KERN_CONT " running task "); + else + printk(KERN_CONT " %016lx ", thread_saved_pc(p)); +#endif @@ -5979,10 +5192,10 @@ + +#if BITS_PER_LONG == 32 + printk(KERN_INFO -+ " task PC stack pid father\n"); ++ " task PC stack pid father\n"); +#else + printk(KERN_INFO -+ " task PC stack pid father\n"); ++ " task PC stack pid father\n"); +#endif + rcu_read_lock(); + do_each_thread(g, p) { @@ -6030,6 +5243,7 @@ + idle->state = TASK_RUNNING; + /* Setting prio to illegal value shouldn't matter when never queued */ + idle->prio = PRIO_LIMIT; ++ idle->policy = SCHED_IDLE; + set_rq_task(rq, idle); + do_set_cpus_allowed(idle, &cpumask_of_cpu(cpu)); + /* Silence PROVE_RCU */ @@ -6254,8 +5468,8 @@ + */ + if (p->mm && printk_ratelimit()) { + printk(KERN_INFO "process %d (%s) no " -+ "longer affine to cpu %d\n", -+ task_pid_nr(p), p->comm, src_cpu); ++ "longer affine to cpu %d\n", ++ task_pid_nr(p), p->comm, src_cpu); + } + } + clear_sticky(p); @@ -6539,12 +5753,11 @@ + case CPU_DEAD: + /* Idle task back to normal (off runqueue, low prio) */ + grq_lock_irq(); -+ return_task(idle, true); ++ put_prev_task(rq, cpu, idle, true); + idle->static_prio = MAX_PRIO; + __setscheduler(idle, rq, SCHED_NORMAL, 0); + idle->prio = PRIO_LIMIT; + set_rq_task(rq, idle); -+ update_clocks(rq); + grq_unlock_irq(); + break; + @@ -6576,7 +5789,7 @@ +}; + +static int __cpuinit sched_cpu_active(struct notifier_block *nfb, -+ unsigned long action, void *hcpu) ++ unsigned long action, void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: @@ -6711,7 +5924,7 @@ + printk(KERN_ERR "ERROR: groups don't span domain->span\n"); + + if (sd->parent && -+ !cpumask_subset(groupmask, 
sched_domain_span(sd->parent))) ++ !cpumask_subset(groupmask, sched_domain_span(sd->parent))) + printk(KERN_ERR "ERROR: parent span is not a superset " + "of domain->span\n"); + return 0; @@ -7115,8 +6328,8 @@ +struct sched_domain_topology_level { + sched_domain_init_f init; + sched_domain_mask_f mask; -+ int flags; -+ struct sd_data data; ++ int flags; ++ struct sd_data data; +}; + +static int @@ -7433,7 +6646,7 @@ + struct sched_group *sg; + struct sched_group_power *sgp; + -+ sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), ++ sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), + GFP_KERNEL, cpu_to_node(j)); + if (!sd) + return -ENOMEM; @@ -7507,7 +6720,7 @@ + * to the individual cpus + */ +static int build_sched_domains(const struct cpumask *cpu_map, -+ struct sched_domain_attr *attr) ++ struct sched_domain_attr *attr) +{ + enum s_alloc alloc_state = sa_none; + struct sched_domain *sd; @@ -7701,7 +6914,7 @@ + * Call with hotplug lock held + */ +void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], -+ struct sched_domain_attr *dattr_new) ++ struct sched_domain_attr *dattr_new) +{ + int i, j, n; + int new_topology; @@ -7720,7 +6933,7 @@ + for (i = 0; i < ndoms_cur; i++) { + for (j = 0; j < n && !new_topology; j++) { + if (cpumask_equal(doms_cur[i], doms_new[j]) -+ && dattrs_equal(dattr_cur, i, dattr_new, j)) ++ && dattrs_equal(dattr_cur, i, dattr_new, j)) + goto match1; + } + /* no match - a current sched domain not in new doms_new[] */ @@ -7740,7 +6953,7 @@ + for (i = 0; i < ndoms_new; i++) { + for (j = 0; j < ndoms_cur && !new_topology; j++) { + if (cpumask_equal(doms_new[i], doms_cur[j]) -+ && dattrs_equal(dattr_new, i, dattr_cur, j)) ++ && dattrs_equal(dattr_new, i, dattr_cur, j)) + goto match2; + } + /* no match - add a new doms_new */ @@ -7809,8 +7022,8 @@ + return sprintf(buf, "%u\n", sched_mc_power_savings); +} +static ssize_t sched_mc_power_savings_store(struct device *dev, -+ struct device_attribute *attr, -+ 
const char *buf, size_t count) ++ struct device_attribute *attr, ++ const char *buf, size_t count) +{ + return sched_power_savings_store(buf, count, 0); +} @@ -7821,14 +7034,14 @@ + +#ifdef CONFIG_SCHED_SMT +static ssize_t sched_smt_power_savings_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) ++ struct device_attribute *attr, ++ char *buf) +{ + return sprintf(buf, "%u\n", sched_smt_power_savings); +} +static ssize_t sched_smt_power_savings_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) ++ struct device_attribute *attr, ++ const char *buf, size_t count) +{ + return sched_power_savings_store(buf, count, 1); +} @@ -7859,7 +7072,7 @@ + * around partition_sched_domains(). + */ +static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, -+ void *hcpu) ++ void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: @@ -7872,7 +7085,7 @@ +} + +static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, -+ void *hcpu) ++ void *hcpu) +{ + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_PREPARE: @@ -7898,7 +7111,7 @@ +static bool siblings_cpu_idle(int cpu) +{ + return cpumask_subset(&(cpu_rq(cpu)->smt_siblings), -+ &grq.cpu_idle_map); ++ &grq.cpu_idle_map); +} +#endif +#ifdef CONFIG_SCHED_MC @@ -7906,7 +7119,7 @@ +static bool cache_cpu_idle(int cpu) +{ + return cpumask_subset(&(cpu_rq(cpu)->cache_siblings), -+ &grq.cpu_idle_map); ++ &grq.cpu_idle_map); +} +#endif + @@ -8025,17 +7238,10 @@ + int i; + struct rq *rq; + -+ prio_ratios[0] = 128; -+ for (i = 1 ; i < PRIO_RANGE ; i++) -+ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10; ++ print_scheduler_version(); + + raw_spin_lock_init(&grq.lock); + grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0; -+ grq.niffies = 0; -+ grq.last_jiffy = jiffies; -+ raw_spin_lock_init(&grq.iso_lock); -+ grq.iso_ticks = 0; -+ grq.iso_refractory = false; + grq.noc = 1; +#ifdef CONFIG_SMP + 
init_defrootdomain(); @@ -8047,11 +7253,9 @@ + for_each_possible_cpu(i) { + rq = cpu_rq(i); + rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc = -+ rq->iowait_pc = rq->idle_pc = 0; -+ rq->dither = false; ++ rq->iowait_pc = rq->idle_pc = 0; +#ifdef CONFIG_SMP + rq->sticky_task = NULL; -+ rq->last_niffy = 0; + rq->sd = NULL; + rq->rd = NULL; + rq->online = false; @@ -8142,7 +7346,7 @@ + + rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ + if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || -+ system_state != SYSTEM_RUNNING || oops_in_progress) ++ system_state != SYSTEM_RUNNING || oops_in_progress) + return; + if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) + return; @@ -8175,18 +7379,15 @@ + read_lock_irq(&tasklist_lock); + + do_each_thread(g, p) { -+ if (!rt_task(p) && !iso_task(p)) ++ if (!rt_task(p)) + continue; + + raw_spin_lock_irqsave(&p->pi_lock, flags); + rq = __task_grq_lock(p); + + queued = task_queued(p); -+ if (queued) -+ dequeue_task(p); + __setscheduler(p, rq, SCHED_NORMAL, 0); + if (queued) { -+ enqueue_task(p); + try_preempt(p, rq); + } + @@ -8354,24 +7555,595 @@ + return smt_gain; +} +#endif -Index: linux-3.3-ck1/kernel/sched/Makefile -=================================================================== ---- linux-3.3-ck1.orig/kernel/sched/Makefile 2012-03-24 19:30:00.014420399 +1100 -+++ linux-3.3-ck1/kernel/sched/Makefile 2012-03-24 19:30:29.047925897 +1100 -@@ -11,10 +11,14 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER - CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer - endif +diff -ruN linux-3.3.5/kernel/sched/stats.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sched/stats.c +--- linux-3.3.5/kernel/sched/stats.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sched/stats.c 1970-01-01 08:00:00.000000000 +0800 +@@ -1,111 +0,0 @@ +- +-#include +-#include +-#include +-#include +- +-#include "sched.h" +- +-/* +- * bump this up when changing the output format or 
the meaning of an existing +- * format, so that tools can adapt (or abort) +- */ +-#define SCHEDSTAT_VERSION 15 +- +-static int show_schedstat(struct seq_file *seq, void *v) +-{ +- int cpu; +- int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9; +- char *mask_str = kmalloc(mask_len, GFP_KERNEL); +- +- if (mask_str == NULL) +- return -ENOMEM; +- +- seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION); +- seq_printf(seq, "timestamp %lu\n", jiffies); +- for_each_online_cpu(cpu) { +- struct rq *rq = cpu_rq(cpu); +-#ifdef CONFIG_SMP +- struct sched_domain *sd; +- int dcount = 0; +-#endif +- +- /* runqueue-specific stats */ +- seq_printf(seq, +- "cpu%d %u %u %u %u %u %u %llu %llu %lu", +- cpu, rq->yld_count, +- rq->sched_switch, rq->sched_count, rq->sched_goidle, +- rq->ttwu_count, rq->ttwu_local, +- rq->rq_cpu_time, +- rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); +- +- seq_printf(seq, "\n"); +- +-#ifdef CONFIG_SMP +- /* domain-specific stats */ +- rcu_read_lock(); +- for_each_domain(cpu, sd) { +- enum cpu_idle_type itype; +- +- cpumask_scnprintf(mask_str, mask_len, +- sched_domain_span(sd)); +- seq_printf(seq, "domain%d %s", dcount++, mask_str); +- for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; +- itype++) { +- seq_printf(seq, " %u %u %u %u %u %u %u %u", +- sd->lb_count[itype], +- sd->lb_balanced[itype], +- sd->lb_failed[itype], +- sd->lb_imbalance[itype], +- sd->lb_gained[itype], +- sd->lb_hot_gained[itype], +- sd->lb_nobusyq[itype], +- sd->lb_nobusyg[itype]); +- } +- seq_printf(seq, +- " %u %u %u %u %u %u %u %u %u %u %u %u\n", +- sd->alb_count, sd->alb_failed, sd->alb_pushed, +- sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed, +- sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed, +- sd->ttwu_wake_remote, sd->ttwu_move_affine, +- sd->ttwu_move_balance); +- } +- rcu_read_unlock(); +-#endif +- } +- kfree(mask_str); +- return 0; +-} +- +-static int schedstat_open(struct inode *inode, struct file *file) +-{ +- unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 
32); +- char *buf = kmalloc(size, GFP_KERNEL); +- struct seq_file *m; +- int res; +- +- if (!buf) +- return -ENOMEM; +- res = single_open(file, show_schedstat, NULL); +- if (!res) { +- m = file->private_data; +- m->buf = buf; +- m->size = size; +- } else +- kfree(buf); +- return res; +-} +- +-static const struct file_operations proc_schedstat_operations = { +- .open = schedstat_open, +- .read = seq_read, +- .llseek = seq_lseek, +- .release = single_release, +-}; +- +-static int __init proc_schedstat_init(void) +-{ +- proc_create("schedstat", 0, NULL, &proc_schedstat_operations); +- return 0; +-} +-module_init(proc_schedstat_init); +diff -ruN linux-3.3.5/kernel/sched/stats.h linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sched/stats.h +--- linux-3.3.5/kernel/sched/stats.h 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sched/stats.h 2012-05-19 22:05:22.000000000 +0800 +@@ -1,231 +0,0 @@ +- +-#ifdef CONFIG_SCHEDSTATS +- +-/* +- * Expects runqueue lock to be held for atomicity of update +- */ +-static inline void +-rq_sched_info_arrive(struct rq *rq, unsigned long long delta) +-{ +- if (rq) { +- rq->rq_sched_info.run_delay += delta; +- rq->rq_sched_info.pcount++; +- } +-} +- +-/* +- * Expects runqueue lock to be held for atomicity of update +- */ +-static inline void +-rq_sched_info_depart(struct rq *rq, unsigned long long delta) +-{ +- if (rq) +- rq->rq_cpu_time += delta; +-} +- +-static inline void +-rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) +-{ +- if (rq) +- rq->rq_sched_info.run_delay += delta; +-} +-# define schedstat_inc(rq, field) do { (rq)->field++; } while (0) +-# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0) +-# define schedstat_set(var, val) do { var = (val); } while (0) +-#else /* !CONFIG_SCHEDSTATS */ +-static inline void +-rq_sched_info_arrive(struct rq *rq, unsigned long long delta) +-{} +-static inline void +-rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) 
+-{} +-static inline void +-rq_sched_info_depart(struct rq *rq, unsigned long long delta) +-{} +-# define schedstat_inc(rq, field) do { } while (0) +-# define schedstat_add(rq, field, amt) do { } while (0) +-# define schedstat_set(var, val) do { } while (0) +-#endif +- +-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) +-static inline void sched_info_reset_dequeued(struct task_struct *t) +-{ +- t->sched_info.last_queued = 0; +-} +- +-/* +- * We are interested in knowing how long it was from the *first* time a +- * task was queued to the time that it finally hit a cpu, we call this routine +- * from dequeue_task() to account for possible rq->clock skew across cpus. The +- * delta taken on each cpu would annul the skew. +- */ +-static inline void sched_info_dequeued(struct task_struct *t) +-{ +- unsigned long long now = task_rq(t)->clock, delta = 0; +- +- if (unlikely(sched_info_on())) +- if (t->sched_info.last_queued) +- delta = now - t->sched_info.last_queued; +- sched_info_reset_dequeued(t); +- t->sched_info.run_delay += delta; +- +- rq_sched_info_dequeued(task_rq(t), delta); +-} +- +-/* +- * Called when a task finally hits the cpu. We can now calculate how +- * long it was waiting to run. We also note when it began so that we +- * can keep stats on how long its timeslice is. +- */ +-static void sched_info_arrive(struct task_struct *t) +-{ +- unsigned long long now = task_rq(t)->clock, delta = 0; +- +- if (t->sched_info.last_queued) +- delta = now - t->sched_info.last_queued; +- sched_info_reset_dequeued(t); +- t->sched_info.run_delay += delta; +- t->sched_info.last_arrival = now; +- t->sched_info.pcount++; +- +- rq_sched_info_arrive(task_rq(t), delta); +-} +- +-/* +- * This function is only called from enqueue_task(), but also only updates +- * the timestamp if it is already not set. It's assumed that +- * sched_info_dequeued() will clear that stamp when appropriate. 
+- */ +-static inline void sched_info_queued(struct task_struct *t) +-{ +- if (unlikely(sched_info_on())) +- if (!t->sched_info.last_queued) +- t->sched_info.last_queued = task_rq(t)->clock; +-} +- +-/* +- * Called when a process ceases being the active-running process, either +- * voluntarily or involuntarily. Now we can calculate how long we ran. +- * Also, if the process is still in the TASK_RUNNING state, call +- * sched_info_queued() to mark that it has now again started waiting on +- * the runqueue. +- */ +-static inline void sched_info_depart(struct task_struct *t) +-{ +- unsigned long long delta = task_rq(t)->clock - +- t->sched_info.last_arrival; +- +- rq_sched_info_depart(task_rq(t), delta); +- +- if (t->state == TASK_RUNNING) +- sched_info_queued(t); +-} +- +-/* +- * Called when tasks are switched involuntarily due, typically, to expiring +- * their time slice. (This may also be called when switching to or from +- * the idle task.) We are only called when prev != next. +- */ +-static inline void +-__sched_info_switch(struct task_struct *prev, struct task_struct *next) +-{ +- struct rq *rq = task_rq(prev); +- +- /* +- * prev now departs the cpu. It's not interesting to record +- * stats about how efficient we were at scheduling the idle +- * process, however. +- */ +- if (prev != rq->idle) +- sched_info_depart(prev); +- +- if (next != rq->idle) +- sched_info_arrive(next); +-} +-static inline void +-sched_info_switch(struct task_struct *prev, struct task_struct *next) +-{ +- if (unlikely(sched_info_on())) +- __sched_info_switch(prev, next); +-} +-#else +-#define sched_info_queued(t) do { } while (0) +-#define sched_info_reset_dequeued(t) do { } while (0) +-#define sched_info_dequeued(t) do { } while (0) +-#define sched_info_switch(t, next) do { } while (0) +-#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ +- +-/* +- * The following are functions that support scheduler-internal time accounting. 
+- * These functions are generally called at the timer tick. None of this depends +- * on CONFIG_SCHEDSTATS. +- */ +- +-/** +- * account_group_user_time - Maintain utime for a thread group. +- * +- * @tsk: Pointer to task structure. +- * @cputime: Time value by which to increment the utime field of the +- * thread_group_cputime structure. +- * +- * If thread group time is being maintained, get the structure for the +- * running CPU and update the utime field there. +- */ +-static inline void account_group_user_time(struct task_struct *tsk, +- cputime_t cputime) +-{ +- struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; +- +- if (!cputimer->running) +- return; +- +- raw_spin_lock(&cputimer->lock); +- cputimer->cputime.utime += cputime; +- raw_spin_unlock(&cputimer->lock); +-} +- +-/** +- * account_group_system_time - Maintain stime for a thread group. +- * +- * @tsk: Pointer to task structure. +- * @cputime: Time value by which to increment the stime field of the +- * thread_group_cputime structure. +- * +- * If thread group time is being maintained, get the structure for the +- * running CPU and update the stime field there. +- */ +-static inline void account_group_system_time(struct task_struct *tsk, +- cputime_t cputime) +-{ +- struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; +- +- if (!cputimer->running) +- return; +- +- raw_spin_lock(&cputimer->lock); +- cputimer->cputime.stime += cputime; +- raw_spin_unlock(&cputimer->lock); +-} +- +-/** +- * account_group_exec_runtime - Maintain exec runtime for a thread group. +- * +- * @tsk: Pointer to task structure. +- * @ns: Time value by which to increment the sum_exec_runtime field +- * of the thread_group_cputime structure. +- * +- * If thread group time is being maintained, get the structure for the +- * running CPU and update the sum_exec_runtime field there. 
+- */ +-static inline void account_group_exec_runtime(struct task_struct *tsk, +- unsigned long long ns) +-{ +- struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; +- +- if (!cputimer->running) +- return; +- +- raw_spin_lock(&cputimer->lock); +- cputimer->cputime.sum_exec_runtime += ns; +- raw_spin_unlock(&cputimer->lock); +-} +diff -ruN linux-3.3.5/kernel/sysctl.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sysctl.c +--- linux-3.3.5/kernel/sysctl.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/kernel/sysctl.c 2012-05-19 22:04:37.000000000 +0800 +@@ -121,7 +121,12 @@ + static int __maybe_unused two = 2; + static int __maybe_unused three = 3; + static unsigned long one_ul = 1; +-static int one_hundred = 100; ++static int __maybe_unused one_hundred = 100; ++#ifdef CONFIG_SCHED_RIFS ++extern int rr_interval; ++extern int sched_iso_cpu; ++static int __read_mostly one_thousand = 1000; ++#endif + #ifdef CONFIG_PRINTK + static int ten_thousand = 10000; + #endif +@@ -251,7 +256,7 @@ + { } + }; -+ifdef CONFIG_SCHED_BFS -+obj-y += bfs.o clock.o -+else - obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o --obj-$(CONFIG_SMP) += cpupri.o - obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o --obj-$(CONFIG_SCHEDSTATS) += stats.o - obj-$(CONFIG_SCHED_DEBUG) += debug.o -+endif -+obj-$(CONFIG_SMP) += cpupri.o -+obj-$(CONFIG_SCHEDSTATS) += stats.o +-#ifdef CONFIG_SCHED_DEBUG ++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_RIFS) + static int min_sched_granularity_ns = 100000; /* 100 usecs */ + static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ + static int min_wakeup_granularity_ns; /* 0 usecs */ +@@ -266,6 +271,7 @@ + #endif + static struct ctl_table kern_table[] = { ++#ifndef CONFIG_SCHED_RIFS + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -383,6 +389,7 @@ + .extra1 = &one, + }, + #endif ++#endif /* !CONFIG_SCHED_RIFS */ + #ifdef CONFIG_PROVE_LOCKING + { + .procname = 
"prove_locking", +@@ -850,6 +857,26 @@ + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_SCHED_RIFS ++ { ++ .procname = "rr_interval", ++ .data = &rr_interval, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &one, ++ .extra2 = &one_thousand, ++ }, ++ { ++ .procname = "iso_cpu", ++ .data = &sched_iso_cpu, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &one_hundred, ++ }, ++#endif + #if defined(CONFIG_S390) && defined(CONFIG_SMP) + { + .procname = "spin_retry", +diff -ruN linux-3.3.5/lib/Kconfig.debug linux-3.3.5-RIFS-RC3-BRAIN-EATING/lib/Kconfig.debug +--- linux-3.3.5/lib/Kconfig.debug 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/lib/Kconfig.debug 2012-05-19 22:04:37.000000000 +0800 +@@ -875,7 +875,7 @@ + config RCU_TORTURE_TEST + tristate "torture tests for RCU" +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && !SCHED_RIFS + default n + help + This option provides a kernel module that runs torture tests +diff -ruN linux-3.3.5/Makefile linux-3.3.5-RIFS-RC3-BRAIN-EATING/Makefile +--- linux-3.3.5/Makefile 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/Makefile 2012-05-26 17:11:09.226639844 +0800 +@@ -1,7 +1,7 @@ + VERSION = 3 + PATCHLEVEL = 3 + SUBLEVEL = 5 +-EXTRAVERSION = ++EXTRAVERSION =-RIFS-V3-RC3-BRAIN-EATING + NAME = Saber-toothed Squirrel + + # *DOCUMENTATION* +diff -ruN linux-3.3.5/mm/memory.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/mm/memory.c +--- linux-3.3.5/mm/memory.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/mm/memory.c 2012-05-19 22:04:37.000000000 +0800 +@@ -3011,7 +3011,7 @@ + mem_cgroup_commit_charge_swapin(page, ptr); + + swap_free(entry); +- if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) ++ if ((vma->vm_flags & VM_LOCKED) || PageMlocked(page)) + try_to_free_swap(page); + unlock_page(page); + if 
(swapcache) { +diff -ruN linux-3.3.5/mm/page-writeback.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/mm/page-writeback.c +--- linux-3.3.5/mm/page-writeback.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/mm/page-writeback.c 2012-05-19 22:04:37.000000000 +0800 +@@ -65,7 +65,7 @@ + /* + * Start background writeback (via writeback threads) at this percentage + */ +-int dirty_background_ratio = 10; ++int dirty_background_ratio = 1; + + /* + * dirty_background_bytes starts at 0 (disabled) so that it is a function of +@@ -82,7 +82,7 @@ + /* + * The generator of dirty data starts writeback at this percentage + */ +-int vm_dirty_ratio = 20; ++int vm_dirty_ratio = 1; + + /* + * vm_dirty_bytes starts at 0 (disabled) so that it is a function of +diff -ruN linux-3.3.5/mm/swapfile.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/mm/swapfile.c +--- linux-3.3.5/mm/swapfile.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/mm/swapfile.c 2012-05-19 22:04:37.000000000 +0800 +@@ -288,7 +288,7 @@ + scan_base = offset = si->lowest_bit; + + /* reuse swap entry of cache-only swap if not busy. */ +- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { ++ if (si->swap_map[offset] == SWAP_HAS_CACHE) { + int swap_was_freed; + spin_unlock(&swap_lock); + swap_was_freed = __try_to_reclaim_swap(si, offset); +@@ -377,7 +377,7 @@ + spin_lock(&swap_lock); + goto checks; + } +- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { ++ if (si->swap_map[offset] == SWAP_HAS_CACHE) { + spin_lock(&swap_lock); + goto checks; + } +@@ -392,7 +392,7 @@ + spin_lock(&swap_lock); + goto checks; + } +- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { ++ if (si->swap_map[offset] == SWAP_HAS_CACHE) { + spin_lock(&swap_lock); + goto checks; + } +@@ -706,8 +706,7 @@ + * Not mapped elsewhere, or swap space full? Free it! + * Also recheck PageSwapCache now page is locked (above). 
+ */ +- if (PageSwapCache(page) && !PageWriteback(page) && +- (!page_mapped(page) || vm_swap_full())) { ++ if (PageSwapCache(page) && !PageWriteback(page)) { + delete_from_swap_cache(page); + SetPageDirty(page); + } +diff -ruN linux-3.3.5/mm/vmscan.c linux-3.3.5-RIFS-RC3-BRAIN-EATING/mm/vmscan.c +--- linux-3.3.5/mm/vmscan.c 2012-05-07 23:55:30.000000000 +0800 ++++ linux-3.3.5-RIFS-RC3-BRAIN-EATING/mm/vmscan.c 2012-05-19 22:04:37.000000000 +0800 +@@ -153,7 +153,7 @@ + /* + * From 0 .. 100. Higher means more swappy. + */ +-int vm_swappiness = 60; ++int vm_swappiness = 10; + long vm_total_pages; /* The total number of pages which the VM controls */ + + static LIST_HEAD(shrinker_list); +@@ -999,7 +999,7 @@ + + activate_locked: + /* Not a candidate for swapping, so reclaim swap space. */ +- if (PageSwapCache(page) && vm_swap_full()) ++ if (PageSwapCache(page)) + try_to_free_swap(page); + VM_BUG_ON(PageActive(page)); + SetPageActive(page); +@@ -2202,6 +2202,35 @@ + } + + /* ++ * Helper functions to adjust nice level of kswapd, based on the priority of ++ * the task (p) that called it. If it is already higher priority we do not ++ * demote its nice level since it is still working on behalf of a higher ++ * priority task. With kernel threads we leave it at nice 0. ++ * ++ * We don't ever run kswapd real time, so if a real time task calls kswapd we ++ * set it to highest SCHED_NORMAL priority. ++ */ ++static inline int effective_sc_prio(struct task_struct *p) ++{ ++ if (likely(p->mm)) { ++ if (rt_task(p)) ++ return -20; ++ if (p->policy == SCHED_IDLEPRIO) ++ return 19; ++ return task_nice(p); ++ } ++ return 0; ++} ++ ++static void set_kswapd_nice(struct task_struct *kswapd, int active) ++{ ++ long nice = effective_sc_prio(current); ++ ++ if (task_nice(kswapd) > nice || !active) ++ set_user_nice(kswapd, nice); ++} ++ ++/* + * This is the direct reclaim path, for page-allocating processes. 
We only + * try to reclaim pages from zones which will satisfy the caller's allocation + * request. +@@ -3106,6 +3135,7 @@ + void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) + { + pg_data_t *pgdat; ++ int active; + + if (!populated_zone(zone)) + return; +@@ -3117,7 +3147,9 @@ + pgdat->kswapd_max_order = order; + pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); + } +- if (!waitqueue_active(&pgdat->kswapd_wait)) ++ active = waitqueue_active(&pgdat->kswapd_wait); ++ set_kswapd_nice(pgdat->kswapd, active); ++ if (!active) + return; + if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) + return;