[LKP] [sched] 05bfb65f52c: -5.2% thrulay.throughput
From: Huang Ying
Date: Mon Dec 15 2014 - 01:55:55 EST
FYI, we noticed the changes below on
commit 05bfb65f52cbdabe26ebb629959416a6cffb034d ("sched: Remove a wake_affine() condition")
testbox/testcase/testparams: ivb42/thrulay/performance-300s
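For context: wake_affine() is the fair scheduler's heuristic for deciding whether a waking task should be pulled over to the waker's CPU rather than stay on its previous one, so a behavioural change there plausibly explains the task-placement shifts visible in the sched_debug numbers below.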
afdeee0510db918b  05bfb65f52cbdabe26ebb62995
----------------  --------------------------
         %stddev     %change         %stddev
             \          |                \
     37071 ±  1%      -5.2%        35155 ±  1%  thrulay.throughput
         9 ± 39%    +294.4%           35 ± 45%  sched_debug.cpu#41.cpu_load[4]
       127 ± 43%    +199.0%          380 ± 33%  sched_debug.cpu#30.curr->pid
     89726 ± 35%    +249.9%       313930 ± 40%  sched_debug.cpu#12.sched_goidle
    180377 ± 34%    +248.9%       629297 ± 40%  sched_debug.cpu#12.nr_switches
    186401 ± 33%    +239.4%       632605 ± 39%  sched_debug.cpu#12.sched_count
       467 ±  9%     -51.9%          224 ± 46%  sched_debug.cfs_rq[27]:/.tg_load_contrib
        73 ± 13%     -58.6%           30 ± 41%  sched_debug.cpu#2.cpu_load[1]
        97 ± 28%     -59.1%           39 ± 47%  sched_debug.cpu#11.load
        30 ± 45%     +86.1%           56 ± 26%  sched_debug.cpu#9.cpu_load[2]
       122 ± 37%     -50.9%           60 ± 46%  sched_debug.cpu#1.cpu_load[1]
        16 ± 38%    +100.0%           32 ± 31%  sched_debug.cfs_rq[41]:/.tg_runnable_contrib
       782 ± 34%     +93.8%         1517 ± 29%  sched_debug.cfs_rq[41]:/.avg->runnable_avg_sum
       445 ± 31%     -43.3%          252 ± 35%  sched_debug.cpu#11.curr->pid
      5983 ± 11%    +106.3%        12342 ± 12%  sched_debug.cfs_rq[12]:/.exec_clock
        53 ± 24%     -38.5%           32 ± 21%  sched_debug.cpu#27.load
      1636 ± 24%     -42.9%          934 ± 22%  sched_debug.cpu#15.curr->pid
       285 ± 48%     -44.8%          157 ± 33%  sched_debug.cpu#26.curr->pid
      8138 ±  9%     +96.5%        15989 ± 11%  sched_debug.cfs_rq[12]:/.min_vruntime
       174 ± 26%     -46.4%           93 ± 25%  sched_debug.cpu#15.load
        55 ± 39%     +49.8%           82 ± 28%  sched_debug.cfs_rq[35]:/.tg_load_contrib
        47 ± 22%     +82.1%           86 ± 28%  sched_debug.cpu#6.cpu_load[2]
        26 ± 22%     -45.3%           14 ± 28%  numa-numastat.node1.other_node
        90 ± 24%     -39.7%           54 ± 19%  sched_debug.cpu#2.cpu_load[3]
        24 ± 37%     +76.5%           43 ±  6%  sched_debug.cpu#32.cpu_load[4]
    107188 ± 22%     +46.3%       156809 ± 18%  sched_debug.cpu#32.sched_count
       409 ± 12%     +54.5%          633 ± 34%  sched_debug.cpu#11.ttwu_local
       131 ± 27%     -43.5%           74 ± 39%  sched_debug.cpu#1.cpu_load[2]
       247 ± 32%     +64.1%          406 ± 29%  sched_debug.cpu#2.curr->pid
        89 ± 29%     -55.3%           39 ± 47%  sched_debug.cfs_rq[11]:/.load
        83 ± 16%     -50.7%           41 ± 26%  sched_debug.cpu#2.cpu_load[2]
    194662 ± 18%     +49.5%       290986 ± 10%  sched_debug.cpu#8.sched_count
        24 ± 22%     +75.5%           43 ± 25%  sched_debug.cpu#31.cpu_load[1]
        28 ± 46%     +57.5%           44 ± 15%  sched_debug.cpu#29.cpu_load[4]
     70637 ± 23%     +34.4%        94908 ± 17%  sched_debug.cpu#27.ttwu_count
        26 ± 40%     +62.5%           42 ± 15%  sched_debug.cpu#32.cpu_load[3]
        65 ± 20%     -30.2%           45 ± 19%  sched_debug.cfs_rq[26]:/.tg_runnable_contrib
      3044 ± 20%     -29.7%         2139 ± 19%  sched_debug.cfs_rq[26]:/.avg->runnable_avg_sum
        28 ±  6%     -33.0%           19 ± 16%  sched_debug.cfs_rq[39]:/.tg_runnable_contrib
      1357 ±  6%     -32.5%          915 ± 17%  sched_debug.cfs_rq[39]:/.avg->runnable_avg_sum
       277 ± 14%     -38.6%          170 ± 18%  sched_debug.cfs_rq[40]:/.runnable_load_avg
       279 ± 14%     -39.1%          170 ± 18%  sched_debug.cfs_rq[40]:/.load
    575205 ± 11%     +29.6%       745663 ± 10%  sched_debug.cpu#11.avg_idle
    151626 ± 19%     +17.5%       178195 ± 13%  sched_debug.cpu#3.ttwu_count
    349553 ±  6%     +33.4%       466210 ± 11%  sched_debug.cpu#0.ttwu_count
       133 ±  5%     -29.6%           94 ± 26%  sched_debug.cpu#40.cpu_load[3]
      3767 ± 16%     -28.8%         2680 ± 15%  sched_debug.cpu#40.curr->pid
       279 ± 14%     -25.3%          209 ±  9%  sched_debug.cpu#40.load
        39 ±  8%     -24.7%           29 ±  7%  sched_debug.cfs_rq[15]:/.tg_runnable_contrib
      1855 ±  7%     -24.0%         1410 ±  6%  sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
       309 ±  7%     -32.9%          207 ± 20%  sched_debug.cpu#40.cpu_load[0]
       213 ±  5%     -32.4%          144 ± 22%  sched_debug.cpu#40.cpu_load[2]
    602662 ± 11%     +20.9%       728740 ±  7%  sched_debug.cpu#30.avg_idle
     84865 ± 16%     +26.5%       107350 ±  8%  sched_debug.cpu#26.ttwu_count
       285 ±  6%     -34.0%          188 ± 20%  sched_debug.cpu#40.cpu_load[1]
    178498 ± 12%     +21.2%       216368 ± 12%  sched_debug.cpu#2.ttwu_count
      5368 ±  9%     +14.5%         6147 ±  9%  sched_debug.cfs_rq[28]:/.exec_clock
    229046 ±  6%     -10.9%       204016 ±  7%  sched_debug.cpu#8.ttwu_count
    716125 ±  9%     +22.4%       876793 ±  4%  sched_debug.cpu#14.avg_idle
       921 ±  4%     +15.3%         1062 ±  5%  sched_debug.cpu#25.ttwu_local
    628697 ± 12%     +22.5%       769882 ±  8%  sched_debug.cpu#1.avg_idle
    123795 ±  7%     -13.6%       106992 ±  9%  sched_debug.cpu#32.ttwu_count
     10875 ±  4%     -16.5%         9083 ±  8%  sched_debug.cfs_rq[35]:/.min_vruntime
      5103 ±  9%     -16.2%         4277 ± 10%  sched_debug.cfs_rq[40]:/.min_vruntime
        86 ±  6%     -13.7%           74 ± 11%  sched_debug.cpu#44.ttwu_local
    538474 ± 13%     +27.6%       686910 ±  7%  sched_debug.cpu#15.avg_idle
       223 ±  4%     +16.4%          260 ±  6%  sched_debug.cpu#28.ttwu_local
     33784 ±  5%     -15.1%        28679 ± 11%  cpuidle.C1E-IVT.usage
      2764 ±  6%     -20.6%         2193 ± 20%  sched_debug.cfs_rq[20]:/.min_vruntime
       681 ± 19%     -19.1%          551 ±  6%  cpuidle.POLL.usage
     18925 ±  9%     +15.6%        21877 ±  3%  sched_debug.cfs_rq[0]:/.exec_clock
    559454 ± 13%     +21.3%       678413 ±  6%  sched_debug.cpu#27.avg_idle
     49536 ±  3%     -10.2%        44495 ±  1%  sched_debug.cpu#15.nr_load_updates
     17570 ±  6%     -17.5%        14492 ± 10%  sched_debug.cfs_rq[11]:/.min_vruntime
     57206 ±  1%      -7.6%        52840 ±  2%  sched_debug.cpu#7.nr_load_updates
     51547 ±  1%      -8.0%        47418 ±  3%  sched_debug.cpu#26.nr_load_updates
     43519 ±  1%      -9.2%        39535 ±  1%  sched_debug.cpu#43.nr_load_updates
     50591 ±  1%      -8.6%        46252 ±  2%  sched_debug.cpu#35.nr_load_updates
     45642 ±  1%     -10.1%        41051 ±  2%  sched_debug.cpu#23.nr_load_updates
     46023 ±  2%      -9.0%        41872 ±  1%  sched_debug.cpu#19.nr_load_updates
      3.42 ±  1%      +9.8%         3.75 ±  2%  turbostat.RAM_W
     58859 ±  3%      +5.9%        62353 ±  1%  vmstat.system.cs
       269 ±  2%      +3.8%          279 ±  0%  time.system_time
        93 ±  1%      +3.5%           96 ±  0%  time.percent_of_cpu_this_job_got
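For reference, the left column is the base commit afdeee0510db918b and the right column the tested commit; %change is the relative delta of the two means and %stddev the run-to-run variation on each side. The headline number can be double-checked with a one-liner, assuming %change = (new - old) / old:

        awk 'BEGIN { printf "%.1f%%\n", (35155 - 37071) / 37071 * 100 }'    # prints -5.2%

With only ±1% deviation on either side, the -5.2% throughput drop is well outside run-to-run noise; most of the sched_debug deltas, by contrast, carry 20-45% stddev and are much weaker signals on their own.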
ivb42: Ivytown Ivy Bridge-EP
Memory: 64G
thrulay.throughput
44000 ++-*----------------------------------------------------------------+
|..: |
42000 *+ : .*..*. .*. |
| : *.*. *..*. *..* |
| :.. + *. |
40000 ++ * *.. + *.. * |
| + *.*.. .. : |
38000 ++ * * : .*.. |
| *. .*..*.* |
36000 ++ O * |
| O O O O O O
O O O O O O O O O O O O |
34000 ++ O O O O O |
| O O |
32000 ++-------------------O----------------------------------------------+
[*] bisect-good sample
[O] bisect-bad sample
To reproduce:
        apt-get install ruby ruby-oj
        git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
        cd lkp-tests
        bin/setup-local job.yaml # the job file attached in this email
        bin/run-local   job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Huang, Ying
---
testcase: thrulay
default_monitors:
  wait: pre-test
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  cpuidle:
  cpufreq:
  turbostat:
  sched_debug:
    interval: 10
  pmeter:
default_watchdogs:
  watch-oom:
  watchdog:
cpufreq_governor:
- performance
commit: b2776bf7149bddd1f4161f14f79520f17fc1d71d
model: Ivytown Ivy Bridge-EP
nr_cpu: 48
memory: 64G
rootfs: debian-x86_64.cgz
runtime: 300s
thrulay:
testbox: ivb42
tbox_group: ivb42
kconfig: x86_64-rhel
enqueue_time: 2014-12-08 05:18:17.151038272 +08:00
head_commit: d273c3193b966e6ecdc5948b3d86efb8514ee335
base_commit: 009d0431c3914de64666bec0d350e54fdd59df6a
branch: internal-eywa/master
kernel: "/kernel/x86_64-rhel/b2776bf7149bddd1f4161f14f79520f17fc1d71d/vmlinuz-3.18.0-gb2776bf"
user: lkp
queue: cyclic
result_root: "/result/ivb42/thrulay/performance-300s/debian-x86_64.cgz/x86_64-rhel/b2776bf7149bddd1f4161f14f79520f17fc1d71d/0"
job_file: "/lkp/scheduled/ivb42/cyclic_thrulay-performance-300s-debian-x86_64.cgz-x86_64-rhel-BASE-b2776bf7149bddd1f4161f14f79520f17fc1d71d-0.yaml"
dequeue_time: 2014-12-09 09:16:04.465533630 +08:00
job_state: finished
loadavg: 1.11 0.89 0.42 1/410 10159
start_time: '1418087801'
end_time: '1418088102'
version: "/lkp/lkp/.src-20141206-060219"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu16/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu17/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu18/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu19/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu20/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu21/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu22/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu23/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu24/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu25/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu26/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu27/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu28/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu29/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu30/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu31/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu32/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu33/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu34/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu35/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu36/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu37/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu38/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu39/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu40/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu41/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu42/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu43/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu44/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu45/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu46/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu47/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
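For anyone reproducing the setup by hand, the per-CPU writes above are equivalent to this loop (a sketch, not part of the LKP-generated script):

        for g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
        do
                echo performance > "$g"     # pin every core to the performance governor
        done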
thrulayd
thrulay -t 300 127.0.0.1
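Here thrulayd starts the thrulay server on the test box and the thrulay client then measures TCP throughput and delay over the loopback interface; -t 300 sets the test length to 300 seconds, matching runtime: 300s in the job file above.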
_______________________________________________
LKP mailing list
LKP@xxxxxxxxxxxxxxx