[LKP] [locking/rwsem] b3fd4f03ca0: -11.1% vm-scalability.throughput
From: Huang Ying
Date: Fri Mar 13 2015 - 01:36:21 EST
FYI, we noticed the following changes on
git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit b3fd4f03ca0b9952221f39ae6790e698bf4b39e7 ("locking/rwsem: Avoid deceiving lock spinners")
testbox/testcase/testparams: brickland3/vm-scalability/performance-300s-small-allocs
7a215f89a0335582 b3fd4f03ca0b9952221f39ae67
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
2.38 ± 18% +247.4% 8.27 ± 16% vm-scalability.stddev
24226456 ± 0% -23.0% 18650874 ± 1% vm-scalability.time.voluntary_context_switches
1622 ± 0% -15.6% 1369 ± 1% vm-scalability.time.system_time
765 ± 0% -13.1% 665 ± 1% vm-scalability.time.percent_of_cpu_this_job_got
1036 ± 0% -11.9% 914 ± 1% vm-scalability.time.user_time
7.244e+08 ± 0% -11.1% 6.439e+08 ± 1% vm-scalability.time.minor_page_faults
10841276 ± 0% -11.1% 9639981 ± 1% vm-scalability.throughput
8922883 ± 3% -70.2% 2658543 ± 4% cpuidle.C1-IVT-4S.usage
1.19e+10 ± 2% -66.2% 4.016e+09 ± 4% cpuidle.C1-IVT-4S.time
1.97 ± 4% +106.7% 4.08 ± 6% perf-profile.cpu-cycles.intel_idle.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
58.29 ± 1% -50.2% 29.03 ± 2% turbostat.CPU%c1
34.84 ± 2% +86.6% 65.01 ± 1% turbostat.CPU%c6
170814 Â 7% -39.9% 102682 Â 11% sched_debug.cpu#21.sched_goidle
367762 Â 11% -39.9% 220850 Â 10% sched_debug.cpu#26.sched_count
5.12 ± 2% -39.8% 3.08 ± 5% perf-profile.cpu-cycles.osq_lock.rwsem_down_write_failed.call_rwsem_down_write_failed.vma_link.mmap_region
342453 Â 7% -39.9% 205985 Â 11% sched_debug.cpu#21.nr_switches
3.14 Â 2% +68.0% 5.28 Â 5% perf-profile.cpu-cycles.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
170317 Â 6% -39.9% 102389 Â 11% sched_debug.cpu#22.sched_goidle
168710 Â 6% -38.4% 103876 Â 11% sched_debug.cpu#23.sched_goidle
341400 Â 6% -39.8% 205416 Â 11% sched_debug.cpu#22.nr_switches
338590 Â 6% -38.5% 208381 Â 11% sched_debug.cpu#23.nr_switches
165879 Â 4% -36.6% 105178 Â 10% sched_debug.cpu#18.sched_goidle
168958 Â 6% -39.0% 103043 Â 11% sched_debug.cpu#20.sched_goidle
359148 Â 5% -39.2% 218455 Â 11% sched_debug.cpu#20.sched_count
332511 Â 4% -36.5% 211029 Â 10% sched_debug.cpu#18.nr_switches
338875 Â 6% -39.0% 206782 Â 11% sched_debug.cpu#20.nr_switches
359968 Â 6% -39.4% 218057 Â 12% sched_debug.cpu#21.sched_count
166207 Â 4% -38.5% 102168 Â 11% sched_debug.cpu#27.sched_goidle
164881 Â 6% -37.0% 103846 Â 11% sched_debug.cpu#25.sched_goidle
330721 Â 6% -37.0% 208344 Â 11% sched_debug.cpu#25.nr_switches
333161 Â 4% -38.3% 205591 Â 11% sched_debug.cpu#27.nr_switches
362674 Â 7% -40.1% 217123 Â 10% sched_debug.cpu#22.sched_count
350433 Â 7% -37.3% 219595 Â 13% sched_debug.cpu#15.nr_switches
174599 Â 7% -37.4% 109366 Â 13% sched_debug.cpu#15.sched_goidle
174038 Â 9% -36.8% 109941 Â 12% sched_debug.cpu#17.sched_goidle
348896 Â 9% -36.7% 220793 Â 12% sched_debug.cpu#17.nr_switches
327443 Â 6% -36.8% 207015 Â 11% sched_debug.cpu#24.nr_switches
348687 Â 5% -37.2% 219100 Â 11% sched_debug.cpu#27.sched_count
163220 Â 6% -36.9% 103066 Â 11% sched_debug.cpu#24.sched_goidle
167997 Â 5% -35.7% 108017 Â 11% sched_debug.cpu#16.sched_goidle
365757 Â 10% -38.9% 223561 Â 12% sched_debug.cpu#28.sched_count
336756 Â 5% -35.6% 216751 Â 11% sched_debug.cpu#16.nr_switches
174236 Â 12% -40.6% 103542 Â 10% sched_debug.cpu#26.sched_goidle
349494 Â 12% -40.6% 207718 Â 10% sched_debug.cpu#26.nr_switches
175130 Â 10% -39.7% 105573 Â 13% sched_debug.cpu#28.sched_goidle
351090 Â 10% -39.6% 212090 Â 13% sched_debug.cpu#28.nr_switches
350972 Â 5% -35.2% 227402 Â 10% sched_debug.cpu#19.sched_count
343794 Â 5% -36.2% 219297 Â 10% sched_debug.cpu#24.sched_count
347268 Â 5% -36.3% 221228 Â 10% sched_debug.cpu#25.sched_count
352999 Â 5% -34.1% 232554 Â 9% sched_debug.cpu#16.sched_count
354362 Â 7% -37.0% 223292 Â 10% sched_debug.cpu#23.sched_count
166091 Â 7% -36.6% 105356 Â 14% sched_debug.cpu#29.sched_goidle
350840 Â 6% -35.5% 226331 Â 13% sched_debug.cpu#29.sched_count
332928 Â 7% -36.4% 211785 Â 14% sched_debug.cpu#29.nr_switches
369396 Â 7% -36.4% 234822 Â 13% sched_debug.cpu#15.sched_count
364063 Â 9% -34.6% 238151 Â 11% sched_debug.cpu#17.sched_count
349193 Â 4% -35.2% 226315 Â 9% sched_debug.cpu#18.sched_count
3.94 Â 1% +49.9% 5.90 Â 5% perf-profile.cpu-cycles.cpuidle_enter.cpu_startup_entry.start_secondary
5361900 Â 1% +50.5% 8067121 Â 0% cpuidle.C6-IVT-4S.usage
166471 Â 5% -36.3% 106063 Â 10% sched_debug.cpu#19.sched_goidle
334078 Â 5% -36.3% 212767 Â 10% sched_debug.cpu#19.nr_switches
511523 Â 11% +40.1% 716466 Â 9% sched_debug.cpu#38.avg_idle
5141 Â 10% -21.3% 4046 Â 18% sched_debug.cfs_rq[16]:/.avg->runnable_avg_sum
185324 Â 6% -28.9% 131826 Â 11% sched_debug.cpu#43.ttwu_count
111 Â 10% -21.6% 87 Â 18% sched_debug.cfs_rq[16]:/.tg_runnable_contrib
188387 Â 5% -28.4% 134976 Â 11% sched_debug.cpu#35.ttwu_count
182306 Â 7% -28.0% 131276 Â 9% sched_debug.cpu#19.ttwu_count
181618 Â 5% -28.6% 129685 Â 10% sched_debug.cpu#18.ttwu_count
370815 Â 7% -25.5% 276125 Â 11% sched_debug.cpu#38.sched_count
178281 Â 7% -25.9% 132044 Â 11% sched_debug.cpu#38.sched_goidle
181914 Â 5% -28.0% 130970 Â 8% sched_debug.cpu#16.ttwu_count
357347 Â 7% -25.9% 264826 Â 11% sched_debug.cpu#38.nr_switches
546593 Â 7% +38.6% 757389 Â 4% sched_debug.cpu#27.avg_idle
183057 Â 5% -27.7% 132439 Â 8% sched_debug.cpu#17.ttwu_count
174289 Â 8% -25.5% 129871 Â 13% sched_debug.cpu#43.sched_goidle
349323 Â 8% -25.4% 260438 Â 13% sched_debug.cpu#43.nr_switches
187088 Â 5% -27.4% 135758 Â 10% sched_debug.cpu#36.ttwu_count
190589 Â 5% -29.1% 135135 Â 12% sched_debug.cpu#33.ttwu_count
179655 Â 10% -28.6% 128343 Â 13% sched_debug.cpu#44.sched_goidle
367566 Â 11% -26.0% 271924 Â 14% sched_debug.cpu#43.sched_count
360119 Â 10% -28.5% 257380 Â 13% sched_debug.cpu#44.nr_switches
177427 Â 5% -25.5% 132241 Â 9% sched_debug.cpu#20.ttwu_count
187477 Â 6% -28.9% 133263 Â 11% sched_debug.cpu#39.ttwu_count
187313 Â 5% -28.7% 133487 Â 11% sched_debug.cpu#41.ttwu_count
192203 Â 4% -28.3% 137818 Â 10% sched_debug.cpu#31.ttwu_count
183679 Â 5% -28.1% 132147 Â 10% sched_debug.cpu#44.ttwu_count
367665 Â 9% -24.6% 277237 Â 15% sched_debug.cpu#42.sched_count
178211 Â 3% -26.9% 130348 Â 8% sched_debug.cpu#23.ttwu_count
175478 Â 4% -25.6% 130478 Â 9% sched_debug.cpu#22.ttwu_count
186974 Â 6% -27.6% 135310 Â 10% sched_debug.cpu#38.ttwu_count
190431 Â 4% -28.0% 137033 Â 11% sched_debug.cpu#32.ttwu_count
190722 Â 4% -28.4% 136639 Â 10% sched_debug.cpu#34.ttwu_count
175431 Â 8% -25.3% 131079 Â 12% sched_debug.cpu#42.sched_goidle
361690 Â 8% -24.8% 272015 Â 13% sched_debug.cpu#39.sched_count
351619 Â 8% -25.2% 262838 Â 12% sched_debug.cpu#42.nr_switches
175056 Â 6% -24.8% 131564 Â 12% sched_debug.cpu#40.sched_goidle
350873 Â 6% -24.8% 263820 Â 12% sched_debug.cpu#40.nr_switches
173107 Â 6% -25.3% 129289 Â 13% sched_debug.cpu#37.sched_goidle
346966 Â 6% -25.3% 259257 Â 13% sched_debug.cpu#37.nr_switches
171359 Â 7% -24.8% 128941 Â 11% sched_debug.cpu#39.sched_goidle
343474 Â 7% -24.7% 258590 Â 11% sched_debug.cpu#39.nr_switches
375852 Â 12% -28.4% 269206 Â 15% sched_debug.cpu#44.sched_count
180303 Â 8% -25.0% 135309 Â 10% sched_debug.cpu#30.sched_goidle
177803 Â 5% -25.3% 132893 Â 9% sched_debug.cpu#21.ttwu_count
361386 Â 8% -24.9% 271330 Â 10% sched_debug.cpu#30.nr_switches
172414 Â 7% -24.9% 129400 Â 13% sched_debug.cpu#41.sched_goidle
345579 Â 7% -24.9% 259489 Â 13% sched_debug.cpu#41.nr_switches
373591 Â 8% -25.3% 279094 Â 13% sched_debug.cpu#31.sched_count
177533 Â 9% -26.9% 129812 Â 12% sched_debug.cpu#36.sched_goidle
179094 Â 8% -26.3% 131936 Â 11% sched_debug.cpu#35.sched_goidle
376042 Â 8% -26.7% 275458 Â 12% sched_debug.cpu#35.sched_count
355841 Â 9% -26.8% 260341 Â 12% sched_debug.cpu#36.nr_switches
183165 Â 12% -29.4% 129301 Â 13% sched_debug.cpu#33.sched_goidle
371977 Â 9% -27.3% 270521 Â 13% sched_debug.cpu#36.sched_count
184524 Â 5% -27.3% 134078 Â 10% sched_debug.cpu#42.ttwu_count
204591 Â 7% -22.5% 158606 Â 16% sched_debug.cfs_rq[33]:/.min_vruntime
358950 Â 8% -26.3% 264608 Â 11% sched_debug.cpu#35.nr_switches
367143 Â 12% -29.4% 259297 Â 13% sched_debug.cpu#33.nr_switches
184796 Â 5% -26.7% 135370 Â 8% sched_debug.cpu#15.ttwu_count
560186 Â 7% +36.4% 764034 Â 4% sched_debug.cpu#15.avg_idle
192024 Â 5% -27.6% 138984 Â 9% sched_debug.cpu#30.ttwu_count
186661 Â 5% -27.9% 134534 Â 11% sched_debug.cpu#40.ttwu_count
175890 Â 4% -26.4% 129452 Â 8% sched_debug.cpu#26.ttwu_count
366664 Â 8% -24.7% 276276 Â 14% sched_debug.cpu#34.sched_count
186046 Â 5% -26.7% 136338 Â 10% sched_debug.cpu#37.ttwu_count
173138 Â 4% -26.5% 127219 Â 8% sched_debug.cpu#28.ttwu_count
231881 Â 4% -26.0% 171683 Â 6% sched_debug.cpu#48.ttwu_count
229108 Â 4% -24.5% 173024 Â 5% sched_debug.cpu#47.ttwu_count
545213 Â 6% +30.6% 712076 Â 3% sched_debug.cpu#39.avg_idle
175780 Â 8% -25.0% 131911 Â 12% sched_debug.cpu#34.sched_goidle
352325 Â 8% -24.9% 264519 Â 12% sched_debug.cpu#34.nr_switches
368764 Â 7% -25.9% 273326 Â 13% sched_debug.cpu#40.sched_count
178369 Â 7% -25.3% 133316 Â 12% sched_debug.cpu#31.sched_goidle
357523 Â 7% -25.2% 267329 Â 12% sched_debug.cpu#31.nr_switches
445168 Â 6% -21.5% 349498 Â 7% sched_debug.cpu#50.sched_count
178502 Â 3% -27.0% 130326 Â 8% sched_debug.cpu#24.ttwu_count
232438 Â 4% -25.2% 173912 Â 4% sched_debug.cpu#45.ttwu_count
214980 Â 6% -21.3% 169213 Â 7% sched_debug.cpu#50.sched_goidle
430801 Â 6% -21.3% 339213 Â 7% sched_debug.cpu#50.nr_switches
175354 Â 7% -25.0% 131440 Â 12% sched_debug.cpu#32.sched_goidle
351473 Â 7% -25.0% 263573 Â 12% sched_debug.cpu#32.nr_switches
175818 Â 4% -25.5% 131027 Â 9% sched_debug.cpu#25.ttwu_count
562351 Â 4% +33.0% 747969 Â 7% sched_debug.cpu#17.avg_idle
226274 Â 5% -24.6% 170581 Â 5% sched_debug.cpu#54.ttwu_count
566741 Â 5% +33.2% 754744 Â 4% sched_debug.cpu#29.avg_idle
365692 Â 8% -24.7% 275497 Â 14% sched_debug.cpu#32.sched_count
443973 Â 6% -22.1% 345964 Â 7% sched_debug.cpu#51.sched_count
360816 Â 8% -25.3% 269548 Â 14% sched_debug.cpu#41.sched_count
224711 Â 4% -24.6% 169414 Â 6% sched_debug.cpu#53.ttwu_count
380979 Â 13% -28.9% 270783 Â 14% sched_debug.cpu#33.sched_count
174502 Â 4% -27.0% 127311 Â 8% sched_debug.cpu#29.ttwu_count
573889 Â 4% +32.2% 758635 Â 5% sched_debug.cpu#24.avg_idle
523204 Â 10% +29.1% 675536 Â 9% sched_debug.cpu#6.avg_idle
173555 Â 4% -25.7% 128988 Â 8% sched_debug.cpu#27.ttwu_count
228859 Â 4% -25.2% 171229 Â 6% sched_debug.cpu#49.ttwu_count
529356 Â 9% +32.4% 700872 Â 6% sched_debug.cpu#33.avg_idle
565916 Â 5% +33.4% 755047 Â 7% sched_debug.cpu#23.avg_idle
232257 Â 3% -25.1% 173926 Â 5% sched_debug.cpu#46.ttwu_count
227954 Â 4% -24.3% 172631 Â 5% sched_debug.cpu#52.ttwu_count
197241 Â 5% -23.5% 150963 Â 8% sched_debug.cpu#4.ttwu_count
228392 Â 3% -24.8% 171779 Â 6% sched_debug.cpu#51.ttwu_count
542129 Â 5% +28.0% 693751 Â 6% sched_debug.cpu#13.avg_idle
364245 Â 8% -25.1% 272652 Â 15% sched_debug.cpu#37.sched_count
558275 Â 8% +36.1% 759570 Â 6% sched_debug.cpu#26.avg_idle
225027 Â 5% -24.7% 169427 Â 6% sched_debug.cpu#57.ttwu_count
17.06 ± 1% -23.0% 13.13 ± 3% perf-profile.cpu-cycles.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff
197493 ± 5% -22.8% 152503 ± 8% sched_debug.cpu#2.ttwu_count
16.99 ± 1% -23.2% 13.05 ± 3% perf-profile.cpu-cycles.rwsem_down_write_failed.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff
223164 Â 4% -24.7% 168105 Â 6% sched_debug.cpu#59.ttwu_count
223884 Â 4% -24.6% 168887 Â 4% sched_debug.cpu#58.ttwu_count
538073 Â 6% +27.8% 687436 Â 8% sched_debug.cpu#43.avg_idle
445916 Â 7% -22.4% 345938 Â 6% sched_debug.cpu#52.sched_count
214967 Â 6% -21.9% 167898 Â 6% sched_debug.cpu#52.sched_goidle
534156 Â 9% +25.2% 668876 Â 5% sched_debug.cpu#11.avg_idle
430800 Â 6% -21.9% 336561 Â 6% sched_debug.cpu#52.nr_switches
117 Â 8% -21.7% 91 Â 5% sched_debug.cfs_rq[36]:/.tg_runnable_contrib
224801 Â 4% -24.0% 170764 Â 5% sched_debug.cpu#55.ttwu_count
225103 Â 4% -24.9% 168953 Â 5% sched_debug.cpu#56.ttwu_count
24226456 Â 0% -23.0% 18650874 Â 1% time.voluntary_context_switches
5411 Â 8% -21.3% 4256 Â 4% sched_debug.cfs_rq[36]:/.avg->runnable_avg_sum
212964 Â 5% -21.5% 167092 Â 6% sched_debug.cpu#51.sched_goidle
426769 Â 5% -21.5% 334967 Â 6% sched_debug.cpu#51.nr_switches
519159 Â 4% +27.0% 659572 Â 7% sched_debug.cpu#52.avg_idle
193464 Â 4% -22.1% 150665 Â 7% sched_debug.cpu#7.ttwu_count
226747 Â 4% -24.4% 171347 Â 6% sched_debug.cpu#50.ttwu_count
572755 Â 6% +29.0% 738639 Â 4% sched_debug.cpu#25.avg_idle
520910 Â 7% +35.0% 703382 Â 6% sched_debug.cpu#32.avg_idle
2.696e+10 Â 1% +29.0% 3.478e+10 Â 0% cpuidle.C6-IVT-4S.time
530311 Â 12% +31.1% 695099 Â 5% sched_debug.cpu#44.avg_idle
204543 Â 8% -20.5% 162661 Â 15% sched_debug.cfs_rq[31]:/.min_vruntime
484242 Â 9% +31.7% 637669 Â 8% sched_debug.cpu#47.avg_idle
6.85 ± 1% -22.4% 5.32 ± 3% perf-profile.cpu-cycles.rwsem_spin_on_owner.rwsem_down_write_failed.call_rwsem_down_write_failed.vma_link.mmap_region
528770 Â 7% +31.6% 695818 Â 6% sched_debug.cpu#40.avg_idle
223475 Â 4% -20.9% 176718 Â 6% sched_debug.cfs_rq[52]:/.min_vruntime
577331 Â 4% +29.2% 745806 Â 4% sched_debug.cpu#16.avg_idle
192821 Â 4% -23.4% 147763 Â 8% sched_debug.cpu#10.ttwu_count
198393 Â 4% -23.1% 152635 Â 7% sched_debug.cpu#1.ttwu_count
572991 Â 9% +29.0% 738876 Â 3% sched_debug.cpu#18.avg_idle
531346 Â 5% +26.8% 673808 Â 6% sched_debug.cpu#35.avg_idle
531692 Â 8% +30.0% 691275 Â 7% sched_debug.cpu#36.avg_idle
573689 Â 9% +30.1% 746581 Â 3% sched_debug.cpu#28.avg_idle
194767 Â 3% -22.1% 151736 Â 7% sched_debug.cpu#6.ttwu_count
576252 Â 10% +29.8% 748134 Â 5% sched_debug.cpu#19.avg_idle
189779 Â 5% -22.5% 147019 Â 9% sched_debug.cpu#13.ttwu_count
574093 Â 9% +28.0% 734982 Â 4% sched_debug.cpu#22.avg_idle
530896 Â 7% +25.4% 665864 Â 7% sched_debug.cpu#9.avg_idle
222143 Â 5% -19.0% 179889 Â 9% sched_debug.cfs_rq[49]:/.min_vruntime
190106 Â 5% -22.4% 147518 Â 7% sched_debug.cpu#14.ttwu_count
192933 Â 3% -21.8% 150835 Â 8% sched_debug.cpu#8.ttwu_count
193656 Â 4% -22.7% 149678 Â 8% sched_debug.cpu#9.ttwu_count
196739 Â 5% -22.9% 151747 Â 7% sched_debug.cpu#3.ttwu_count
434909 Â 6% -18.4% 354789 Â 6% sched_debug.cpu#55.sched_count
194722 Â 4% -22.0% 151895 Â 8% sched_debug.cpu#5.ttwu_count
439285 Â 7% -19.7% 352534 Â 6% sched_debug.cpu#47.sched_count
585003 Â 7% +31.4% 768551 Â 5% sched_debug.cpu#20.avg_idle
432219 Â 7% -19.5% 347734 Â 7% sched_debug.cpu#48.sched_count
224113 Â 5% -18.9% 181798 Â 8% sched_debug.cfs_rq[48]:/.min_vruntime
534566 Â 7% +30.3% 696442 Â 6% sched_debug.cpu#31.avg_idle
525194 Â 6% +23.5% 648587 Â 7% sched_debug.cpu#7.avg_idle
209128 Â 6% -19.4% 168562 Â 7% sched_debug.cpu#48.sched_goidle
419098 Â 6% -19.4% 337908 Â 7% sched_debug.cpu#48.nr_switches
448417 Â 7% -19.8% 359813 Â 6% sched_debug.cpu#46.sched_count
491962 Â 8% +26.7% 623449 Â 4% sched_debug.cpu#48.avg_idle
212354 Â 6% -19.7% 170477 Â 5% sched_debug.cpu#47.sched_goidle
581409 Â 7% +23.6% 718790 Â 4% sched_debug.cpu#37.avg_idle
194066 Â 6% -17.8% 159565 Â 11% sched_debug.cfs_rq[16]:/.min_vruntime
425571 Â 6% -19.7% 341752 Â 5% sched_debug.cpu#47.nr_switches
215882 Â 7% -19.5% 173827 Â 5% sched_debug.cpu#46.sched_goidle
442267 Â 6% -21.4% 347467 Â 6% sched_debug.cpu#59.sched_count
432617 Â 7% -19.5% 348457 Â 5% sched_debug.cpu#46.nr_switches
569496 Â 11% +32.0% 751590 Â 5% sched_debug.cpu#21.avg_idle
189763 Â 5% -22.2% 147705 Â 8% sched_debug.cpu#12.ttwu_count
223064 Â 4% -19.3% 179906 Â 5% sched_debug.cfs_rq[53]:/.min_vruntime
213869 Â 5% -21.0% 168888 Â 5% sched_debug.cpu#59.sched_goidle
428592 Â 5% -21.0% 338537 Â 5% sched_debug.cpu#59.nr_switches
441291 Â 6% -19.8% 353975 Â 5% sched_debug.cpu#45.sched_count
213493 Â 5% -19.5% 171778 Â 5% sched_debug.cpu#45.sched_goidle
427837 Â 5% -19.5% 344360 Â 5% sched_debug.cpu#45.nr_switches
191795 Â 5% -22.6% 148369 Â 8% sched_debug.cpu#11.ttwu_count
194309 Â 6% -19.3% 156815 Â 8% sched_debug.cfs_rq[17]:/.min_vruntime
221301 Â 5% -20.9% 175025 Â 7% sched_debug.cfs_rq[56]:/.min_vruntime
538571 Â 8% +24.4% 670227 Â 8% sched_debug.cpu#3.avg_idle
223672 Â 5% -18.9% 181388 Â 7% sched_debug.cfs_rq[47]:/.min_vruntime
425387 Â 7% -18.3% 347554 Â 7% sched_debug.cpu#54.sched_count
199287 Â 6% -21.7% 156117 Â 12% sched_debug.cfs_rq[15]:/.min_vruntime
1.00 Â 5% -15.8% 0.84 Â 6% perf-profile.cpu-cycles.kmem_cache_alloc_trace.perf_event_mmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff
385907 Â 8% -23.7% 294552 Â 10% sched_debug.cpu#6.sched_count
493228 Â 5% +27.4% 628579 Â 8% sched_debug.cpu#54.avg_idle
185090 Â 8% -23.0% 142559 Â 10% sched_debug.cpu#6.sched_goidle
542727 Â 8% +27.5% 691759 Â 4% sched_debug.cpu#42.avg_idle
209830 Â 6% -18.0% 172096 Â 6% sched_debug.cpu#55.sched_goidle
370989 Â 8% -23.0% 285813 Â 10% sched_debug.cpu#6.nr_switches
420473 Â 6% -18.0% 344973 Â 6% sched_debug.cpu#55.nr_switches
201389 Â 4% -20.7% 159785 Â 7% sched_debug.cpu#0.ttwu_count
365245 Â 7% -20.1% 291843 Â 9% sched_debug.cpu#5.sched_count
176316 Â 7% -19.7% 141647 Â 9% sched_debug.cpu#5.sched_goidle
353385 Â 7% -19.6% 284000 Â 9% sched_debug.cpu#5.nr_switches
430699 Â 7% -20.6% 341980 Â 7% sched_debug.cpu#53.sched_count
434688 Â 7% -17.9% 356994 Â 7% sched_debug.cpu#58.sched_count
910 Â 11% -15.6% 767 Â 12% sched_debug.cpu#34.ttwu_local
194153 Â 7% -16.9% 161410 Â 5% sched_debug.cfs_rq[19]:/.min_vruntime
435164 Â 10% -20.0% 348238 Â 7% sched_debug.cpu#56.sched_count
25686 Â 0% -18.8% 20849 Â 1% softirqs.HRTIMER
205841 Â 7% -18.6% 167608 Â 6% sched_debug.cpu#54.sched_goidle
412493 Â 7% -18.5% 335995 Â 6% sched_debug.cpu#54.nr_switches
209629 Â 7% -17.5% 172950 Â 7% sched_debug.cpu#58.sched_goidle
420079 Â 7% -17.5% 346715 Â 7% sched_debug.cpu#58.nr_switches
190909 Â 5% -19.3% 154071 Â 8% sched_debug.cfs_rq[26]:/.min_vruntime
535739 Â 10% +25.8% 674192 Â 3% sched_debug.cpu#41.avg_idle
220505 Â 5% -17.2% 182496 Â 10% sched_debug.cfs_rq[57]:/.min_vruntime
207365 Â 6% -19.9% 166054 Â 7% sched_debug.cpu#53.sched_goidle
415569 Â 6% -19.9% 332885 Â 7% sched_debug.cpu#53.nr_switches
433052 Â 7% -19.5% 348641 Â 8% sched_debug.cpu#57.sched_count
534493 Â 4% +25.6% 671066 Â 4% sched_debug.cpu#2.avg_idle
1.02 Â 2% -20.2% 0.81 Â 8% perf-profile.cpu-cycles.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer.__run_hrtimer
5217 Â 12% -17.6% 4297 Â 9% sched_debug.cfs_rq[26]:/.avg->runnable_avg_sum
209594 Â 6% -19.1% 169509 Â 8% sched_debug.cpu#57.sched_goidle
420012 Â 6% -19.1% 339802 Â 8% sched_debug.cpu#57.nr_switches
503208 Â 10% +25.5% 631683 Â 7% sched_debug.cpu#55.avg_idle
113 Â 12% -17.9% 92 Â 9% sched_debug.cfs_rq[26]:/.tg_runnable_contrib
549109 Â 5% +22.6% 673144 Â 6% sched_debug.cpu#14.avg_idle
210495 Â 9% -19.7% 169109 Â 7% sched_debug.cpu#56.sched_goidle
421839 Â 9% -19.6% 338979 Â 7% sched_debug.cpu#56.nr_switches
221205 Â 4% -17.6% 182374 Â 7% sched_debug.cfs_rq[50]:/.min_vruntime
502758 Â 6% +23.1% 618755 Â 7% sched_debug.cpu#46.avg_idle
222043 Â 4% -18.6% 180727 Â 10% sched_debug.cfs_rq[51]:/.min_vruntime
221617 Â 4% -18.3% 181127 Â 8% sched_debug.cfs_rq[55]:/.min_vruntime
557533 Â 11% +22.2% 681218 Â 7% sched_debug.cpu#12.avg_idle
187238 Â 6% -17.7% 154099 Â 10% sched_debug.cfs_rq[29]:/.min_vruntime
551333 Â 10% +22.2% 673592 Â 7% sched_debug.cpu#34.avg_idle
224579 Â 4% -16.6% 187288 Â 9% sched_debug.cfs_rq[54]:/.min_vruntime
184873 Â 6% -15.9% 155439 Â 8% sched_debug.cfs_rq[28]:/.min_vruntime
206172 Â 6% -18.2% 168730 Â 7% sched_debug.cpu#49.sched_goidle
413179 Â 6% -18.1% 338238 Â 7% sched_debug.cpu#49.nr_switches
225157 Â 5% -16.4% 188227 Â 9% sched_debug.cfs_rq[46]:/.min_vruntime
175150 Â 5% -16.9% 145619 Â 11% sched_debug.cpu#3.sched_goidle
351054 Â 5% -16.8% 291944 Â 11% sched_debug.cpu#3.nr_switches
564018 Â 3% +21.0% 682616 Â 6% sched_debug.cpu#4.avg_idle
5529 Â 8% -18.1% 4527 Â 11% sched_debug.cfs_rq[39]:/.avg->runnable_avg_sum
600812 Â 5% -15.3% 508926 Â 9% numa-vmstat.node2.numa_local
187471 Â 4% -18.5% 152874 Â 10% sched_debug.cfs_rq[22]:/.min_vruntime
19730 Â 6% -13.7% 17025 Â 9% sched_debug.cfs_rq[26]:/.exec_clock
425149 Â 6% -18.2% 347836 Â 7% sched_debug.cpu#49.sched_count
22908 Â 5% -12.9% 19945 Â 5% sched_debug.cfs_rq[58]:/.exec_clock
191793 Â 4% -16.6% 160031 Â 10% sched_debug.cfs_rq[24]:/.min_vruntime
14.49 Â 0% +16.4% 16.87 Â 2% perf-profile.cpu-cycles.start_secondary
514287 Â 10% +19.7% 615733 Â 6% sched_debug.cpu#58.avg_idle
1622 Â 0% -15.6% 1369 Â 1% time.system_time
14.45 Â 0% +16.2% 16.80 Â 2% perf-profile.cpu-cycles.cpu_startup_entry.start_secondary
1117 Â 6% -12.1% 982 Â 3% proc-vmstat.pgmigrate_success
1117 Â 6% -12.1% 982 Â 3% proc-vmstat.numa_pages_migrated
23520 Â 4% -14.0% 20233 Â 6% sched_debug.cfs_rq[48]:/.exec_clock
372296 Â 1% -18.0% 305136 Â 9% sched_debug.cpu#2.sched_count
218377 Â 6% -16.5% 182283 Â 6% sched_debug.cfs_rq[58]:/.min_vruntime
548860 Â 8% +25.4% 688468 Â 6% sched_debug.cpu#8.avg_idle
224066 Â 5% -17.7% 184445 Â 8% sched_debug.cfs_rq[45]:/.min_vruntime
23172 Â 4% -12.9% 20184 Â 5% sched_debug.cfs_rq[52]:/.exec_clock
947362 Â 3% -14.4% 810580 Â 7% numa-numastat.node2.local_node
22992 Â 5% -12.9% 20026 Â 7% sched_debug.cfs_rq[59]:/.exec_clock
23572 Â 4% -12.3% 20668 Â 5% sched_debug.cfs_rq[46]:/.exec_clock
6293 Â 8% -16.0% 5288 Â 8% sched_debug.cfs_rq[47]:/.avg->runnable_avg_sum
23545 Â 4% -13.3% 20402 Â 6% sched_debug.cfs_rq[47]:/.exec_clock
7 Â 0% -14.3% 6 Â 0% vmstat.procs.r
136 Â 8% -16.3% 114 Â 8% sched_debug.cfs_rq[47]:/.tg_runnable_contrib
952018 Â 3% -14.1% 817562 Â 7% numa-numastat.node2.numa_hit
684874 Â 4% -13.1% 595283 Â 7% numa-vmstat.node2.numa_hit
22864 Â 5% -12.7% 19967 Â 5% sched_debug.cfs_rq[57]:/.exec_clock
190734 Â 5% -16.3% 159695 Â 8% sched_debug.cfs_rq[23]:/.min_vruntime
237300 Â 6% -14.9% 202014 Â 7% numa-vmstat.node3.nr_page_table_pages
949123 Â 6% -14.9% 808058 Â 7% numa-meminfo.node3.PageTables
11.45 Â 4% +12.2% 12.84 Â 7% perf-profile.cpu-cycles.task_numa_work.task_work_run.do_notify_resume.retint_signal
561716 Â 7% +22.7% 689210 Â 5% sched_debug.cpu#10.avg_idle
23421 Â 4% -12.5% 20484 Â 5% sched_debug.cfs_rq[45]:/.exec_clock
23090 Â 4% -13.4% 19999 Â 6% sched_debug.cfs_rq[53]:/.exec_clock
2257945 Â 6% -14.5% 1929848 Â 7% numa-meminfo.node3.SUnreclaim
564591 Â 6% -14.5% 482457 Â 7% numa-vmstat.node3.nr_slab_unreclaimable
7903 Â 0% -13.6% 6830 Â 1% sched_debug.cfs_rq[11]:/.tg->runnable_avg
522411 Â 6% +15.9% 605704 Â 4% sched_debug.cpu#59.avg_idle
7899 Â 0% -13.6% 6827 Â 1% sched_debug.cfs_rq[10]:/.tg->runnable_avg
7884 Â 0% -13.6% 6811 Â 1% sched_debug.cfs_rq[2]:/.tg->runnable_avg
7913 Â 0% -13.6% 6839 Â 1% sched_debug.cfs_rq[14]:/.tg->runnable_avg
2272179 Â 6% -14.4% 1944358 Â 7% numa-meminfo.node3.Slab
7896 Â 0% -13.6% 6826 Â 1% sched_debug.cfs_rq[9]:/.tg->runnable_avg
7894 Â 0% -13.6% 6822 Â 1% sched_debug.cfs_rq[8]:/.tg->runnable_avg
7877 Â 0% -13.6% 6802 Â 1% sched_debug.cfs_rq[0]:/.tg->runnable_avg
7890 Â 0% -13.6% 6817 Â 1% sched_debug.cfs_rq[4]:/.tg->runnable_avg
7891 Â 0% -13.6% 6816 Â 1% sched_debug.cfs_rq[5]:/.tg->runnable_avg
7894 Â 0% -13.6% 6823 Â 1% sched_debug.cfs_rq[7]:/.tg->runnable_avg
512300 Â 6% +18.2% 605480 Â 9% sched_debug.cpu#50.avg_idle
7908 Â 0% -13.6% 6836 Â 1% sched_debug.cfs_rq[13]:/.tg->runnable_avg
7888 Â 0% -13.6% 6814 Â 1% sched_debug.cfs_rq[3]:/.tg->runnable_avg
7916 Â 0% -13.6% 6843 Â 1% sched_debug.cfs_rq[15]:/.tg->runnable_avg
7892 Â 0% -13.6% 6818 Â 1% sched_debug.cfs_rq[6]:/.tg->runnable_avg
178921 Â 1% -17.9% 146921 Â 9% sched_debug.cpu#2.sched_goidle
7918 Â 0% -13.5% 6848 Â 1% sched_debug.cfs_rq[16]:/.tg->runnable_avg
7883 Â 0% -13.6% 6809 Â 1% sched_debug.cfs_rq[1]:/.tg->runnable_avg
7903 Â 0% -13.5% 6832 Â 1% sched_debug.cfs_rq[12]:/.tg->runnable_avg
7930 Â 0% -13.5% 6861 Â 1% sched_debug.cfs_rq[20]:/.tg->runnable_avg
7925 Â 0% -13.5% 6859 Â 1% sched_debug.cfs_rq[19]:/.tg->runnable_avg
7921 Â 0% -13.5% 6852 Â 1% sched_debug.cfs_rq[17]:/.tg->runnable_avg
7932 Â 0% -13.4% 6866 Â 1% sched_debug.cfs_rq[21]:/.tg->runnable_avg
358611 Â 1% -17.9% 294548 Â 9% sched_debug.cpu#2.nr_switches
7922 Â 0% -13.4% 6857 Â 1% sched_debug.cfs_rq[18]:/.tg->runnable_avg
7985 Â 0% -13.4% 6918 Â 1% sched_debug.cfs_rq[36]:/.tg->runnable_avg
555502 Â 4% +16.3% 646081 Â 5% sched_debug.cpu#5.avg_idle
7944 Â 0% -13.5% 6876 Â 1% sched_debug.cfs_rq[24]:/.tg->runnable_avg
7994 Â 0% -13.4% 6926 Â 1% sched_debug.cfs_rq[39]:/.tg->runnable_avg
7959 Â 0% -13.4% 6892 Â 1% sched_debug.cfs_rq[28]:/.tg->runnable_avg
7997 Â 0% -13.3% 6930 Â 1% sched_debug.cfs_rq[40]:/.tg->runnable_avg
7962 Â 0% -13.4% 6895 Â 1% sched_debug.cfs_rq[29]:/.tg->runnable_avg
7935 Â 0% -13.4% 6869 Â 1% sched_debug.cfs_rq[22]:/.tg->runnable_avg
7982 Â 0% -13.4% 6915 Â 1% sched_debug.cfs_rq[35]:/.tg->runnable_avg
8000 Â 0% -13.3% 6932 Â 1% sched_debug.cfs_rq[41]:/.tg->runnable_avg
7951 Â 0% -13.4% 6884 Â 1% sched_debug.cfs_rq[26]:/.tg->runnable_avg
7954 Â 0% -13.4% 6889 Â 1% sched_debug.cfs_rq[27]:/.tg->runnable_avg
7976 Â 0% -13.4% 6911 Â 1% sched_debug.cfs_rq[34]:/.tg->runnable_avg
7937 Â 0% -13.4% 6872 Â 1% sched_debug.cfs_rq[23]:/.tg->runnable_avg
8003 Â 0% -13.3% 6937 Â 1% sched_debug.cfs_rq[43]:/.tg->runnable_avg
765 Â 0% -13.1% 665 Â 1% time.percent_of_cpu_this_job_got
7964 Â 0% -13.4% 6896 Â 1% sched_debug.cfs_rq[30]:/.tg->runnable_avg
7987 Â 0% -13.4% 6919 Â 1% sched_debug.cfs_rq[37]:/.tg->runnable_avg
8027 Â 0% -13.3% 6961 Â 1% sched_debug.cfs_rq[50]:/.tg->runnable_avg
7988 Â 0% -13.4% 6921 Â 1% sched_debug.cfs_rq[38]:/.tg->runnable_avg
7946 Â 0% -13.4% 6883 Â 1% sched_debug.cfs_rq[25]:/.tg->runnable_avg
8025 Â 0% -13.3% 6955 Â 1% sched_debug.cfs_rq[48]:/.tg->runnable_avg
8012 Â 0% -13.3% 6944 Â 1% sched_debug.cfs_rq[45]:/.tg->runnable_avg
7974 Â 0% -13.3% 6909 Â 1% sched_debug.cfs_rq[33]:/.tg->runnable_avg
1.13 Â 1% -14.8% 0.96 Â 7% perf-profile.cpu-cycles.update_process_times.tick_sched_handle.tick_sched_timer.__run_hrtimer.hrtimer_interrupt
8000 Â 0% -13.3% 6933 Â 1% sched_debug.cfs_rq[42]:/.tg->runnable_avg
8035 Â 0% -13.3% 6969 Â 1% sched_debug.cfs_rq[51]:/.tg->runnable_avg
8024 Â 0% -13.3% 6954 Â 1% sched_debug.cfs_rq[47]:/.tg->runnable_avg
8014 Â 0% -13.3% 6950 Â 1% sched_debug.cfs_rq[46]:/.tg->runnable_avg
8026 Â 0% -13.3% 6959 Â 1% sched_debug.cfs_rq[49]:/.tg->runnable_avg
7968 Â 0% -13.4% 6902 Â 1% sched_debug.cfs_rq[31]:/.tg->runnable_avg
37.12 ± 0% -12.9% 32.34 ± 2% perf-profile.cpu-cycles.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.sys_mmap_pgoff
8007 Â 0% -13.4% 6938 Â 1% sched_debug.cfs_rq[44]:/.tg->runnable_avg
8037 Â 0% -13.2% 6973 Â 1% sched_debug.cfs_rq[52]:/.tg->runnable_avg
7968 Â 0% -13.3% 6906 Â 1% sched_debug.cfs_rq[32]:/.tg->runnable_avg
1.13 Â 1% -14.1% 0.97 Â 7% perf-profile.cpu-cycles.tick_sched_handle.isra.18.tick_sched_timer.__run_hrtimer.hrtimer_interrupt.local_apic_timer_interrupt
8068 Â 0% -13.1% 7007 Â 1% sched_debug.cfs_rq[63]:/.tg->runnable_avg
8040 Â 0% -13.2% 6977 Â 1% sched_debug.cfs_rq[53]:/.tg->runnable_avg
8053 Â 0% -13.2% 6990 Â 1% sched_debug.cfs_rq[57]:/.tg->runnable_avg
8064 Â 0% -13.1% 7004 Â 1% sched_debug.cfs_rq[61]:/.tg->runnable_avg
8049 Â 0% -13.2% 6985 Â 1% sched_debug.cfs_rq[56]:/.tg->runnable_avg
8070 Â 0% -13.1% 7009 Â 1% sched_debug.cfs_rq[64]:/.tg->runnable_avg
3595633 Â 5% -12.9% 3133416 Â 6% numa-meminfo.node3.MemUsed
8055 Â 0% -13.2% 6993 Â 1% sched_debug.cfs_rq[58]:/.tg->runnable_avg
8071 Â 0% -13.1% 7014 Â 1% sched_debug.cfs_rq[65]:/.tg->runnable_avg
8046 Â 0% -13.2% 6983 Â 1% sched_debug.cfs_rq[55]:/.tg->runnable_avg
8045 Â 0% -13.3% 6978 Â 1% sched_debug.cfs_rq[54]:/.tg->runnable_avg
8086 Â 0% -13.1% 7027 Â 1% sched_debug.cfs_rq[71]:/.tg->runnable_avg
8137 Â 0% -13.0% 7080 Â 1% sched_debug.cfs_rq[91]:/.tg->runnable_avg
8081 Â 0% -13.1% 7022 Â 1% sched_debug.cfs_rq[68]:/.tg->runnable_avg
8076 Â 0% -13.1% 7017 Â 1% sched_debug.cfs_rq[66]:/.tg->runnable_avg
8062 Â 0% -13.1% 7002 Â 1% sched_debug.cfs_rq[60]:/.tg->runnable_avg
8139 Â 0% -12.9% 7085 Â 1% sched_debug.cfs_rq[92]:/.tg->runnable_avg
8088 Â 0% -13.1% 7031 Â 1% sched_debug.cfs_rq[72]:/.tg->runnable_avg
8064 Â 0% -13.1% 7006 Â 1% sched_debug.cfs_rq[62]:/.tg->runnable_avg
8143 Â 0% -12.9% 7090 Â 1% sched_debug.cfs_rq[94]:/.tg->runnable_avg
41932371 Â 0% -13.0% 36489731 Â 1% slabinfo.vm_area_struct.active_objs
8078 Â 0% -13.1% 7019 Â 1% sched_debug.cfs_rq[67]:/.tg->runnable_avg
8083 Â 0% -13.1% 7025 Â 1% sched_debug.cfs_rq[69]:/.tg->runnable_avg
8058 Â 0% -13.2% 6997 Â 1% sched_debug.cfs_rq[59]:/.tg->runnable_avg
8142 Â 0% -12.9% 7089 Â 1% sched_debug.cfs_rq[93]:/.tg->runnable_avg
8084 Â 0% -13.1% 7028 Â 1% sched_debug.cfs_rq[70]:/.tg->runnable_avg
8134 Â 0% -13.0% 7078 Â 1% sched_debug.cfs_rq[90]:/.tg->runnable_avg
8123 Â 0% -13.1% 7062 Â 1% sched_debug.cfs_rq[84]:/.tg->runnable_avg
3274295 Â 0% -13.0% 2849232 Â 1% meminfo.PageTables
818565 Â 0% -13.0% 712300 Â 1% proc-vmstat.nr_page_table_pages
8094 Â 0% -13.0% 7038 Â 1% sched_debug.cfs_rq[74]:/.tg->runnable_avg
8133 Â 0% -13.0% 7077 Â 1% sched_debug.cfs_rq[89]:/.tg->runnable_avg
8103 Â 0% -13.0% 7046 Â 1% sched_debug.cfs_rq[77]:/.tg->runnable_avg
8144 Â 0% -12.9% 7094 Â 1% sched_debug.cfs_rq[95]:/.tg->runnable_avg
8146 Â 0% -12.9% 7096 Â 1% sched_debug.cfs_rq[96]:/.tg->runnable_avg
8128 Â 0% -13.0% 7070 Â 1% sched_debug.cfs_rq[86]:/.tg->runnable_avg
8091 Â 0% -13.0% 7035 Â 1% sched_debug.cfs_rq[73]:/.tg->runnable_avg
536950 Â 10% +23.4% 662508 Â 6% sched_debug.cpu#30.avg_idle
8096 Â 0% -13.1% 7039 Â 1% sched_debug.cfs_rq[75]:/.tg->runnable_avg
8127 Â 0% -13.0% 7067 Â 1% sched_debug.cfs_rq[85]:/.tg->runnable_avg
8149 Â 0% -12.9% 7097 Â 1% sched_debug.cfs_rq[97]:/.tg->runnable_avg
8151 Â 0% -12.9% 7100 Â 1% sched_debug.cfs_rq[99]:/.tg->runnable_avg
8153 Â 0% -12.9% 7101 Â 1% sched_debug.cfs_rq[100]:/.tg->runnable_avg
8129 Â 0% -13.0% 7074 Â 1% sched_debug.cfs_rq[87]:/.tg->runnable_avg
8118 Â 0% -13.0% 7060 Â 1% sched_debug.cfs_rq[83]:/.tg->runnable_avg
8107 Â 0% -13.1% 7047 Â 1% sched_debug.cfs_rq[78]:/.tg->runnable_avg
8111 Â 0% -13.0% 7054 Â 1% sched_debug.cfs_rq[80]:/.tg->runnable_avg
8098 Â 0% -13.0% 7044 Â 1% sched_debug.cfs_rq[76]:/.tg->runnable_avg
42188778 Â 0% -12.9% 36733179 Â 1% slabinfo.vm_area_struct.num_objs
958835 Â 0% -12.9% 834844 Â 1% slabinfo.vm_area_struct.num_slabs
958835 Â 0% -12.9% 834844 Â 1% slabinfo.vm_area_struct.active_slabs
8154 Â 0% -12.9% 7103 Â 1% sched_debug.cfs_rq[101]:/.tg->runnable_avg
8116 Â 0% -13.0% 7058 Â 1% sched_debug.cfs_rq[82]:/.tg->runnable_avg
8130 Â 0% -12.9% 7077 Â 1% sched_debug.cfs_rq[88]:/.tg->runnable_avg
8171 Â 0% -12.9% 7120 Â 1% sched_debug.cfs_rq[107]:/.tg->runnable_avg
8113 Â 0% -13.0% 7058 Â 1% sched_debug.cfs_rq[81]:/.tg->runnable_avg
8109 Â 0% -13.1% 7051 Â 1% sched_debug.cfs_rq[79]:/.tg->runnable_avg
8155 Â 0% -12.9% 7106 Â 1% sched_debug.cfs_rq[102]:/.tg->runnable_avg
8157 Â 0% -12.8% 7111 Â 1% sched_debug.cfs_rq[103]:/.tg->runnable_avg
8165 Â 0% -12.9% 7115 Â 1% sched_debug.cfs_rq[105]:/.tg->runnable_avg
8159 Â 0% -12.8% 7113 Â 1% sched_debug.cfs_rq[104]:/.tg->runnable_avg
8148 Â 0% -12.9% 7099 Â 1% sched_debug.cfs_rq[98]:/.tg->runnable_avg
8169 Â 0% -12.9% 7118 Â 1% sched_debug.cfs_rq[106]:/.tg->runnable_avg
1.78 Â 2% -11.3% 1.58 Â 4% perf-profile.cpu-cycles.tick_sched_timer.__run_hrtimer.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt
7802174 Â 0% -12.8% 6805780 Â 1% meminfo.SUnreclaim
1950525 Â 0% -12.8% 1701410 Â 1% proc-vmstat.nr_slab_unreclaimable
8198 Â 0% -12.7% 7154 Â 1% sched_debug.cfs_rq[119]:/.tg->runnable_avg
8175 Â 0% -12.8% 7125 Â 1% sched_debug.cfs_rq[109]:/.tg->runnable_avg
8180 Â 0% -12.8% 7136 Â 1% sched_debug.cfs_rq[112]:/.tg->runnable_avg
8188 Â 0% -12.8% 7143 Â 1% sched_debug.cfs_rq[115]:/.tg->runnable_avg
8194 Â 0% -12.7% 7151 Â 1% sched_debug.cfs_rq[117]:/.tg->runnable_avg
8176 Â 0% -12.8% 7129 Â 1% sched_debug.cfs_rq[110]:/.tg->runnable_avg
8189 Â 0% -12.8% 7145 Â 1% sched_debug.cfs_rq[116]:/.tg->runnable_avg
189413 Â 4% -13.7% 163494 Â 8% sched_debug.cfs_rq[25]:/.min_vruntime
8184 Â 0% -12.7% 7141 Â 1% sched_debug.cfs_rq[114]:/.tg->runnable_avg
8182 Â 0% -12.8% 7137 Â 1% sched_debug.cfs_rq[113]:/.tg->runnable_avg
8172 Â 0% -12.8% 7122 Â 1% sched_debug.cfs_rq[108]:/.tg->runnable_avg
8193 Â 0% -12.7% 7152 Â 1% sched_debug.cfs_rq[118]:/.tg->runnable_avg
8178 Â 0% -12.8% 7133 Â 1% sched_debug.cfs_rq[111]:/.tg->runnable_avg
7859652 Â 0% -12.7% 6863230 Â 1% meminfo.Slab
0.95 Â 6% +15.6% 1.10 Â 3% perf-profile.cpu-cycles.__fget.fget.sys_mmap_pgoff.sys_mmap.system_call_fastpath
1.10 Â 2% -10.5% 0.98 Â 4% perf-profile.cpu-cycles.ktime_get.clockevents_program_event.tick_program_event.__hrtimer_start_range_ns.hrtimer_start_range_ns
23237 Â 4% -13.3% 20143 Â 6% sched_debug.cfs_rq[51]:/.exec_clock
1.82 Â 2% -11.0% 1.62 Â 4% perf-profile.cpu-cycles.__run_hrtimer.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
0.97 Â 7% +14.0% 1.11 Â 3% perf-profile.cpu-cycles.fget.sys_mmap_pgoff.sys_mmap.system_call_fastpath
23242 Â 4% -12.9% 20253 Â 6% sched_debug.cfs_rq[49]:/.exec_clock
22886 Â 5% -13.7% 19749 Â 5% sched_debug.cfs_rq[56]:/.exec_clock
1036 Â 0% -11.9% 914 Â 1% time.user_time
8527124 Â 0% -11.6% 7534220 Â 1% vmstat.memory.cache
1695225 Â 0% -10.7% 1513459 Â 1% softirqs.TIMER
23318 Â 6% -13.1% 20269 Â 6% sched_debug.cfs_rq[54]:/.exec_clock
667649 Â 3% -10.0% 600607 Â 4% numa-vmstat.node3.numa_local
7.244e+08 Â 0% -11.1% 6.439e+08 Â 1% time.minor_page_faults
7.254e+08 Â 0% -11.1% 6.449e+08 Â 1% proc-vmstat.pgfault
1.92 Â 2% -10.9% 1.71 Â 4% perf-profile.cpu-cycles.read_hpet.ktime_get.clockevents_program_event.tick_program_event.__hrtimer_start_range_ns
22971 Â 5% -12.9% 19998 Â 5% sched_debug.cfs_rq[55]:/.exec_clock
131 Â 10% -11.7% 115 Â 6% sched_debug.cfs_rq[52]:/.tg_runnable_contrib
42.89 Â 0% -10.4% 38.41 Â 2% perf-profile.cpu-cycles.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.sys_mmap_pgoff.sys_mmap
6053 Â 10% -11.6% 5349 Â 6% sched_debug.cfs_rq[52]:/.avg->runnable_avg_sum
753344 Â 2% -9.0% 685539 Â 4% numa-vmstat.node3.numa_hit
501098 Â 0% -11.4% 443942 Â 2% softirqs.SCHED
1.19 Â 2% -9.0% 1.08 Â 3% perf-profile.cpu-cycles.clockevents_program_event.tick_program_event.__hrtimer_start_range_ns.hrtimer_start_range_ns.tick_nohz_restart
90940 Â 2% +9.4% 99459 Â 2% sched_debug.cpu#54.nr_load_updates
88590 Â 2% +9.8% 97229 Â 4% sched_debug.cpu#2.nr_load_updates
90669 Â 2% +9.6% 99355 Â 2% sched_debug.cpu#56.nr_load_updates
91272 Â 2% +9.4% 99833 Â 2% sched_debug.cpu#49.nr_load_updates
1.22 ± 1% +10.5% 1.35 ± 2% perf-profile.cpu-cycles.schedule.rwsem_down_write_failed.call_rwsem_down_write_failed.vma_link.mmap_region
89287 ± 3% +10.3% 98507 ± 4% sched_debug.cpu#1.nr_load_updates
45.38 ± 0% -9.4% 41.10 ± 2% perf-profile.cpu-cycles.do_mmap_pgoff.vm_mmap_pgoff.sys_mmap_pgoff.sys_mmap.system_call_fastpath
22980 Â 5% -12.4% 20128 Â 6% sched_debug.cfs_rq[50]:/.exec_clock
1.19 Â 2% -8.5% 1.09 Â 4% perf-profile.cpu-cycles.tick_program_event.__hrtimer_start_range_ns.hrtimer_start_range_ns.tick_nohz_restart.tick_nohz_idle_exit
5570129 Â 0% -9.4% 5048932 Â 1% proc-vmstat.pgalloc_normal
1100174 Â 3% -6.9% 1024326 Â 3% numa-numastat.node3.local_node
2.06 Â 2% -9.1% 1.87 Â 4% perf-profile.cpu-cycles.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity
46.01 ± 0% -9.2% 41.78 ± 2% perf-profile.cpu-cycles.vm_mmap_pgoff.sys_mmap_pgoff.sys_mmap.system_call_fastpath
91494 Â 2% +10.3% 100902 Â 2% sched_debug.cpu#46.nr_load_updates
20296 ± 3% -24.7% 15293 ± 3% vmstat.system.in
140148 ± 0% -21.9% 109393 ± 1% vmstat.system.cs
191 ± 0% -19.5% 153 ± 0% turbostat.CorWatt
257 ± 0% -14.7% 219 ± 0% turbostat.PkgWatt
218 ± 0% -13.5% 188 ± 1% turbostat.Avg_MHz
6.84 ± 0% -13.6% 5.91 ± 1% turbostat.%Busy
578 ± 0% -8.5% 529 ± 0% pmeter.Average_Active_Power
73.34 ± 0% -3.0% 71.15 ± 0% turbostat.RAMWatt
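Note on reading the table: each row gives the mean for the parent commit 7a215f89a0335582 with its ±%stddev, the relative %change, and the mean for b3fd4f03ca0b9952221f39ae67 with its ±%stddev. For example, for vm-scalability.throughput: (9639981 - 10841276) / 10841276 ≈ -11.1%.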
brickland3: Brickland Ivy Bridge-EX
Memory: 512G
vm-scalability.stddev
16 ++---------------------------------------------------------------------+
| O |
14 ++ O O |
12 ++ O O O O O |
| O OO O O |
10 O+ O O O O O |
| O O O O O O O O O O |
8 ++ O O |
| O O O |
6 ++ O |
4 ++ |
| .* .* *. .* *.|
2 *+**. .*.* .* *.**. * + *.** + : * * *.*. .*.**.* *
| ** *.* *.**.*.* *.* * * *.*.* ** |
0 ++---------------------------------------------------------------------+
[*] bisect-good sample
[O] bisect-bad sample
To reproduce:
apt-get install ruby
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Ying Huang
---
testcase: vm-scalability
default-monitors:
wait: pre-test
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 10
default_watchdogs:
watch-oom:
watchdog:
cpufreq_governor: performance
commit: ce43fa581f61c4756301e7fd8609f50579e13d5d
model: Brickland Ivy Bridge-EX
nr_cpu: 120
memory: 512G
hdd_partitions:
swap_partitions:
pmeter_server: lkp-st01
pmeter_device: yokogawa-wt310
perf-profile:
runtime: 300s
size:
vm-scalability:
test: small-allocs
testbox: brickland3
tbox_group: brickland3
kconfig: x86_64-rhel
enqueue_time: 2015-03-08 04:48:33.187343214 +08:00
head_commit: ce43fa581f61c4756301e7fd8609f50579e13d5d
base_commit: 13a7a6ac0a11197edcd0f756a035f472b42cdf8b
branch: linux-devel/devel-hourly-2015030900
kernel: "/kernel/x86_64-rhel/ce43fa581f61c4756301e7fd8609f50579e13d5d/vmlinuz-4.0.0-rc2-00784-gce43fa5"
user: lkp
queue: cyclic
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/brickland3/vm-scalability/performance-300s-small-allocs/debian-x86_64-2015-02-07.cgz/x86_64-rhel/ce43fa581f61c4756301e7fd8609f50579e13d5d/0"
job_file: "/lkp/scheduled/brickland3/cyclic_vm-scalability-performance-300s-small-allocs-x86_64-rhel-HEAD-ce43fa581f61c4756301e7fd8609f50579e13d5d-0-20150308-92360-aeasab.yaml"
dequeue_time: 2015-03-09 06:20:56.728013487 +08:00
job_state: finished
loadavg: 78.64 75.53 36.25 1/953 14075
start_time: '1425853327'
end_time: '1425853675'
version: "/lkp/lkp/.src-20150308-175746"
for gov in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
do
	echo performance > $gov
done
mount -t tmpfs -o size=100% vm-scalability-tmp /tmp/vm-scalability-tmp
truncate -s 540949544960 /tmp/vm-scalability.img
mkfs.xfs -q /tmp/vm-scalability.img
mount -o loop /tmp/vm-scalability.img /tmp/vm-scalability
./case-small-allocs
./usemem --runtime 300 -n 120 --readonly --unit 40960 36650387592
umount /tmp/vm-scalability-tmp
umount /tmp/vm-scalability
rm /tmp/vm-scalability.img
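For context, the slabinfo.vm_area_struct counts and the vma_link/rwsem_down_write_failed frames in the profile above suggest the workload is essentially many concurrent small read-only file mappings, with writers contending on the rwsem taken inside vma_link() (presumably the i_mmap rwsem of the shared backing file on this 4.0-rc2 kernel). Below is a minimal, hypothetical user-space sketch of that kind of load -- NOT the actual case-small-allocs/usemem source. NPROC and UNIT mirror the usemem invocation above; COUNT and the backing-file path are arbitrary placeholders.

/*
 * Hypothetical sketch only -- not the vm-scalability usemem source.
 * Every child repeatedly maps small read-only chunks of the same file,
 * so each mmap() goes through mmap_region()/vma_link(), taking the
 * file's i_mmap rwsem for write, which appears to be the
 * rwsem_down_write_failed path seen in the profile above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define NPROC 120	/* mirrors usemem -n 120 */
#define UNIT  40960	/* mirrors usemem --unit 40960 */
#define COUNT 50000	/* mappings per child: arbitrary placeholder */

int main(void)
{
	/* shared, sparse backing file (placeholder path on the test mount) */
	int fd = open("/tmp/vm-scalability/sparse-file", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, (off_t)UNIT * COUNT) < 0) {
		perror("backing file");
		return 1;
	}

	for (int i = 0; i < NPROC; i++) {
		if (fork() == 0) {
			for (long j = 0; j < COUNT; j++) {
				/* non-contiguous placement vs. file offset, so VMAs do not merge */
				void *p = mmap(NULL, UNIT, PROT_READ, MAP_PRIVATE,
					       fd, (off_t)j * UNIT);
				if (p == MAP_FAILED) {
					perror("mmap");
					_exit(1);
				}
				/* touch one byte so the mapping is actually faulted in */
				volatile char sink = *(char *)p;
				(void)sink;
			}
			_exit(0);
		}
	}
	while (wait(NULL) > 0)
		;	/* reap all children */
	close(fd);
	return 0;
}

Built with gcc and pointed at a file on the test filesystem, something along these lines should recreate the writer-side rwsem contention in the mmap path that the optimistic-spinning change in b3fd4f03ca0 affects.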
_______________________________________________
LKP mailing list
LKP@xxxxxxxxxxxxxxx