[LKP] [mm] bea66fbd11a: -13.5% aim7.time.minor_page_faults, -41.1% time.system_time
From: Huang Ying
Date: Thu Apr 09 2015 - 21:07:47 EST
FYI, we noticed the following changes on
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit bea66fbd11af1ca98ae26855eea41eda8582923e ("mm: numa: group related processes based on VMA flags instead of page table flags")
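For context, the commit changes where automatic NUMA balancing takes its process-grouping hint from: the stable VMA flags rather than the transient page table entry state. A paraphrased sketch of the idea in the NUMA hinting fault path (based on the commit subject, not the verbatim diff; treat the exact lines as an assumption):

	/* sketch of the old behaviour: the decision to skip task grouping
	 * was keyed off the PTE write bit, which is transient (it can be
	 * cleared by protection updates even on writable mappings) */
	if (!pte_write(pte))
		flags |= TNF_NO_GROUP;

	/* sketch of the new behaviour: keyed off the VMA flags instead,
	 * which are stable for the lifetime of the mapping */
	if (!(vma->vm_flags & VM_WRITE))
		flags |= TNF_NO_GROUP;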
testbox/testcase/testparams: brickland3/aim7/performance-3000-sieve
98cf21c61a7f5419  bea66fbd11af1ca98ae26855ee
----------------  --------------------------
     %stddev      %change      %stddev
         \           |            \
1.155e+08 ± 1% -13.5% 99915100 ± 1% aim7.time.minor_page_faults
904971 ± 1% -64.8% 318137 ± 10% aim7.time.voluntary_context_switches
120.22 ± 1% -41.1% 70.87 ± 3% aim7.time.system_time
6122485 ± 0% -1.6% 6025423 ± 0% aim7.time.involuntary_context_switches
1464 ± 14% -66.0% 498 ± 37% cpuidle.C1-IVT-4S.usage
470 ± 1% -65.0% 164 ± 26% vmstat.procs.b
125221 ± 0% -2.2% 122500 ± 0% vmstat.system.in
32780 ± 0% -10.4% 29374 ± 1% vmstat.system.cs
1.155e+08 ± 1% -13.5% 99915100 ± 1% time.minor_page_faults
120.22 ± 1% -41.1% 70.87 ± 3% time.system_time
904971 ± 1% -64.8% 318137 ± 10% time.voluntary_context_switches
84 ± 43% -67.0% 28 ± 23% numa-numastat.node0.other_node
56733780 ± 1% -12.9% 49397913 ± 1% proc-vmstat.numa_hint_faults
55660319 ± 1% -13.2% 48314771 ± 1% proc-vmstat.numa_hint_faults_local
3054195 ± 0% -13.5% 2642998 ± 0% proc-vmstat.numa_hit
3053893 ± 0% -13.5% 2642870 ± 0% proc-vmstat.numa_local
1010298 ± 1% -41.6% 590406 ± 1% proc-vmstat.numa_pages_migrated
57648883 ± 1% -13.5% 49892114 ± 1% proc-vmstat.numa_pte_updates
3076975 ± 0% -13.6% 2659661 ± 0% proc-vmstat.pgalloc_normal
1.16e+08 ± 1% -13.4% 1.005e+08 ± 1% proc-vmstat.pgfault
3003186 ± 0% -12.3% 2633789 ± 0% proc-vmstat.pgfree
1010298 ± 1% -41.6% 590406 ± 1% proc-vmstat.pgmigrate_success
6 ± 12% +33.3% 9 ± 7% sched_debug.cfs_rq[104]:/.load
5 ± 28% +66.7% 8 ± 21% sched_debug.cfs_rq[106]:/.load
5 ± 28% +61.9% 8 ± 17% sched_debug.cfs_rq[106]:/.runnable_load_avg
5069 ± 4% +6.5% 5399 ± 5% sched_debug.cfs_rq[109]:/.tg_load_avg
6 ± 6% +20.0% 7 ± 6% sched_debug.cfs_rq[110]:/.load
5060 ± 6% +10.2% 5574 ± 5% sched_debug.cfs_rq[115]:/.tg_load_avg
6 ± 0% +20.8% 7 ± 5% sched_debug.cfs_rq[115]:/.load
4974 ± 4% +11.8% 5562 ± 6% sched_debug.cfs_rq[116]:/.tg_load_avg
4906 ± 2% +11.3% 5459 ± 7% sched_debug.cfs_rq[117]:/.tg_load_avg
5 ± 14% +50.0% 7 ± 11% sched_debug.cfs_rq[118]:/.load
5 ± 8% +38.1% 7 ± 11% sched_debug.cfs_rq[118]:/.runnable_load_avg
6 ± 13% +500.0% 37 ± 42% sched_debug.cfs_rq[26]:/.tg_load_contrib
20927468 ± 4% +8.4% 22685964 ± 4% sched_debug.cfs_rq[27]:/.min_vruntime
21203485 ± 3% +13.4% 24049232 ± 7% sched_debug.cfs_rq[28]:/.min_vruntime
18134631 ± 6% +13.3% 20548482 ± 12% sched_debug.cfs_rq[29]:/.min_vruntime
7 ± 14% -30.0% 5 ± 20% sched_debug.cfs_rq[2]:/.runnable_load_avg
5 ± 15% +36.4% 7 ± 14% sched_debug.cfs_rq[45]:/.runnable_load_avg
22214373 ± 6% +9.3% 24278287 ± 4% sched_debug.cfs_rq[64]:/.min_vruntime
7 ± 20% -40.0% 4 ± 24% sched_debug.cfs_rq[67]:/.load
8 ± 15% -40.6% 4 ± 31% sched_debug.cfs_rq[67]:/.runnable_load_avg
9 ± 37% -35.9% 6 ± 6% sched_debug.cfs_rq[6]:/.runnable_load_avg
9 ± 30% -35.1% 6 ± 0% sched_debug.cfs_rq[6]:/.load
66 ± 41% -68.9% 20 ± 38% sched_debug.cfs_rq[72]:/.blocked_load_avg
74 ± 38% -63.4% 27 ± 29% sched_debug.cfs_rq[72]:/.tg_load_contrib
7 ± 14% -33.3% 5 ± 24% sched_debug.cfs_rq[73]:/.runnable_load_avg
7 ± 17% -24.1% 5 ± 15% sched_debug.cfs_rq[73]:/.load
8 ± 13% -29.4% 6 ± 11% sched_debug.cfs_rq[7]:/.load
8 ± 13% -27.3% 6 ± 11% sched_debug.cfs_rq[7]:/.runnable_load_avg
7 ± 6% -26.7% 5 ± 15% sched_debug.cfs_rq[82]:/.load
8 ± 21% -38.2% 5 ± 34% sched_debug.cfs_rq[84]:/.runnable_load_avg
20672959 ± 3% +18.6% 24508843 ± 8% sched_debug.cfs_rq[84]:/.min_vruntime
7 ± 6% -26.7% 5 ± 20% sched_debug.cfs_rq[9]:/.load
8 ± 17% -35.3% 5 ± 20% sched_debug.cfs_rq[9]:/.runnable_load_avg
12917 ± 12% -27.9% 9309 ± 4% sched_debug.cpu#0.ttwu_count
4804 ± 3% -9.3% 4356 ± 7% sched_debug.cpu#0.ttwu_local
1907 ± 36% -61.0% 744 ± 25% sched_debug.cpu#100.ttwu_local
5295 ± 24% -56.4% 2309 ± 25% sched_debug.cpu#100.ttwu_count
32486 ± 4% -17.8% 26713 ± 7% sched_debug.cpu#104.nr_switches
33183 ± 4% -14.9% 28232 ± 7% sched_debug.cpu#104.sched_count
6 ± 12% +33.3% 9 ± 7% sched_debug.cpu#104.load
15 ± 11% +52.5% 23 ± 4% sched_debug.cpu#104.nr_running
5 ± 28% +57.1% 8 ± 13% sched_debug.cpu#106.cpu_load[0]
12 ± 22% +70.8% 20 ± 2% sched_debug.cpu#106.nr_running
242 ± 17% -29.9% 169 ± 29% sched_debug.cpu#106.sched_goidle
5 ± 28% +66.7% 8 ± 21% sched_debug.cpu#106.load
15 ± 4% +20.0% 18 ± 5% sched_debug.cpu#11.nr_running
1608 ± 25% -49.3% 815 ± 32% sched_debug.cpu#11.ttwu_local
2025 ± 28% -30.6% 1405 ± 19% sched_debug.cpu#110.ttwu_count
6 ± 6% +20.0% 7 ± 6% sched_debug.cpu#110.load
673 ± 15% -32.9% 452 ± 25% sched_debug.cpu#110.ttwu_local
14 ± 9% +28.1% 18 ± 4% sched_debug.cpu#110.nr_running
13 ± 18% +39.6% 18 ± 8% sched_debug.cpu#112.nr_running
3580 ± 36% -52.6% 1698 ± 36% sched_debug.cpu#114.ttwu_count
14 ± 10% +25.9% 18 ± 8% sched_debug.cpu#115.nr_running
240 ± 9% -31.8% 164 ± 31% sched_debug.cpu#118.sched_goidle
5 ± 9% +31.8% 7 ± 5% sched_debug.cpu#118.cpu_load[3]
5 ± 9% +31.8% 7 ± 5% sched_debug.cpu#118.cpu_load[4]
5 ± 8% +38.1% 7 ± 11% sched_debug.cpu#118.cpu_load[1]
5 ± 8% +38.1% 7 ± 11% sched_debug.cpu#118.cpu_load[2]
5 ± 14% +50.0% 7 ± 11% sched_debug.cpu#118.load
5 ± 8% +38.1% 7 ± 11% sched_debug.cpu#118.cpu_load[0]
12 ± 5% +54.2% 18 ± 16% sched_debug.cpu#118.nr_running
5276 ± 14% -39.7% 3182 ± 42% sched_debug.cpu#16.ttwu_count
6741 ± 46% -66.3% 2270 ± 28% sched_debug.cpu#17.ttwu_count
7 ± 14% -30.0% 5 ± 20% sched_debug.cpu#2.cpu_load[0]
376 ± 18% +132.8% 877 ± 29% sched_debug.cpu#21.sched_goidle
6779 ± 20% -60.5% 2681 ± 40% sched_debug.cpu#23.ttwu_count
5932 ± 36% -57.0% 2552 ± 38% sched_debug.cpu#26.ttwu_count
1871 ± 43% -50.9% 919 ± 38% sched_debug.cpu#3.ttwu_local
3782 ± 14% -34.2% 2488 ± 11% sched_debug.cpu#30.ttwu_count
13 ± 3% +36.4% 18 ± 12% sched_debug.cpu#31.nr_running
1664 ± 43% -48.2% 861 ± 22% sched_debug.cpu#32.ttwu_local
14 ± 10% +29.3% 18 ± 7% sched_debug.cpu#32.nr_running
31176 ± 12% -16.1% 26153 ± 4% sched_debug.cpu#32.sched_count
5754 ± 22% -54.7% 2606 ± 25% sched_debug.cpu#36.ttwu_count
1067 ± 16% -31.1% 735 ± 16% sched_debug.cpu#38.ttwu_local
28900 ± 5% -11.0% 25726 ± 7% sched_debug.cpu#39.sched_count
1257 ± 37% -44.8% 693 ± 49% sched_debug.cpu#39.ttwu_local
4893 ± 21% -54.0% 2249 ± 47% sched_debug.cpu#40.ttwu_count
28781 ± 4% -9.0% 26197 ± 7% sched_debug.cpu#40.sched_count
4800 ± 29% -51.7% 2320 ± 13% sched_debug.cpu#42.ttwu_count
1628 ± 35% -53.3% 760 ± 26% sched_debug.cpu#42.ttwu_local
28887 ± 5% -9.1% 26257 ± 7% sched_debug.cpu#42.sched_count
5 ± 15% +36.4% 7 ± 14% sched_debug.cpu#45.cpu_load[1]
5 ± 15% +36.4% 7 ± 14% sched_debug.cpu#45.cpu_load[0]
29036 ± 3% -11.5% 25689 ± 4% sched_debug.cpu#45.nr_switches
13 ± 16% +42.6% 19 ± 14% sched_debug.cpu#45.nr_running
6709 ± 35% -69.9% 2020 ± 36% sched_debug.cpu#45.ttwu_count
30362 ± 6% -13.8% 26186 ± 4% sched_debug.cpu#45.sched_count
1784 ± 24% -53.0% 838 ± 23% sched_debug.cpu#45.ttwu_local
29093 ± 5% -9.0% 26479 ± 5% sched_debug.cpu#46.sched_count
11 ± 13% +57.4% 18 ± 22% sched_debug.cpu#48.nr_running
12 ± 23% +40.8% 17 ± 8% sched_debug.cpu#49.nr_running
1020 ± 9% -21.7% 799 ± 12% sched_debug.cpu#50.ttwu_local
514 ± 11% -28.1% 370 ± 15% sched_debug.cpu#52.sched_goidle
5126 ± 36% -63.0% 1898 ± 11% sched_debug.cpu#53.ttwu_count
29319 ± 5% -11.6% 25920 ± 5% sched_debug.cpu#54.sched_count
13 ± 17% +39.6% 18 ± 14% sched_debug.cpu#54.nr_running
28346 ± 3% -9.9% 25537 ± 5% sched_debug.cpu#54.nr_switches
558 ± 5% -35.9% 358 ± 12% sched_debug.cpu#54.sched_goidle
3768 ± 19% -54.5% 1714 ± 43% sched_debug.cpu#55.ttwu_count
30565 ± 10% -13.6% 26415 ± 2% sched_debug.cpu#55.sched_count
1582 ± 20% -50.3% 787 ± 45% sched_debug.cpu#56.ttwu_local
6921 ± 37% -73.0% 1871 ± 41% sched_debug.cpu#56.ttwu_count
1232 ± 18% -47.0% 653 ± 20% sched_debug.cpu#57.ttwu_local
626 ± 41% -38.0% 388 ± 11% sched_debug.cpu#57.sched_goidle
32348 ± 16% -17.6% 26669 ± 2% sched_debug.cpu#57.sched_count
1441 ± 42% -51.7% 695 ± 23% sched_debug.cpu#58.ttwu_local
41619 ± 45% -35.7% 26769 ± 5% sched_debug.cpu#59.sched_count
9 ± 30% -35.1% 6 ± 0% sched_debug.cpu#6.load
9 ± 37% -35.9% 6 ± 6% sched_debug.cpu#6.cpu_load[0]
9 ± 37% -35.9% 6 ± 6% sched_debug.cpu#6.cpu_load[1]
9 ± 39% -34.2% 6 ± 6% sched_debug.cpu#6.cpu_load[2]
28059 ± 4% -8.6% 25653 ± 4% sched_debug.cpu#60.sched_count
4532 ± 36% -68.8% 1413 ± 40% sched_debug.cpu#60.ttwu_count
4914 ± 38% -60.7% 1929 ± 36% sched_debug.cpu#62.ttwu_count
1128 ± 7% -42.9% 644 ± 20% sched_debug.cpu#62.ttwu_local
7 ± 20% -40.0% 4 ± 24% sched_debug.cpu#67.load
28008 ± 2% -9.4% 25363 ± 5% sched_debug.cpu#67.sched_count
27655 ± 3% -9.5% 25024 ± 5% sched_debug.cpu#67.nr_switches
1403 ± 21% -56.1% 616 ± 8% sched_debug.cpu#67.ttwu_local
4889 ± 30% -54.4% 2228 ± 36% sched_debug.cpu#67.ttwu_count
1174 ± 36% -59.3% 477 ± 27% sched_debug.cpu#69.ttwu_local
15 ± 8% +28.3% 19 ± 14% sched_debug.cpu#69.nr_running
8 ± 13% -29.4% 6 ± 11% sched_debug.cpu#7.load
4411 ± 33% -41.1% 2596 ± 36% sched_debug.cpu#7.ttwu_count
1528 ± 23% -65.7% 524 ± 21% sched_debug.cpu#71.ttwu_local
28435 ± 3% -11.3% 25224 ± 4% sched_debug.cpu#71.sched_count
7 ± 14% -35.5% 5 ± 24% sched_debug.cpu#73.cpu_load[2]
8 ± 15% -37.5% 5 ± 24% sched_debug.cpu#73.cpu_load[3]
7 ± 14% -33.3% 5 ± 24% sched_debug.cpu#73.cpu_load[1]
8 ± 15% -34.4% 5 ± 20% sched_debug.cpu#73.cpu_load[4]
7 ± 14% -33.3% 5 ± 24% sched_debug.cpu#73.cpu_load[0]
7 ± 17% -24.1% 5 ± 15% sched_debug.cpu#73.load
32392 ± 4% -17.2% 26828 ± 5% sched_debug.cpu#74.nr_switches
34243 ± 8% -17.3% 28320 ± 4% sched_debug.cpu#74.sched_count
1728 ± 29% -50.9% 849 ± 39% sched_debug.cpu#75.ttwu_local
5389 ± 21% -61.6% 2072 ± 39% sched_debug.cpu#75.ttwu_count
28023 ± 4% -8.3% 25700 ± 6% sched_debug.cpu#75.nr_switches
3398 ± 11% -33.9% 2248 ± 38% sched_debug.cpu#76.ttwu_count
7 ± 6% -26.7% 5 ± 15% sched_debug.cpu#82.load
8 ± 16% -37.1% 5 ± 32% sched_debug.cpu#84.cpu_load[3]
8 ± 21% -40.0% 5 ± 34% sched_debug.cpu#84.cpu_load[1]
8 ± 16% -40.0% 5 ± 34% sched_debug.cpu#84.cpu_load[2]
8 ± 16% -37.1% 5 ± 32% sched_debug.cpu#84.cpu_load[4]
8 ± 21% -38.2% 5 ± 34% sched_debug.cpu#84.cpu_load[0]
1555 ± 43% -56.6% 675 ± 38% sched_debug.cpu#87.ttwu_local
33842 ± 1% -15.1% 28727 ± 6% sched_debug.cpu#89.sched_count
33213 ± 1% -18.8% 26968 ± 7% sched_debug.cpu#89.nr_switches
7 ± 6% -26.7% 5 ± 20% sched_debug.cpu#9.load
12 ± 10% +37.3% 17 ± 16% sched_debug.cpu#90.nr_running
5 ± 8% +33.3% 7 ± 17% sched_debug.cpu#90.cpu_load[4]
5 ± 8% +33.3% 7 ± 17% sched_debug.cpu#90.cpu_load[3]
2539 ± 15% -66.6% 847 ± 39% sched_debug.cpu#96.ttwu_count
1124 ± 47% -49.8% 564 ± 23% sched_debug.cpu#97.ttwu_local
4 ± 10% +58.8% 6 ± 21% sched_debug.cpu#99.cpu_load[3]
4 ± 10% +47.1% 6 ± 13% sched_debug.cpu#99.cpu_load[4]
4 ± 10% +58.8% 6 ± 21% sched_debug.cpu#99.cpu_load[2]
1124 ± 34% -53.8% 519 ± 12% sched_debug.cpu#99.ttwu_local
testbox/testcase/testparams: lkp-ne02/netperf/performance-300s-200%-TCP_SENDFILE
98cf21c61a7f5419  bea66fbd11af1ca98ae26855ee
----------------  --------------------------
     %stddev      %change      %stddev
         \           |            \
3618 ± 2% -8.1% 3326 ± 2% slabinfo.proc_inode_cache.num_objs
0.26 ± 7% -8.7% 0.24 ± 5% turbostat.CPU%c1
2937 ± 0% +12.9% 3314 ± 9% numa-meminfo.node1.PageTables
387540 ± 29% -49.3% 196421 ± 39% cpuidle.C1-NHM.time
85799 ± 6% +214.0% 269448 ± 37% numa-vmstat.node0.numa_local
733 ± 0% +12.9% 827 ± 9% numa-vmstat.node1.nr_page_table_pages
69008 ± 4% +555.5% 452353 ± 47% numa-numastat.node0.local_node
68999 ± 4% +555.6% 452349 ± 47% numa-numastat.node0.numa_hit
3720 ± 9% +67.5% 6232 ± 21% proc-vmstat.pgalloc_dma
550092 ± 8% +32.5% 728886 ± 12% proc-vmstat.pgalloc_dma32
124 ± 6% -5.4% 117 ± 7% sched_debug.cfs_rq[0]:/.load
5872 ± 11% -17.9% 4821 ± 10% sched_debug.cfs_rq[10]:/.tg_load_avg
129 ± 4% -7.6% 119 ± 5% sched_debug.cfs_rq[10]:/.runnable_load_avg
129 ± 4% -10.4% 116 ± 4% sched_debug.cfs_rq[10]:/.load
5817 ± 13% -17.2% 4816 ± 9% sched_debug.cfs_rq[11]:/.tg_load_avg
1511306 ± 3% -7.4% 1399297 ± 5% sched_debug.cfs_rq[12]:/.min_vruntime
5799 ± 13% -17.2% 4800 ± 10% sched_debug.cfs_rq[12]:/.tg_load_avg
629 ± 33% -51.0% 308 ± 35% sched_debug.cfs_rq[12]:/.tg_load_contrib
1511248 ± 3% -11.9% 1331134 ± 5% sched_debug.cfs_rq[12]:/.MIN_vruntime
1511249 ± 3% -11.9% 1331134 ± 5% sched_debug.cfs_rq[12]:/.max_vruntime
522 ± 1% -55.4% 233 ± 15% sched_debug.cfs_rq[13]:/.tg_load_contrib
406 ± 0% -71.9% 114 ± 33% sched_debug.cfs_rq[13]:/.blocked_load_avg
5789 ± 13% -17.3% 4788 ± 10% sched_debug.cfs_rq[13]:/.tg_load_avg
5784 ± 13% -17.7% 4760 ± 9% sched_debug.cfs_rq[14]:/.tg_load_avg
643 ± 16% -73.9% 168 ± 34% sched_debug.cfs_rq[15]:/.tg_load_contrib
5760 ± 12% -17.7% 4742 ± 9% sched_debug.cfs_rq[15]:/.tg_load_avg
153 ± 9% +142.6% 371 ± 33% sched_debug.cfs_rq[2]:/.tg_load_contrib
178 ± 20% +191.3% 518 ± 37% sched_debug.cfs_rq[3]:/.tg_load_contrib
1484495 ± 1% -10.4% 1330254 ± 0% sched_debug.cfs_rq[4]:/.max_vruntime
1484495 ± 1% -10.4% 1330254 ± 0% sched_debug.cfs_rq[4]:/.MIN_vruntime
1525410 ± 4% -9.1% 1387280 ± 5% sched_debug.cfs_rq[4]:/.min_vruntime
363 ± 0% -38.9% 222 ± 31% sched_debug.cfs_rq[5]:/.tg_load_contrib
5881 ± 12% -14.0% 5059 ± 5% sched_debug.cfs_rq[6]:/.tg_load_avg
654 ± 4% -70.7% 191 ± 28% sched_debug.cfs_rq[7]:/.tg_load_contrib
5915 ± 11% -14.7% 5043 ± 5% sched_debug.cfs_rq[7]:/.tg_load_avg
5895 ± 11% -14.8% 5024 ± 5% sched_debug.cfs_rq[8]:/.tg_load_avg
144 ± 9% -18.2% 117 ± 5% sched_debug.cfs_rq[8]:/.load
72 ± 18% +305.9% 294 ± 38% sched_debug.cfs_rq[8]:/.blocked_load_avg
202 ± 6% +104.7% 413 ± 26% sched_debug.cfs_rq[8]:/.tg_load_contrib
5887 ± 11% -15.1% 4998 ± 6% sched_debug.cfs_rq[9]:/.tg_load_avg
128 ± 5% -7.4% 119 ± 6% sched_debug.cpu#0.cpu_load[4]
128 ± 6% -6.6% 119 ± 6% sched_debug.cpu#0.cpu_load[3]
127 ± 6% -5.5% 120 ± 5% sched_debug.cpu#0.cpu_load[2]
12677 ± 35% -49.6% 6383 ± 48% sched_debug.cpu#1.sched_goidle
127 ± 3% -7.5% 117 ± 4% sched_debug.cpu#10.cpu_load[3]
127 ± 5% -10.6% 114 ± 5% sched_debug.cpu#10.load
127 ± 3% -7.3% 117 ± 4% sched_debug.cpu#10.cpu_load[4]
285821 ± 31% -31.8% 194934 ± 0% sched_debug.cpu#12.ttwu_local
289858 ± 31% -31.6% 198184 ± 0% sched_debug.cpu#12.ttwu_count
382013 ± 45% -47.0% 202396 ± 0% sched_debug.cpu#12.nr_switches
382110 ± 45% -47.0% 202488 ± 0% sched_debug.cpu#12.sched_count
1504 ± 24% -22.1% 1171 ± 29% sched_debug.cpu#12.curr->pid
1670 ± 24% -72.9% 453 ± 17% sched_debug.cpu#13.sched_goidle
1840 ± 18% -41.9% 1068 ± 15% sched_debug.cpu#14.curr->pid
114 ± 0% +6.4% 121 ± 4% sched_debug.cpu#15.cpu_load[2]
1494 ± 18% -29.7% 1050 ± 13% sched_debug.cpu#15.curr->pid
211892 ± 5% -5.7% 199867 ± 0% sched_debug.cpu#2.ttwu_local
330774 ± 35% -37.9% 205319 ± 2% sched_debug.cpu#4.ttwu_count
324839 ± 36% -38.4% 200110 ± 2% sched_debug.cpu#4.ttwu_local
1397 ± 0% -23.7% 1066 ± 13% sched_debug.cpu#6.curr->pid
128 ± 2% -7.2% 118 ± 4% sched_debug.cpu#8.cpu_load[4]
144 ± 7% -18.2% 118 ± 5% sched_debug.cpu#8.load
141 ± 11% -18.1% 115 ± 5% sched_debug.cpu#9.load
brickland3: Brickland Ivy Bridge-EX
Memory: 512G
lkp-ne02: Nehalem-EP
Memory: 5G
aim7.time.system_time
140 ++--------------------------------------------------------------------+
130 ++ .*.. .*.. |
| ..*. .. *.. .*.. .* |
120 *+.*. * *...*. ..*..*..*... .*..*...*. |
110 ++ *. *. |
| |
100 ++ |
90 ++ |
80 ++ |
| O |
70 ++ O O O
60 ++ |
| O O O O |
50 ++ O O O O O O |
40 O+--------O------O-----O------O--O------O------O----------------------+
aim7.time.voluntary_context_switches
1.1e+06 ++----------------------------------------------------------------+
| *.. |
1e+06 ++ .*.. .. .*.. |
900000 *+.*. *...* *..*..*. ..*..*..*.. .*..*..*...* |
| *. *. |
800000 ++ |
700000 ++ |
| |
600000 ++ |
500000 ++ |
| |
400000 ++ O O O O O O O O |
300000 ++ O O O O O O O O O O O
O O O |
200000 ++----------------------------------------------------------------+
time.system_time
140 ++--------------------------------------------------------------------+
130 ++ .*.. .*.. |
| ..*. .. *.. .*.. .* |
120 *+.*. * *...*. ..*..*..*... .*..*...*. |
110 ++ *. *. |
| |
100 ++ |
90 ++ |
80 ++ |
| O |
70 ++ O O O
60 ++ |
| O O O O |
50 ++ O O O O O O |
40 O+--------O------O-----O------O--O------O------O----------------------+
time.voluntary_context_switches
1.1e+06 ++----------------------------------------------------------------+
| *.. |
1e+06 ++ .*.. .. .*.. |
900000 *+.*. *...* *..*..*. ..*..*..*.. .*..*..*...* |
| *. *. |
800000 ++ |
700000 ++ |
| |
600000 ++ |
500000 ++ |
| |
400000 ++ O O O O O O O O |
300000 ++ O O O O O O O O O O O
O O O |
200000 ++----------------------------------------------------------------+
vmstat.procs.b
500 ++-----*--*--*--------------------------------------------------------+
*.. .. *..*..*...*..*..*...*.. ..*..*..*...*..* |
450 ++ * *..*. |
400 ++ |
| |
350 ++ |
| |
300 ++ |
| |
250 ++ |
200 ++ O O O O |
| O O O O O O O O O O O O
150 ++ O O O |
O O |
100 ++----------------------------------------------------------O---------+
vmstat.system.in
126000 ++-----------------------------------------------------------------+
| .*.. |
125500 *+ ..*.. .*.. .*.. *.. .*.. *..* |
125000 ++ *. *. *.. ..*. .. *...*. .. |
| *. *..* * |
124500 ++ |
124000 ++ |
| |
123500 ++ |
123000 ++ O |
| O O O O O |
122500 ++ O O O O O O O O
122000 ++ O O O O O O O |
| |
121500 O+-----------------------------------------------------------------+
vmstat.system.cs
33500 ++------------------------------------------------------------------+
33000 ++ .*. .*.. .*..*.. |
*..*. .. *. *...*. ..*..*..*.. ..*..*..*..* |
32500 ++ .. *. *. |
32000 ++ * |
| |
31500 ++ |
31000 ++ |
30500 ++ |
| O |
30000 ++ O O O O O O O O O O O O |
29500 O+ O O O O O |
| O O
29000 ++ O |
28500 ++------------------------------------------------------------------+
proc-vmstat.numa_hit
3.3e+06 ++--------------*-------------------------------------------------+
| *. : : |
3.2e+06 ++ + .. : : |
*..*.. + : : .*.. |
3.1e+06 ++ * * *..*.. ..*. *.. .*.. .*...* |
3e+06 ++ *..*..*. *. *. |
| |
2.9e+06 ++ |
| |
2.8e+06 ++ |
2.7e+06 ++ |
| O O O
2.6e+06 O+ O O O O O O O O O O O O O O |
| O O O O |
2.5e+06 ++----------------------------------------------------------------+
proc-vmstat.numa_local
3.3e+06 ++--------------*-------------------------------------------------+
| *. : : |
3.2e+06 ++ + .. : : |
*..*.. + : : .*.. |
3.1e+06 ++ * * *..*.. ..*. *.. .*.. .*...* |
3e+06 ++ *..*..*. *. *. |
| |
2.9e+06 ++ |
| |
2.8e+06 ++ |
2.7e+06 ++ |
| O O
2.6e+06 O+ O O O O O O O O O O O O O O O |
| O O O O |
2.5e+06 ++----------------------------------------------------------------+
proc-vmstat.pgalloc_normal
3.4e+06 ++----------------------------------------------------------------+
| * |
3.3e+06 ++ : : |
3.2e+06 ++ *.. : : |
| .*.. .. . : : .*.. |
3.1e+06 *+ * * *..*.. .*...*. *.. .*.. .*...* |
3e+06 ++ *..*. *. *. |
| |
2.9e+06 ++ |
2.8e+06 ++ |
| |
2.7e+06 ++ O O |
2.6e+06 O+ O O O O O O O O O O O O O O O O
| O O O |
2.5e+06 ++----------------------------------------------------------------+
proc-vmstat.numa_pages_migrated
1.3e+06 ++----------------------------------------------------------------+
| * |
1.2e+06 ++ *. : : |
| + .. : : |
1.1e+06 *+.*.. + : : .*.. |
1e+06 ++ * * *..*.. ..*. *.. .*.. .*...* |
| *..*..*. *. *. |
900000 ++ |
| |
800000 ++ |
700000 ++ |
| |
600000 ++ O O O O
O O O O O O O O O O O O O |
500000 ++----O--O---------O--O------------------------O------------------+
proc-vmstat.pgmigrate_success
1.3e+06 ++----------------------------------------------------------------+
| * |
1.2e+06 ++ *. : : |
| + .. : : |
1.1e+06 *+.*.. + : : .*.. |
1e+06 ++ * * *..*.. ..*. *.. .*.. .*...* |
| *..*..*. *. *. |
900000 ++ |
| |
800000 ++ |
700000 ++ |
| |
600000 ++ O O O O
O O O O O O O O O O O O O |
500000 ++----O--O---------O--O------------------------O------------------+
[*] bisect-good sample
[O] bisect-bad sample
To reproduce:
apt-get install ruby
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Ying Huang
---
testcase: aim7
default-monitors:
wait: pre-test
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 10
default-watchdogs:
watch-oom:
watchdog:
cpufreq_governor: performance
model: Brickland Ivy Bridge-EX
nr_cpu: 120
memory: 512G
hdd_partitions:
swap_partitions:
pmeter_server: lkp-st01
pmeter_device: yokogawa-wt310
aim7:
load: 3000
test: sieve
branch: pm/linux-next
commit: f22e6e847115abc3a0e2ad7bb18d243d42275af1
repeat_to: 2
testbox: brickland3
tbox_group: brickland3
kconfig: x86_64-rhel
enqueue_time: 2015-03-30 08:06:58.285296102 +08:00
head_commit: 09d26e637a56ed8e935a82c0701effe9261fe943
base_commit: f22e6e847115abc3a0e2ad7bb18d243d42275af1
kernel: "/kernel/x86_64-rhel/f22e6e847115abc3a0e2ad7bb18d243d42275af1/vmlinuz-4.0.0-rc7"
user: lkp
queue: unit
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/brickland3/aim7/performance-3000-sieve/debian-x86_64-2015-02-07.cgz/x86_64-rhel/f22e6e847115abc3a0e2ad7bb18d243d42275af1/0"
LKP_SERVER: inn
job_file: "/lkp/scheduled/brickland3/unit_aim7-performance-3000-sieve-x86_64-rhel-BASE-f22e6e847115abc3a0e2ad7bb18d243d42275af1-1-20150330-121711-1uflwdz.yaml"
dequeue_time: 2015-04-08 02:07:02.968063261 +08:00
max_uptime: 2170.0200000000004
modules_initrd: "/kernel/x86_64-rhel/f22e6e847115abc3a0e2ad7bb18d243d42275af1/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/aim7-x86_64.cgz"
job_state: finished
loadavg: 1830.31 1381.32 590.55 1/982 11882
start_time: '1428430105'
end_time: '1428430337'
version: "/lkp/lkp/.src-20150406-212218"
# set the performance cpufreq governor on all 120 CPUs (cpu0..cpu119),
# one scaling_governor file per CPU
for g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
do
	echo performance > "$g"
done