[LKP] [f2fs] 465a05fecc2: +147.3% fsmark.files_per_sec

From: Huang Ying
Date: Sun Apr 19 2015 - 20:44:42 EST


FYI, we noticed the below changes on

git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit 465a05fecc2c7921d50f33cd10621abc049cbabf ("f2fs: enable inline data by default")
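
This commit turns on f2fs inline data by default, so the 9-byte files created by this fsmark job can be packed into the inode block instead of each taking a separate data block. Fewer blocks have to be written and fsync'ed per file, which is consistent with the ~40% drop in proc-vmstat.nr_written and proc-vmstat.pgpgout shown below. For an A/B check on a kernel that already contains this commit, a minimal sketch, assuming the kernel also supports the noinline_data mount option to switch the new default back off:

# previous behaviour: inline data disabled (assumes noinline_data is supported)
mount -t f2fs -o noinline_data /dev/sdb /fs/sdb

# new default after this commit, equivalent to the old opt-in "-o inline_data"
mount -t f2fs -o inline_data /dev/sdb /fs/sdb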


testbox/testcase/testparams: lkp-st02/fsmark/1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd

6a2de6eb3f267ba6 465a05fecc2c7921d50f33cd10
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
679 ± 1% +147.3% 1681 ± 1% fsmark.files_per_sec
15 ± 3% +177.4% 43 ± 0% fsmark.time.percent_of_cpu_this_job_got
150.90 ± 2% -59.5% 61.13 ± 1% fsmark.time.elapsed_time.max
150.90 ± 2% -59.5% 61.13 ± 1% fsmark.time.elapsed_time
1771558 ± 2% +6.2% 1881064 ± 0% fsmark.time.file_system_outputs
949761 ± 0% +22.2% 1160509 ± 0% fsmark.time.voluntary_context_switches
8240 ± 3% +80.1% 14840 ± 4% fsmark.time.involuntary_context_switches
184.27 ± 1% -48.5% 94.88 ± 0% uptime.boot
525 ± 2% -12.3% 460 ± 1% uptime.idle
89898 ± 0% -60.6% 35417 ± 0% softirqs.BLOCK
30856 ± 3% -19.7% 24774 ± 2% softirqs.RCU
34393 ± 0% -9.4% 31172 ± 0% softirqs.SCHED
5775 ± 1% +44.3% 8335 ± 0% vmstat.io.bo
18 ± 3% -47.2% 9 ± 5% vmstat.procs.b
11960 ± 1% +141.8% 28918 ± 0% vmstat.system.in
21443 ± 1% +152.8% 54201 ± 0% vmstat.system.cs
3093 ± 2% +9.5% 3387 ± 4% slabinfo.kmalloc-192.active_objs
3245 ± 3% +4.4% 3388 ± 4% slabinfo.kmalloc-192.num_objs
3627 ± 1% +16.7% 4233 ± 5% slabinfo.vm_area_struct.active_objs
3681 ± 1% +16.9% 4302 ± 5% slabinfo.vm_area_struct.num_objs
32653 ± 2% +10.9% 36223 ± 2% meminfo.Active(anon)
32637 ± 2% +10.4% 36030 ± 2% meminfo.AnonPages
1517 ± 0% -68.3% 481 ± 4% meminfo.Mlocked
3562 ± 1% +15.9% 4127 ± 0% meminfo.PageTables
1517 ± 0% -68.3% 481 ± 4% meminfo.Unevictable
150.90 ± 2% -59.5% 61.13 ± 1% time.elapsed_time.max
150.90 ± 2% -59.5% 61.13 ± 1% time.elapsed_time
8240 ± 3% +80.1% 14840 ± 4% time.involuntary_context_switches
15 ± 3% +177.4% 43 ± 0% time.percent_of_cpu_this_job_got
949761 ± 0% +22.2% 1160509 ± 0% time.voluntary_context_switches
8162 ± 2% +10.9% 9050 ± 2% proc-vmstat.nr_active_anon
8157 ± 2% +10.4% 9004 ± 2% proc-vmstat.nr_anon_pages
150 ± 6% -46.8% 80 ± 4% proc-vmstat.nr_dirty
379 ± 0% -68.4% 119 ± 4% proc-vmstat.nr_mlock
889 ± 1% +15.8% 1029 ± 0% proc-vmstat.nr_page_table_pages
379 ± 0% -68.4% 119 ± 4% proc-vmstat.nr_unevictable
220865 ± 2% -40.2% 131982 ± 1% proc-vmstat.nr_written
411249 ± 0% -23.1% 316279 ± 0% proc-vmstat.numa_hit
411249 ± 0% -23.1% 316279 ± 0% proc-vmstat.numa_local
26883 ± 1% +45.7% 39168 ± 2% proc-vmstat.pgactivate
169935 ± 1% -22.0% 132534 ± 1% proc-vmstat.pgalloc_dma32
264529 ± 1% -23.3% 202778 ± 1% proc-vmstat.pgalloc_normal
231722 ± 1% -51.0% 113542 ± 0% proc-vmstat.pgfault
193885 ± 2% -49.8% 97263 ± 3% proc-vmstat.pgfree
883675 ± 2% -40.2% 528402 ± 1% proc-vmstat.pgpgout
396 ± 6% +72.6% 683 ± 5% sched_debug.cfs_rq[0]:/.tg->runnable_avg
5121 ± 6% +106.0% 10549 ± 7% sched_debug.cfs_rq[0]:/.tg_load_avg
50 ± 12% +66.8% 84 ± 4% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
3813 ± 10% -18.1% 3122 ± 1% sched_debug.cfs_rq[0]:/.exec_clock
2357 ± 12% +65.8% 3907 ± 4% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
5066 ± 7% +107.6% 10516 ± 8% sched_debug.cfs_rq[1]:/.tg_load_avg
402 ± 6% +71.6% 690 ± 4% sched_debug.cfs_rq[1]:/.tg->runnable_avg
47 ± 2% +73.8% 83 ± 3% sched_debug.cfs_rq[1]:/.tg_runnable_contrib
2234 ± 2% +71.6% 3832 ± 2% sched_debug.cfs_rq[1]:/.avg->runnable_avg_sum
144 ± 15% -34.3% 94 ± 16% sched_debug.cfs_rq[2]:/.load
2213 ± 8% +94.6% 4307 ± 3% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
5026 ± 7% +105.0% 10306 ± 9% sched_debug.cfs_rq[2]:/.tg_load_avg
47 ± 9% +96.3% 93 ± 4% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
404 ± 6% +70.6% 690 ± 4% sched_debug.cfs_rq[2]:/.tg->runnable_avg
25 ± 10% -33.7% 16 ± 12% sched_debug.cfs_rq[2]:/.runnable_load_avg
591 ± 33% +176.1% 1632 ± 31% sched_debug.cfs_rq[3]:/.blocked_load_avg
620 ± 32% +170.6% 1678 ± 31% sched_debug.cfs_rq[3]:/.tg_load_contrib
5004 ± 7% +105.0% 10259 ± 9% sched_debug.cfs_rq[3]:/.tg_load_avg
408 ± 5% +69.8% 693 ± 4% sched_debug.cfs_rq[3]:/.tg->runnable_avg
13 ± 12% -25.0% 9 ± 15% sched_debug.cfs_rq[3]:/.nr_spread_over
2289 ± 4% +64.4% 3763 ± 4% sched_debug.cfs_rq[3]:/.avg->runnable_avg_sum
48 ± 4% +67.5% 81 ± 4% sched_debug.cfs_rq[3]:/.tg_runnable_contrib
60 ± 27% +50.8% 90 ± 10% sched_debug.cfs_rq[4]:/.tg_runnable_contrib
412 ± 5% +68.9% 696 ± 4% sched_debug.cfs_rq[4]:/.tg->runnable_avg
2784 ± 26% +49.6% 4166 ± 10% sched_debug.cfs_rq[4]:/.avg->runnable_avg_sum
4946 ± 8% +106.3% 10206 ± 8% sched_debug.cfs_rq[4]:/.tg_load_avg
55 ± 8% +55.9% 85 ± 7% sched_debug.cfs_rq[5]:/.tg_runnable_contrib
4907 ± 8% +106.6% 10141 ± 9% sched_debug.cfs_rq[5]:/.tg_load_avg
415 ± 6% +68.1% 698 ± 4% sched_debug.cfs_rq[5]:/.tg->runnable_avg
2564 ± 8% +53.9% 3947 ± 7% sched_debug.cfs_rq[5]:/.avg->runnable_avg_sum
191 ± 13% -33.9% 126 ± 31% sched_debug.cfs_rq[5]:/.load
2337 ± 6% +66.3% 3888 ± 8% sched_debug.cfs_rq[6]:/.avg->runnable_avg_sum
419 ± 6% +67.2% 700 ± 4% sched_debug.cfs_rq[6]:/.tg->runnable_avg
4900 ± 8% +106.2% 10103 ± 9% sched_debug.cfs_rq[6]:/.tg_load_avg
50 ± 7% +69.5% 84 ± 8% sched_debug.cfs_rq[6]:/.tg_runnable_contrib
4844 ± 7% +106.1% 9983 ± 9% sched_debug.cfs_rq[7]:/.tg_load_avg
2327 ± 7% +79.9% 4185 ± 15% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
50 ± 7% +81.5% 90 ± 16% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
423 ± 5% +66.7% 705 ± 4% sched_debug.cfs_rq[7]:/.tg->runnable_avg
295623 ± 17% -18.7% 240304 ± 6% sched_debug.cpu#0.nr_switches
107733 ± 24% -37.6% 67259 ± 10% sched_debug.cpu#0.ttwu_local
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#0.clock_task
135495 ± 18% -23.9% 103117 ± 7% sched_debug.cpu#0.sched_goidle
26384 ± 7% -20.4% 20995 ± 11% sched_debug.cpu#0.nr_load_updates
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#0.clock
296932 ± 17% -18.7% 241501 ± 6% sched_debug.cpu#0.sched_count
30451 ± 4% -26.1% 22493 ± 12% sched_debug.cpu#1.nr_load_updates
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#1.clock_task
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#1.clock
3 ± 25% +153.8% 8 ± 48% sched_debug.cpu#1.cpu_load[4]
715589 ± 5% -19.1% 578901 ± 11% sched_debug.cpu#1.avg_idle
142 ± 16% -33.3% 94 ± 16% sched_debug.cpu#2.load
5 ± 31% +114.3% 11 ± 15% sched_debug.cpu#2.cpu_load[3]
126380 ± 10% -18.8% 102610 ± 3% sched_debug.cpu#2.sched_goidle
418 ± 19% -41.2% 245 ± 12% sched_debug.cpu#2.curr->pid
277074 ± 9% -13.8% 238909 ± 3% sched_debug.cpu#2.sched_count
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#2.clock_task
277012 ± 9% -13.8% 238854 ± 3% sched_debug.cpu#2.nr_switches
9 ± 28% +89.5% 18 ± 27% sched_debug.cpu#2.cpu_load[2]
98388 ± 14% -32.3% 66641 ± 5% sched_debug.cpu#2.ttwu_local
740677 ± 2% -21.0% 584865 ± 9% sched_debug.cpu#2.avg_idle
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#2.clock
3 ± 31% +142.9% 8 ± 31% sched_debug.cpu#2.cpu_load[4]
25773 ± 9% -19.9% 20654 ± 8% sched_debug.cpu#2.nr_load_updates
690785 ± 3% -14.3% 591985 ± 6% sched_debug.cpu#3.avg_idle
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#3.clock_task
30028 ± 5% -30.5% 20877 ± 13% sched_debug.cpu#3.nr_load_updates
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#3.clock
113746 ± 18% -37.1% 71517 ± 18% sched_debug.cpu#4.ttwu_local
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#4.clock
27593 ± 7% -36.0% 17656 ± 4% sched_debug.cpu#4.nr_load_updates
106260 ± 1% -40.8% 62862 ± 0% sched_debug.cpu#4.clock_task
141639 ± 14% -24.8% 106524 ± 12% sched_debug.cpu#4.sched_goidle
184 ± 13% -31.3% 126 ± 31% sched_debug.cpu#5.load
28115 ± 9% -26.4% 20695 ± 15% sched_debug.cpu#5.nr_load_updates
106261 ± 1% -40.8% 62863 ± 0% sched_debug.cpu#5.clock_task
106261 ± 1% -40.8% 62863 ± 0% sched_debug.cpu#5.clock
710881 ± 5% -13.0% 618742 ± 11% sched_debug.cpu#5.avg_idle
151901 ± 11% -29.7% 106778 ± 9% sched_debug.cpu#6.sched_goidle
106261 ± 1% -40.8% 62861 ± 0% sched_debug.cpu#6.clock_task
106261 ± 1% -40.8% 62861 ± 0% sched_debug.cpu#6.clock
124231 ± 14% -42.0% 72021 ± 14% sched_debug.cpu#6.ttwu_local
27846 ± 4% -38.4% 17152 ± 3% sched_debug.cpu#6.nr_load_updates
175655 ± 9% -21.3% 138225 ± 7% sched_debug.cpu#6.ttwu_count
328538 ± 10% -25.1% 246230 ± 8% sched_debug.cpu#6.nr_switches
328606 ± 10% -25.1% 246282 ± 8% sched_debug.cpu#6.sched_count
106261 ± 1% -40.8% 62863 ± 0% sched_debug.cpu#7.clock
27346 ± 9% -26.6% 20061 ± 15% sched_debug.cpu#7.nr_load_updates
106261 ± 1% -40.8% 62863 ± 0% sched_debug.cpu#7.clock_task
106261 ± 1% -40.8% 62863 ± 0% sched_debug.cpu_clk
106261 ± 1% -40.8% 62863 ± 0% sched_debug.ktime
107044 ± 1% -40.5% 63646 ± 0% sched_debug.sched_clk

testbox/testcase/testparams: nhm4/fsmark/performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd

6a2de6eb3f267ba6 465a05fecc2c7921d50f33cd10
---------------- --------------------------
9381393 ± 2% -52.3% 4475975 ± 15% fsmark.app_overhead
559 ± 0% +134.7% 1313 ± 0% fsmark.files_per_sec
12 ± 0% +214.6% 37 ± 1% fsmark.time.percent_of_cpu_this_job_got
183.94 ± 0% -57.3% 78.57 ± 0% fsmark.time.elapsed_time.max
183.94 ± 0% -57.3% 78.57 ± 0% fsmark.time.elapsed_time
1839298 ± 0% +4.3% 1918608 ± 0% fsmark.time.file_system_outputs
22.50 ± 0% +30.0% 29.25 ± 0% fsmark.time.system_time
761103 ± 0% +37.7% 1048153 ± 0% fsmark.time.voluntary_context_switches
19670 ± 0% -30.8% 13615 ± 3% fsmark.time.involuntary_context_switches
206.26 ± 1% -51.7% 99.57 ± 0% uptime.boot
367 ± 5% +42.1% 522 ± 1% uptime.idle
47369 ± 0% -65.4% 16377 ± 0% softirqs.BLOCK
29294 ± 2% -13.9% 25233 ± 2% softirqs.RCU
2.29 ± 0% +151.2% 5.76 ± 0% turbostat.%Busy
68 ± 0% +174.0% 187 ± 0% turbostat.Avg_MHz
40.65 ± 1% +82.9% 74.36 ± 0% turbostat.CPU%c1
48.36 ± 0% -76.3% 11.44 ± 4% turbostat.CPU%c3
4943 ± 0% +37.1% 6778 ± 0% vmstat.io.bo
14 ± 2% -33.9% 9 ± 11% vmstat.procs.b
5720 ± 0% +112.1% 12131 ± 0% vmstat.system.in
18895 ± 0% +135.5% 44500 ± 0% vmstat.system.cs
58365 ± 2% -8.8% 53258 ± 0% meminfo.DirectMap4k
1541 ± 0% -60.0% 616 ± 0% meminfo.Mlocked
1541 ± 0% -60.0% 616 ± 0% meminfo.Unevictable
183.94 ± 0% -57.3% 78.57 ± 0% time.elapsed_time.max
183.94 ± 0% -57.3% 78.57 ± 0% time.elapsed_time
19670 ± 0% -30.8% 13615 ± 3% time.involuntary_context_switches
12 ± 0% +214.6% 37 ± 1% time.percent_of_cpu_this_job_got
22.50 ± 0% +30.0% 29.25 ± 0% time.system_time
1.13 ± 2% -28.6% 0.80 ± 3% time.user_time
761103 ± 0% +37.7% 1048153 ± 0% time.voluntary_context_switches
982895 ± 0% -28.3% 704285 ± 0% cpuidle.C1-NHM.usage
3.274e+08 ± 1% -14.2% 2.81e+08 ± 1% cpuidle.C1-NHM.time
180656 ± 3% +254.8% 641058 ± 0% cpuidle.C1E-NHM.usage
42872379 ± 2% +96.4% 84210125 ± 0% cpuidle.C1E-NHM.time
373512 ± 0% -60.1% 149080 ± 1% cpuidle.C3-NHM.usage
7.652e+08 ± 0% -85.6% 1.099e+08 ± 2% cpuidle.C3-NHM.time
3.122e+08 ± 3% -59.8% 1.254e+08 ± 1% cpuidle.C6-NHM.time
134366 ± 2% -25.7% 99891 ± 1% cpuidle.C6-NHM.usage
2400 ± 1% +80.8% 4339 ± 1% cpuidle.POLL.usage
143 ± 2% -48.3% 74 ± 6% proc-vmstat.nr_dirty
385 ± 0% -60.0% 154 ± 0% proc-vmstat.nr_mlock
385 ± 0% -60.0% 154 ± 0% proc-vmstat.nr_unevictable
230004 ± 0% -40.3% 137234 ± 0% proc-vmstat.nr_written
436867 ± 0% -25.6% 325170 ± 0% proc-vmstat.numa_hit
436867 ± 0% -25.6% 325170 ± 0% proc-vmstat.numa_local
24976 ± 0% +79.6% 44865 ± 0% proc-vmstat.pgactivate
467392 ± 0% -24.9% 350901 ± 0% proc-vmstat.pgalloc_dma32
268016 ± 0% -51.5% 129939 ± 0% proc-vmstat.pgfault
226937 ± 1% -51.6% 109882 ± 1% proc-vmstat.pgfree
919916 ± 0% -40.3% 548911 ± 0% proc-vmstat.pgpgout
283 ± 3% +131.3% 655 ± 5% sched_debug.cfs_rq[0]:/.tg->runnable_avg
7165 ± 5% +85.5% 13292 ± 2% sched_debug.cfs_rq[0]:/.tg_load_avg
6701 ± 7% +11.4% 7465 ± 5% sched_debug.cfs_rq[0]:/.min_vruntime
38 ± 6% +133.6% 88 ± 5% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
1783 ± 6% +129.9% 4100 ± 5% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
7165 ± 5% +85.2% 13270 ± 2% sched_debug.cfs_rq[1]:/.tg_load_avg
285 ± 3% +130.6% 658 ± 5% sched_debug.cfs_rq[1]:/.tg->runnable_avg
41 ± 24% +103.0% 83 ± 5% sched_debug.cfs_rq[1]:/.tg_runnable_contrib
4678 ± 4% +19.4% 5586 ± 5% sched_debug.cfs_rq[1]:/.min_vruntime
1920 ± 24% +100.2% 3843 ± 5% sched_debug.cfs_rq[1]:/.avg->runnable_avg_sum
4979 ± 7% +14.6% 5704 ± 6% sched_debug.cfs_rq[2]:/.min_vruntime
1682 ± 5% +127.1% 3820 ± 6% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
7133 ± 5% +86.0% 13267 ± 2% sched_debug.cfs_rq[2]:/.tg_load_avg
36 ± 6% +129.9% 82 ± 6% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
287 ± 3% +130.2% 662 ± 5% sched_debug.cfs_rq[2]:/.tg->runnable_avg
7125 ± 5% +82.1% 12973 ± 2% sched_debug.cfs_rq[3]:/.tg_load_avg
290 ± 4% +128.4% 663 ± 5% sched_debug.cfs_rq[3]:/.tg->runnable_avg
1624 ± 4% +134.6% 3810 ± 8% sched_debug.cfs_rq[3]:/.avg->runnable_avg_sum
34 ± 5% +140.9% 82 ± 9% sched_debug.cfs_rq[3]:/.tg_runnable_contrib
32 ± 10% +149.2% 81 ± 6% sched_debug.cfs_rq[4]:/.tg_runnable_contrib
293 ± 4% +127.2% 665 ± 5% sched_debug.cfs_rq[4]:/.tg->runnable_avg
1172 ± 29% +137.4% 2782 ± 18% sched_debug.cfs_rq[4]:/.blocked_load_avg
3666 ± 4% +20.7% 4426 ± 6% sched_debug.cfs_rq[4]:/.min_vruntime
1214 ± 31% +138.4% 2893 ± 17% sched_debug.cfs_rq[4]:/.tg_load_contrib
1535 ± 10% +143.7% 3741 ± 6% sched_debug.cfs_rq[4]:/.avg->runnable_avg_sum
7125 ± 5% +82.3% 12986 ± 2% sched_debug.cfs_rq[4]:/.tg_load_avg
34 ± 17% +133.8% 79 ± 15% sched_debug.cfs_rq[5]:/.tg_runnable_contrib
7121 ± 5% +82.8% 13020 ± 2% sched_debug.cfs_rq[5]:/.tg_load_avg
294 ± 4% +127.2% 669 ± 5% sched_debug.cfs_rq[5]:/.tg->runnable_avg
1034 ± 32% +123.5% 2312 ± 23% sched_debug.cfs_rq[5]:/.tg_load_contrib
3739 ± 2% +28.2% 4792 ± 12% sched_debug.cfs_rq[5]:/.min_vruntime
1589 ± 16% +131.5% 3679 ± 15% sched_debug.cfs_rq[5]:/.avg->runnable_avg_sum
1001 ± 32% +128.8% 2292 ± 23% sched_debug.cfs_rq[5]:/.blocked_load_avg
3851 ± 1% +19.0% 4583 ± 5% sched_debug.cfs_rq[6]:/.min_vruntime
1882 ± 5% +94.7% 3666 ± 5% sched_debug.cfs_rq[6]:/.avg->runnable_avg_sum
297 ± 4% +126.0% 671 ± 5% sched_debug.cfs_rq[6]:/.tg->runnable_avg
7120 ± 5% +82.7% 13005 ± 2% sched_debug.cfs_rq[6]:/.tg_load_avg
40 ± 5% +95.1% 79 ± 5% sched_debug.cfs_rq[6]:/.tg_runnable_contrib
3826 ± 2% +17.5% 4495 ± 6% sched_debug.cfs_rq[7]:/.min_vruntime
7078 ± 5% +83.5% 12987 ± 1% sched_debug.cfs_rq[7]:/.tg_load_avg
1517 ± 9% +155.4% 3875 ± 14% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
32 ± 9% +162.5% 84 ± 15% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
299 ± 3% +125.0% 673 ± 5% sched_debug.cfs_rq[7]:/.tg->runnable_avg
331525 ± 3% -34.0% 218740 ± 5% sched_debug.cpu#0.nr_switches
142801 ± 4% -48.4% 73667 ± 5% sched_debug.cpu#0.ttwu_local
678550 ± 7% -32.7% 456720 ± 34% sched_debug.cpu#0.avg_idle
313488 ± 1% -31.1% 215938 ± 4% sched_debug.cpu#0.ttwu_count
111534 ± 2% -49.4% 56460 ± 3% sched_debug.cpu#0.clock_task
155708 ± 3% -38.2% 96199 ± 5% sched_debug.cpu#0.sched_goidle
28188 ± 1% -33.4% 18783 ± 3% sched_debug.cpu#0.nr_load_updates
111534 ± 2% -49.4% 56460 ± 3% sched_debug.cpu#0.clock
331695 ± 3% -34.0% 218808 ± 5% sched_debug.cpu#0.sched_count
446397 ± 5% -18.7% 362850 ± 3% sched_debug.cpu#1.sched_count
208484 ± 6% -13.1% 181078 ± 3% sched_debug.cpu#1.ttwu_count
29358 ± 2% -32.6% 19790 ± 1% sched_debug.cpu#1.nr_load_updates
111537 ± 2% -49.4% 56461 ± 3% sched_debug.cpu#1.clock_task
111537 ± 2% -49.4% 56461 ± 3% sched_debug.cpu#1.clock
446310 ± 5% -18.7% 362793 ± 3% sched_debug.cpu#1.nr_switches
214342 ± 6% -22.4% 166319 ± 3% sched_debug.cpu#1.sched_goidle
166966 ± 8% -26.5% 122663 ± 4% sched_debug.cpu#1.ttwu_local
707893 ± 7% -31.1% 487966 ± 9% sched_debug.cpu#1.avg_idle
198095 ± 3% -11.9% 174593 ± 9% sched_debug.cpu#2.sched_goidle
111536 ± 2% -49.4% 56460 ± 3% sched_debug.cpu#2.clock_task
151594 ± 4% -12.4% 132847 ± 11% sched_debug.cpu#2.ttwu_local
711629 ± 8% -48.7% 364936 ± 30% sched_debug.cpu#2.avg_idle
111536 ± 2% -49.4% 56460 ± 3% sched_debug.cpu#2.clock
28629 ± 4% -32.1% 19433 ± 7% sched_debug.cpu#2.nr_load_updates
164980 ± 11% -32.8% 110931 ± 7% sched_debug.cpu#3.ttwu_local
441918 ± 8% -24.3% 334685 ± 6% sched_debug.cpu#3.sched_count
441832 ± 8% -24.3% 334627 ± 6% sched_debug.cpu#3.nr_switches
718484 ± 5% -34.8% 468267 ± 21% sched_debug.cpu#3.avg_idle
207428 ± 9% -18.2% 169613 ± 6% sched_debug.cpu#3.ttwu_count
111536 ± 2% -49.4% 56462 ± 3% sched_debug.cpu#3.clock_task
29300 ± 4% -33.8% 19394 ± 7% sched_debug.cpu#3.nr_load_updates
111536 ± 2% -49.4% 56462 ± 3% sched_debug.cpu#3.clock
212194 ± 9% -28.0% 152779 ± 6% sched_debug.cpu#3.sched_goidle
2015 ± 1% -36.0% 1290 ± 1% sched_debug.cpu#4.nr_uninterruptible
68219 ± 3% +16.4% 79412 ± 3% sched_debug.cpu#4.ttwu_count
111535 ± 2% -49.4% 56461 ± 3% sched_debug.cpu#4.clock
15884 ± 0% -35.3% 10276 ± 5% sched_debug.cpu#4.nr_load_updates
111535 ± 2% -49.4% 56461 ± 3% sched_debug.cpu#4.clock_task
101874 ± 19% -26.3% 75129 ± 7% sched_debug.cpu#5.sched_goidle
67242 ± 29% -39.0% 41027 ± 11% sched_debug.cpu#5.ttwu_local
16499 ± 7% -36.4% 10496 ± 5% sched_debug.cpu#5.nr_load_updates
218670 ± 17% -19.5% 175923 ± 6% sched_debug.cpu#5.nr_switches
3 ± 25% +123.1% 7 ± 46% sched_debug.cpu#5.cpu_load[4]
111534 ± 2% -49.4% 56454 ± 3% sched_debug.cpu#5.clock_task
111534 ± 2% -49.4% 56454 ± 3% sched_debug.cpu#5.clock
748334 ± 5% -24.6% 563910 ± 15% sched_debug.cpu#5.avg_idle
218737 ± 17% -19.6% 175958 ± 6% sched_debug.cpu#5.sched_count
111534 ± 2% -49.4% 56460 ± 3% sched_debug.cpu#6.clock_task
111534 ± 2% -49.4% 56460 ± 3% sched_debug.cpu#6.clock
15913 ± 1% -31.5% 10903 ± 10% sched_debug.cpu#6.nr_load_updates
697624 ± 9% -19.9% 559027 ± 3% sched_debug.cpu#6.avg_idle
199869 ± 3% -17.1% 165770 ± 8% sched_debug.cpu#7.sched_count
199797 ± 3% -17.0% 165733 ± 8% sched_debug.cpu#7.nr_switches
111533 ± 2% -49.4% 56460 ± 3% sched_debug.cpu#7.clock
16124 ± 2% -36.8% 10191 ± 7% sched_debug.cpu#7.nr_load_updates
57841 ± 6% -37.1% 36364 ± 13% sched_debug.cpu#7.ttwu_local
92125 ± 3% -24.0% 70057 ± 9% sched_debug.cpu#7.sched_goidle
111533 ± 2% -49.4% 56460 ± 3% sched_debug.cpu#7.clock_task
111537 ± 2% -49.4% 56462 ± 3% sched_debug.cpu_clk
111387 ± 2% -49.4% 56313 ± 3% sched_debug.ktime
111537 ± 2% -49.4% 56462 ± 3% sched_debug.sched_clk

lkp-st02: Core2
Memory: 8G

nhm4: Nehalem
Memory: 4G


vmstat.system.cs

60000 ++------------------------------------------------------------------+
| O O O O O O O O O O O O O O O O O O
50000 O+ O O O |
| |
| |
40000 ++ |
| .*. |
30000 ++.*.. *.. .*. *.. |
*. * : *. |
20000 ++ : : *..*..*..*..*.*..*..* |
| : : |
| : : |
10000 ++ : : |
| : : |
0 ++-O-----*-*--O-----O-----------------------------------------------+


softirqs.BLOCK

120000 ++-----------------------------------------------------------------+
| |
100000 *+.*..* *..*.*..*..*.. |
| : : .*..*.. .*. .* |
| : : *..* *. *. |
80000 ++ : : |
| : : |
60000 ++ : : |
| : : |
40000 ++ : : |
O O:O O : O O O O O O O O O O O O O O O O O O
| : : |
20000 ++ : : |
| : : |
0 ++-O----*--*--O-----O----------------------------------------------+


fsmark.files_per_sec

1800 ++-------------------------------------------------------------------+
O O O O O O O O O O O O O O O O O O O O O O
1600 ++ |
1400 ++ |
| |
1200 ++ |
1000 ++ .*.. |
*..*..* *..*..*. *.. |
800 ++ : : |
600 ++ : : *..*.*..*..*..*..*..* |
| : : |
400 ++ : : |
200 ++ : : |
| : : |
0 ++-O-----*--*-O-----O------------------------------------------------+


fsmark.time.percent_of_cpu_this_job_got

45 ++---------------------------------------------------------------------+
O O O O O O O O O O O O O O O O O O O O O O
40 ++ |
35 ++ |
| |
30 ++ |
25 ++ |
| *.. .*.. |
20 *+.*..* : *..*. *.. |
15 ++ : : *..*.*..*..*..*..*..* |
| : : |
10 ++ : : |
5 ++ : : |
| : : |
0 ++-O-----*--*--O-----O-------------------------------------------------+


fsmark.time.voluntary_context_switches

1.2e+06 ++--------------------O-------O--------O-----------------------O--O
O O O O O O O O O O O O O O O O O |
1e+06 ++ |
*..*..* *.*..*..*..*.*..*..*..*.*..*..*..* |
| : : |
800000 ++ : : |
| : : |
600000 ++ : : |
| : : |
400000 ++ : : |
| : : |
| : : |
200000 ++ : : |
| : : |
0 ++-O----*--*--O----O----------------------------------------------+


fsmark.time.involuntary_context_switches

16000 ++------------------------------------------------------------------O
O O O O O O O O O O O O O O |
14000 ++ O O O O O O O |
12000 ++ |
| |
10000 ++ *.. |
*..*..* : *..*..*.*.. .*.. |
8000 ++ : : *..*..*..*..*.*. * |
| : : |
6000 ++ : : |
4000 ++ : : |
| : : |
2000 ++ : : |
| : : |
0 ++-O-----*-*--O-----O-----------------------------------------------+


time.system_time

30 ++---------------------------------------------------------------------+
| |
25 O+ O O O O O O O O O O O O O O O O O O O O O
| .*..*.*..*..*..*..*..* |
*..*..* *..*..*..*..*. |
20 ++ : : |
| : : |
15 ++ : : |
| : : |
10 ++ : : |
| : : |
| : : |
5 ++ : : |
| : : |
0 ++-O-----*--*--O-----O-------------------------------------------------+


time.percent_of_cpu_this_job_got

45 ++---------------------------------------------------------------------+
O O O O O O O O O O O O O O O O O O O O O O
40 ++ |
35 ++ |
| |
30 ++ |
25 ++ |
| *.. .*.. |
20 *+.*..* : *..*. *.. |
15 ++ : : *..*.*..*..*..*..*..* |
| : : |
10 ++ : : |
5 ++ : : |
| : : |
0 ++-O-----*--*--O-----O-------------------------------------------------+


time.voluntary_context_switches

1.2e+06 ++--------------------O-------O--------O-----------------------O--O
O O O O O O O O O O O O O O O O O |
1e+06 ++ |
*..*..* *.*..*..*..*.*..*..*..*.*..*..*..* |
| : : |
800000 ++ : : |
| : : |
600000 ++ : : |
| : : |
400000 ++ : : |
| : : |
| : : |
200000 ++ : : |
| : : |
0 ++-O----*--*--O----O----------------------------------------------+


time.involuntary_context_switches

16000 ++------------------------------------------------------------------O
O O O O O O O O O O O O O O |
14000 ++ O O O O O O O |
12000 ++ |
| |
10000 ++ *.. |
*..*..* : *..*..*.*.. .*.. |
8000 ++ : : *..*..*..*..*.*. * |
| : : |
6000 ++ : : |
4000 ++ : : |
| : : |
2000 ++ : : |
| : : |
0 ++-O-----*-*--O-----O-----------------------------------------------+


proc-vmstat.nr_dirty

180 ++--------------------------------------------------------------------+
| *.. *.. |
160 ++. .*..*.. .*..*..*.. .. .*..* |
140 *+ * *.*. *. * *. |
| : : |
120 ++ : : |
100 ++ : : O O O |
| O: O :O O O O O O |
80 ++ : : O O O O O O O
60 O+ :O : O O |
| : : |
40 ++ : : |
20 ++ : : |
| : : |
0 ++-O-----*--*--O----O-------------------------------------------------+


proc-vmstat.nr_written

250000 ++-----------------------------------------------------------------+
| .* .*.. .*. |
*..*. : *..*.*..*..*..*..*.*. *. *..* |
200000 ++ : : |
| : : |
| : : |
150000 ++ : :O |
O O:O O : O O O O O O O O O O O O O O O O O
100000 ++ : : |
| : : |
| : : |
50000 ++ : : |
| : : |
| : : |
0 ++-O----*--*--O-----O----------------------------------------------+


proc-vmstat.pgpgout

1e+06 ++-----------------------------------------------------------------+
900000 ++ .* .*.. .*. |
*..*. : *..*.*..*..*..*..*.*. *. *..* |
800000 ++ : : |
700000 ++ : : |
| : : |
600000 ++ : :O |
500000 O+ O:O O : O O O O O O O O O O O O O O O O O
400000 ++ : : |
| : : |
300000 ++ : : |
200000 ++ : : |
| : : |
100000 ++ : : |
0 ++-O----*--*--O-----O----------------------------------------------+


proc-vmstat.pgactivate

45000 ++------------------------------------------------------------------+
| |
40000 O+ O O O O O O O O O O O O O O O O O O O
35000 ++ O O |
| |
30000 *+.*..* *..*..*..*.*.. .*.. .*.. |
25000 ++ : : *. *..*..*.*. * |
| : : |
20000 ++ : : |
15000 ++ : : |
| : : |
10000 ++ : : |
5000 ++ : : |
| : : |
0 ++-O-----*-*--O-----O-----------------------------------------------+


[*] bisect-good sample
[O] bisect-bad sample

To reproduce:

apt-get install ruby
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang

---
testcase: fsmark
default-monitors:
  wait: pre-test
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 10
default-watchdogs:
  watch-oom:
  watchdog:
cpufreq_governor:
commit: a897436e0e233e84b664bb7f33c4e0d4d3e3bdad
model: Core2
memory: 8G
nr_hdd_partitions: 12
hdd_partitions: "/dev/disk/by-id/scsi-35000c5000???????"
swap_partitions:
iterations: 1x
nr_threads: 32t
disk: 1HDD
fs: f2fs
fs2:
fsmark:
  filesize: 9B
  test_size: 400M
  sync_method: fsyncBeforeClose
  nr_directories: 16d
  nr_files_per_directory: 256fpd
testbox: lkp-st02
tbox_group: lkp-st02
kconfig: x86_64-rhel
enqueue_time: 2015-04-08 21:39:36.092248041 +08:00
head_commit: a897436e0e233e84b664bb7f33c4e0d4d3e3bdad
base_commit: f22e6e847115abc3a0e2ad7bb18d243d42275af1
branch: next/master
kernel: "/kernel/x86_64-rhel/a897436e0e233e84b664bb7f33c4e0d4d3e3bdad/vmlinuz-4.0.0-rc7-next-20150408"
user: lkp
queue: cyclic
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/lkp-st02/fsmark/1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd/debian-x86_64-2015-02-07.cgz/x86_64-rhel/a897436e0e233e84b664bb7f33c4e0d4d3e3bdad/0"
LKP_SERVER: inn
job_file: "/lkp/scheduled/lkp-st02/cyclic_fsmark-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd-x86_64-rhel-HEAD-a897436e0e233e84b664bb7f33c4e0d4d3e3bdad-0-20150408-97628-13894mo.yaml"
dequeue_time: 2015-04-09 07:24:20.307506478 +08:00
nr_cpu: "$(nproc)"
max_uptime: 1470.84
modules_initrd: "/kernel/x86_64-rhel/a897436e0e233e84b664bb7f33c4e0d4d3e3bdad/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs2.cgz,/lkp/benchmarks/fsmark.cgz"
job_state: finished
loadavg: 15.28 5.81 2.10 1/155 2024
start_time: '1428535506'
end_time: '1428535568'
version: "/lkp/lkp/.src-20150408-174023"
mkfs -t f2fs /dev/sdb
mount -t f2fs /dev/sdb /fs/sdb
./fs_mark -d /fs/sdb/1 -d /fs/sdb/2 -d /fs/sdb/3 -d /fs/sdb/4 -d /fs/sdb/5 -d /fs/sdb/6 -d /fs/sdb/7 -d /fs/sdb/8 -d /fs/sdb/9 -d /fs/sdb/10 -d /fs/sdb/11 -d /fs/sdb/12 -d /fs/sdb/13 -d /fs/sdb/14 -d /fs/sdb/15 -d /fs/sdb/16 -d /fs/sdb/17 -d /fs/sdb/18 -d /fs/sdb/19 -d /fs/sdb/20 -d /fs/sdb/21 -d /fs/sdb/22 -d /fs/sdb/23 -d /fs/sdb/24 -d /fs/sdb/25 -d /fs/sdb/26 -d /fs/sdb/27 -d /fs/sdb/28 -d /fs/sdb/29 -d /fs/sdb/30 -d /fs/sdb/31 -d /fs/sdb/32 -D 16 -N 256 -n 3200 -L 1 -S 1 -s 9
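
The fs_mark command line above can also be generated with a short loop instead of spelling out all 32 -d arguments by hand; a minimal sketch for a manual run outside the LKP harness, assuming the fs_mark binary from the fsmark benchmark package and a freshly formatted /dev/sdb as above (the mkdir is only a convenience in case fs_mark does not create the target directories itself):

mkfs -t f2fs /dev/sdb
mount -t f2fs /dev/sdb /fs/sdb
# one fs_mark worker per -d directory: 32 threads in total (the "32t" in testparams)
dirs=
for i in $(seq 1 32); do
        mkdir -p /fs/sdb/$i
        dirs="$dirs -d /fs/sdb/$i"
done
# 9-byte files (-s 9), fsync before close (-S 1), 16 subdirs of 256 files each, one pass (-L 1)
./fs_mark $dirs -D 16 -N 256 -n 3200 -L 1 -S 1 -s 9
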
_______________________________________________
LKP mailing list
LKP@xxxxxxxxxxxxxxx