[lkp] [sched/preempt] 59fe8231b68: 42.7% fsmark.app_overhead
From: kernel test robot
Date: Mon Aug 03 2015 - 09:33:57 EST
FYI, we noticed the following changes on
git://internal_merge_and_test_tree revert-59fe8231b68905f4ca5f7a115b1a675829016b53-59fe8231b68905f4ca5f7a115b1a675829016b53
commit 59fe8231b68905f4ca5f7a115b1a675829016b53 ("sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()")
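For readers unfamiliar with the helper the patch touches: cond_resched_lock() briefly drops a held spinlock, schedules if a reschedule is pending, and re-acquires the lock; because the caller stays runnable, any switch it triggers is accounted as an involuntary context switch. Below is a minimal, purely illustrative sketch of the usual call pattern (hypothetical drain_list() and struct item, not code from the patch under test):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * Illustrative sketch only -- hypothetical struct item and drain_list(),
 * not code from the patch under test.  It shows the usual pattern that
 * cond_resched_lock() exists for: walking a long list under a spinlock
 * without monopolizing the CPU.
 */
struct item {
	struct list_head node;
};

static void drain_list(struct list_head *head, spinlock_t *lock)
{
	spin_lock(lock);
	while (!list_empty(head)) {
		struct item *it = list_first_entry(head, struct item, node);

		list_del(&it->node);
		/* ... handle 'it' while still holding the lock ... */

		/*
		 * If a reschedule is due (or the lock is contended),
		 * drop the lock, yield the CPU, and re-acquire it.
		 * Returns nonzero when the lock was dropped, so state
		 * cached under the lock must be revalidated.
		 */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}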
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/iterations/nr_threads/disk/fs/fs2/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
lkp-ws02/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/1x/32t/1HDD/btrfs/nfsv4/8K/400M/fsyncBeforeClose/16d/256fpd
commit:
2f008f5b6184088f3f435557f0592d876bda78b9
59fe8231b68905f4ca5f7a115b1a675829016b53
2f008f5b6184088f (base)          59fe8231b68905f4ca5f7a115b (this commit)
----------------                 --------------------------
     value ± %stddev      %change        value ± %stddev
44649848 ± 7% +42.7% 63695806 ± 12% fsmark.app_overhead
60010 ± 0% +19.0% 71395 ± 0% fsmark.time.involuntary_context_switches
227729 ± 0% -2.8% 221420 ± 0% fsmark.time.voluntary_context_switches
60010 ± 0% +19.0% 71395 ± 0% time.involuntary_context_switches
11321 ± 0% +3.2% 11684 ± 1% vmstat.system.cs
1.29 ± 0% +1.9% 1.31 ± 0% turbostat.%Busy
29.09 ± 8% -10.1% 26.14 ± 5% turbostat.Pkg%pc3
121962 ± 5% +12.9% 137665 ± 7% numa-meminfo.node0.Active(file)
190283 ± 0% -7.9% 175324 ± 3% numa-meminfo.node1.Active
177386 ± 1% -10.0% 159598 ± 3% numa-meminfo.node1.Active(file)
53060080 ± 8% +22.4% 64966682 ± 3% cpuidle.C1E-NHM.time
47661 ± 3% +9.4% 52136 ± 3% cpuidle.C1E-NHM.usage
421155 ± 1% +9.9% 462828 ± 0% cpuidle.C3-NHM.usage
588.00 ± 2% +15.0% 676.00 ± 3% cpuidle.POLL.usage
30490 ± 5% +12.9% 34416 ± 7% numa-vmstat.node0.nr_active_file
66978 ± 4% +11.7% 74839 ± 6% numa-vmstat.node0.nr_dirtied
66802 ± 4% +11.7% 74647 ± 6% numa-vmstat.node0.nr_written
44345 ± 1% -10.0% 39899 ± 3% numa-vmstat.node1.nr_active_file
94710 ± 1% -10.1% 85175 ± 4% numa-vmstat.node1.nr_dirtied
94375 ± 1% -10.1% 84851 ± 5% numa-vmstat.node1.nr_written
15238374 ±100% +178.2% 42391545 ±100% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
70514 ± 0% -8.8% 64327 ± 1% latency_stats.hits.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_lookup.[nfsv4].nfs4_proc_lookup_common.[nfsv4].nfs4_proc_lookup.[nfsv4].nfs_lookup_revalidate.nfs4_lookup_revalidate.lookup_dcache.__lookup_hash
164.00 ±141% +12519.1% 20695 ± 6% latency_stats.max.blk_execute_rq.sg_io.scsi_cmd_ioctl.scsi_cmd_blk_ioctl.sd_ioctl.[sd_mod].blkdev_ioctl.block_ioctl.do_vfs_ioctl.SyS_ioctl.entry_SYSCALL_64_fastpath
15238374 ±100% +178.2% 42391545 ±100% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
4843 ±141% +1639.1% 84223 ± 29% latency_stats.sum.blk_execute_rq.sg_io.scsi_cmd_ioctl.scsi_cmd_blk_ioctl.sd_ioctl.[sd_mod].blkdev_ioctl.block_ioctl.do_vfs_ioctl.SyS_ioctl.entry_SYSCALL_64_fastpath
6826 ±141% +557.4% 44879 ± 32% latency_stats.sum.flush_work.__cancel_work_timer.cancel_delayed_work_sync.disk_block_events.__blkdev_get.blkdev_get.blkdev_open.do_dentry_open.vfs_open.path_openat.do_filp_open.do_sys_open
15238374 ±100% +178.2% 42391545 ±100% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
5202 ± 11% +18.6% 6168 ± 5% sched_debug.cfs_rq[0]:/.exec_clock
2249 ± 29% -29.7% 1581 ± 4% sched_debug.cfs_rq[10]:/.avg->runnable_avg_sum
48.25 ± 29% -30.2% 33.67 ± 5% sched_debug.cfs_rq[10]:/.tg_runnable_contrib
744.73 ± 6% -27.7% 538.65 ± 11% sched_debug.cfs_rq[14]:/.exec_clock
2732 ± 7% +18.0% 3224 ± 8% sched_debug.cfs_rq[1]:/.exec_clock
811.75 ± 45% +124.5% 1822 ± 37% sched_debug.cfs_rq[20]:/.avg->runnable_avg_sum
289.50 ± 59% +100.9% 581.67 ± 16% sched_debug.cfs_rq[20]:/.blocked_load_avg
3.33 ± 61% +80.0% 6.00 ± 23% sched_debug.cfs_rq[20]:/.nr_spread_over
298.75 ± 60% +95.6% 584.33 ± 15% sched_debug.cfs_rq[20]:/.tg_load_contrib
17.00 ± 47% +127.5% 38.67 ± 37% sched_debug.cfs_rq[20]:/.tg_runnable_contrib
1772 ± 14% +27.2% 2253 ± 17% sched_debug.cfs_rq[3]:/.avg->runnable_avg_sum
526.00 ± 35% -49.3% 266.67 ± 40% sched_debug.cfs_rq[4]:/.blocked_load_avg
547.00 ± 31% -52.3% 261.00 ± 44% sched_debug.cfs_rq[4]:/.tg_load_contrib
11.00 ± 44% -60.6% 4.33 ± 39% sched_debug.cfs_rq[9]:/.nr_spread_over
-247.50 ± -7% +16.2% -287.67 ±-10% sched_debug.cpu#0.nr_uninterruptible
-29.25 ±-37% -110.3% 3.00 ±244% sched_debug.cpu#1.nr_uninterruptible
15992 ± 3% +10.4% 17660 ± 3% sched_debug.cpu#1.ttwu_local
9910 ± 6% +29.7% 12854 ± 5% sched_debug.cpu#15.nr_switches
9912 ± 6% +29.7% 12855 ± 5% sched_debug.cpu#15.sched_count
4129 ± 6% +33.2% 5501 ± 8% sched_debug.cpu#15.sched_goidle
1461 ± 12% +33.9% 1956 ± 16% sched_debug.cpu#15.ttwu_local
21421 ± 8% -23.1% 16476 ± 16% sched_debug.cpu#18.nr_switches
21423 ± 8% -23.1% 16479 ± 16% sched_debug.cpu#18.sched_count
8919 ± 10% -23.9% 6787 ± 18% sched_debug.cpu#18.sched_goidle
3098 ± 4% -19.8% 2484 ± 8% sched_debug.cpu#18.ttwu_local
980386 ± 2% -7.5% 906703 ± 6% sched_debug.cpu#20.avg_idle
25056 ± 24% -35.5% 16150 ± 5% sched_debug.cpu#20.nr_switches
25060 ± 24% -35.5% 16154 ± 5% sched_debug.cpu#20.sched_count
10289 ± 33% -35.6% 6626 ± 5% sched_debug.cpu#20.sched_goidle
3706 ± 16% -31.6% 2533 ± 4% sched_debug.cpu#20.ttwu_local
10765 ± 3% -7.4% 9970 ± 1% sched_debug.cpu#21.nr_load_updates
22777 ± 17% -20.9% 18006 ± 6% sched_debug.cpu#21.nr_switches
22779 ± 17% -20.9% 18008 ± 6% sched_debug.cpu#21.sched_count
3401 ± 17% -21.3% 2678 ± 7% sched_debug.cpu#21.ttwu_local
3359 ± 8% -17.8% 2761 ± 12% sched_debug.cpu#22.ttwu_local
21013 ± 7% -20.3% 16754 ± 12% sched_debug.cpu#23.nr_switches
19.75 ± 30% -52.7% 9.33 ± 43% sched_debug.cpu#23.nr_uninterruptible
21015 ± 7% -20.3% 16757 ± 12% sched_debug.cpu#23.sched_count
8738 ± 9% -20.9% 6910 ± 13% sched_debug.cpu#23.sched_goidle
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/fs2/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/nfsv4/5K/400M/fsyncBeforeClose/16d/256fpd
commit:
2f008f5b6184088f3f435557f0592d876bda78b9
59fe8231b68905f4ca5f7a115b1a675829016b53
2f008f5b6184088f (base)          59fe8231b68905f4ca5f7a115b (this commit)
----------------                 --------------------------
     value ± %stddev      %change        value ± %stddev
24073648 ± 2% -20.1% 19225873 ± 6% fsmark.app_overhead
128.00 ± 0% -5.0% 121.60 ± 0% fsmark.files_per_sec
639.91 ± 0% +5.8% 676.92 ± 0% fsmark.time.elapsed_time
639.91 ± 0% +5.8% 676.92 ± 0% fsmark.time.elapsed_time.max
88472 ± 0% +14.1% 100955 ± 0% fsmark.time.involuntary_context_switches
374294 ± 0% -2.7% 364099 ± 0% fsmark.time.voluntary_context_switches
54180 ± 5% +16.4% 63054 ± 6% meminfo.DirectMap4k
88472 ± 0% +14.1% 100955 ± 0% time.involuntary_context_switches
18369 ± 2% -10.3% 16482 ± 2% slabinfo.kmalloc-128.active_objs
19466 ± 2% -8.0% 17905 ± 4% slabinfo.kmalloc-128.num_objs
1.80 ± 0% -5.2% 1.71 ± 0% turbostat.%Busy
52.50 ± 0% -5.4% 49.67 ± 0% turbostat.Avg_MHz
19.30 ± 0% +11.7% 21.56 ± 0% turbostat.CPU%c1
1590 ± 0% -5.0% 1511 ± 0% vmstat.io.bo
22896 ± 0% -4.0% 21990 ± 0% vmstat.system.cs
7893 ± 0% -5.9% 7428 ± 0% vmstat.system.in
4.168e+08 ± 0% +19.5% 4.981e+08 ± 0% cpuidle.C1-NHM.time
83043899 ± 0% +24.4% 1.033e+08 ± 1% cpuidle.C1E-NHM.time
84577 ± 1% +24.1% 104956 ± 0% cpuidle.C1E-NHM.usage
1327 ± 7% +14.2% 1516 ± 4% cpuidle.POLL.usage
75953 ± 0% +10.2% 83669 ± 0% latency_stats.avg.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs_atomic_open.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
1356468 ± 0% +4.5% 1416945 ± 0% latency_stats.sum.do_wait.SyS_wait4.entry_SYSCALL_64_fastpath
48762209 ± 89% -71.4% 13924614 ± 70% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
2460268 ± 0% +4.3% 2566834 ± 0% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
18440643 ± 3% -24.9% 13841944 ± 5% latency_stats.sum.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_lookup.[nfsv4].nfs4_proc_lookup_common.[nfsv4].nfs4_proc_lookup.[nfsv4].nfs_lookup_revalidate.nfs4_lookup_revalidate.lookup_dcache.__lookup_hash
5.926e+09 ± 0% +3.3% 6.124e+09 ± 0% latency_stats.sum.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_do_close.[nfsv4].__nfs4_close.[nfsv4].nfs4_close_sync.[nfsv4].nfs4_close_context.[nfsv4].__put_nfs_open_context.nfs_release.nfs_file_release.__fput.____fput.task_work_run
6.254e+09 ± 0% +10.2% 6.889e+09 ± 0% latency_stats.sum.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs_atomic_open.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
4.629e+09 ± 0% +7.0% 4.951e+09 ± 0% latency_stats.sum.wait_on_page_bit.filemap_fdatawait_range.filemap_write_and_wait_range.nfs4_file_fsync.[nfsv4].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
401.75 ± 5% -9.3% 364.33 ± 4% sched_debug.cfs_rq[0]:/.tg->runnable_avg
405.00 ± 5% -9.4% 367.00 ± 4% sched_debug.cfs_rq[1]:/.tg->runnable_avg
407.75 ± 5% -9.4% 369.33 ± 4% sched_debug.cfs_rq[2]:/.tg->runnable_avg
4097 ± 6% +15.9% 4747 ± 1% sched_debug.cfs_rq[3]:/.min_vruntime
4.33 ± 71% +300.0% 17.33 ± 24% sched_debug.cfs_rq[3]:/.runnable_load_avg
411.25 ± 5% -9.2% 373.33 ± 3% sched_debug.cfs_rq[3]:/.tg->runnable_avg
426.00 ± 45% +205.6% 1301 ± 43% sched_debug.cfs_rq[5]:/.blocked_load_avg
434.50 ± 44% +200.4% 1305 ± 43% sched_debug.cfs_rq[5]:/.tg_load_contrib
40927 ± 6% +36.6% 55890 ± 5% sched_debug.cpu#0.nr_load_updates
704201 ± 78% +260.9% 2541162 ± 20% sched_debug.cpu#0.nr_switches
-3973 ± 0% -9.1% -3614 ± 0% sched_debug.cpu#0.nr_uninterruptible
704467 ± 78% +260.8% 2541407 ± 20% sched_debug.cpu#0.sched_count
283563 ± 96% +322.9% 1199297 ± 21% sched_debug.cpu#0.sched_goidle
466777 ± 57% +208.6% 1440632 ± 19% sched_debug.cpu#0.ttwu_count
290893 ± 95% +317.8% 1215231 ± 21% sched_debug.cpu#0.ttwu_local
4.75 ± 40% -64.9% 1.67 ±101% sched_debug.cpu#1.cpu_load[1]
5.25 ± 36% -68.3% 1.67 ± 74% sched_debug.cpu#1.cpu_load[2]
7.50 ± 73% -73.3% 2.00 ± 81% sched_debug.cpu#2.cpu_load[2]
6.00 ± 42% -55.6% 2.67 ± 63% sched_debug.cpu#2.cpu_load[3]
5.00 ± 20% -40.0% 3.00 ± 47% sched_debug.cpu#2.cpu_load[4]
441.50 ± 7% +27.1% 561.33 ± 6% sched_debug.cpu#2.nr_uninterruptible
58471 ± 0% +11.7% 65289 ± 1% sched_debug.cpu#2.ttwu_local
403.50 ± 13% +32.1% 533.00 ± 9% sched_debug.cpu#3.nr_uninterruptible
529.75 ± 7% -46.3% 284.33 ± 4% sched_debug.cpu#5.nr_uninterruptible
34.00 ±141% +7756.9% 2671 ±127% sched_debug.cpu#6.load
527.50 ± 5% -42.6% 302.67 ± 6% sched_debug.cpu#6.nr_uninterruptible
555.50 ± 4% -46.7% 296.00 ± 1% sched_debug.cpu#7.nr_uninterruptible
1030379 ± 99% -97.8% 22439 ± 0% sched_debug.cpu#7.ttwu_local
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/fs2/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/nfsv4/8K/400M/fsyncBeforeClose/16d/256fpd
commit:
2f008f5b6184088f3f435557f0592d876bda78b9
59fe8231b68905f4ca5f7a115b1a675829016b53
2f008f5b6184088f (base)          59fe8231b68905f4ca5f7a115b (this commit)
----------------                 --------------------------
     value ± %stddev      %change        value ± %stddev
15540360 ± 8% -18.7% 12633119 ± 5% fsmark.app_overhead
118.40 ± 0% -5.4% 112.00 ± 0% fsmark.files_per_sec
436.94 ± 0% +4.9% 458.34 ± 0% fsmark.time.elapsed_time
436.94 ± 0% +4.9% 458.34 ± 0% fsmark.time.elapsed_time.max
55494 ± 0% +14.5% 63515 ± 0% fsmark.time.involuntary_context_switches
234664 ± 0% -2.8% 228125 ± 0% fsmark.time.voluntary_context_switches
1871 ± 8% -8.0% 1720 ± 5% slabinfo.proc_inode_cache.num_objs
55494 ± 0% +14.5% 63515 ± 0% time.involuntary_context_switches
2.794e+08 ± 1% +18.7% 3.317e+08 ± 1% cpuidle.C1-NHM.time
51201346 ± 2% +24.3% 63637344 ± 2% cpuidle.C1E-NHM.time
52453 ± 0% +23.8% 64952 ± 0% cpuidle.C1E-NHM.usage
1.86 ± 0% -4.3% 1.77 ± 0% turbostat.%Busy
55.00 ± 0% -3.6% 53.00 ± 0% turbostat.Avg_MHz
18.82 ± 1% +12.1% 21.10 ± 1% turbostat.CPU%c1
1454 ± 0% -4.2% 1393 ± 0% vmstat.io.bo
28806 ± 0% -3.4% 27821 ± 0% vmstat.system.cs
11085 ± 0% -4.8% 10553 ± 0% vmstat.system.in
13502037 ±171% -88.6% 1537756 ±141% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
75526 ± 0% +9.7% 82875 ± 0% latency_stats.avg.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs_atomic_open.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
21836955 ±171% -93.0% 1537756 ±141% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
946840 ± 0% +4.0% 984300 ± 0% latency_stats.sum.do_wait.SyS_wait4.entry_SYSCALL_64_fastpath
26885081 ±172% -94.3% 1537756 ±141% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
1699379 ± 0% +3.6% 1760791 ± 0% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
11574601 ± 6% -22.9% 8922270 ± 4% latency_stats.sum.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_lookup.[nfsv4].nfs4_proc_lookup_common.[nfsv4].nfs4_proc_lookup.[nfsv4].nfs_lookup_revalidate.nfs4_lookup_revalidate.lookup_dcache.__lookup_hash
3.694e+09 ± 0% +2.9% 3.799e+09 ± 0% latency_stats.sum.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_do_close.[nfsv4].__nfs4_close.[nfsv4].nfs4_close_sync.[nfsv4].nfs4_close_context.[nfsv4].__put_nfs_open_context.nfs_release.nfs_file_release.__fput.____fput.task_work_run
3.898e+09 ± 0% +9.7% 4.278e+09 ± 0% latency_stats.sum.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs_atomic_open.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
2.886e+09 ± 0% +6.4% 3.072e+09 ± 0% latency_stats.sum.wait_on_page_bit.filemap_fdatawait_range.filemap_write_and_wait_range.nfs4_file_fsync.[nfsv4].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
7.00 ± 20% +39.3% 9.75 ± 8% sched_debug.cfs_rq[0]:/.nr_spread_over
2721 ± 8% -13.7% 2348 ± 6% sched_debug.cfs_rq[3]:/.avg->runnable_avg_sum
20.00 ± 65% -93.8% 1.25 ±173% sched_debug.cfs_rq[3]:/.runnable_load_avg
-1880 ±-10% +20.1% -2258 ± -4% sched_debug.cfs_rq[3]:/.spread0
59.00 ± 8% -14.0% 50.75 ± 7% sched_debug.cfs_rq[3]:/.tg_runnable_contrib
15.75 ± 14% -27.0% 11.50 ± 15% sched_debug.cfs_rq[6]:/.nr_spread_over
706214 ± 10% +18.3% 835635 ± 4% sched_debug.cpu#0.avg_idle
-2419 ± -1% -19.3% -1952 ± -1% sched_debug.cpu#0.nr_uninterruptible
99659 ± 13% +1470.7% 1565321 ± 93% sched_debug.cpu#2.ttwu_count
212.25 ± 13% +38.5% 294.00 ± 5% sched_debug.cpu#3.nr_uninterruptible
3.50 ± 91% +114.3% 7.50 ± 40% sched_debug.cpu#5.cpu_load[3]
342.00 ± 6% -55.0% 154.00 ± 11% sched_debug.cpu#5.nr_uninterruptible
328.75 ± 5% -47.8% 171.50 ± 12% sched_debug.cpu#6.nr_uninterruptible
17166 ± 16% -18.4% 14001 ± 3% sched_debug.cpu#7.nr_load_updates
465509 ±104% -72.1% 129808 ± 28% sched_debug.cpu#7.nr_switches
342.25 ± 1% -52.6% 162.25 ± 9% sched_debug.cpu#7.nr_uninterruptible
465602 ±104% -72.1% 129920 ± 28% sched_debug.cpu#7.sched_count
223413 ±109% -74.5% 56970 ± 32% sched_debug.cpu#7.sched_goidle
166932 ±157% -92.8% 12041 ± 4% sched_debug.cpu#7.ttwu_local
lkp-ws02: Westmere-EP
Memory: 16G
nhm4: Nehalem
Memory: 4G
Per-sample trend plots ([*] = bisect-good sample, [O] = bisect-bad sample) are included for:
  turbostat.Avg_MHz
  turbostat.%Busy
  fsmark.files_per_sec
  fsmark.time.elapsed_time
  fsmark.time.elapsed_time.max
  fsmark.time.voluntary_context_switches
  fsmark.time.involuntary_context_switches
  vmstat.io.bo
  vmstat.system.in
  sched_debug.cpu#0.nr_uninterruptible
To reproduce:
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: fsmark
default-monitors:
  wait: activate-monitor
  kmsg:
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
    interval: 10
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer:
  watchdog:
commit: ba0fe0e85621d7cf3945536fec80c0f79328c73a
model: Nehalem
nr_cpu: 8
memory: 4G
hdd_partitions: "/dev/disk/by-id/ata-WDC_WD1003FBYZ-010FB0_WD-WCAW36812041-part1"
swap_partitions: "/dev/disk/by-id/ata-WDC_WD1003FBYZ-010FB0_WD-WCAW36812041-part2"
rootfs_partition: "/dev/disk/by-id/ata-WDC_WD1003FBYZ-010FB0_WD-WCAW36812041-part3"
netconsole_port: 6649
category: benchmark
iterations: 1x
nr_threads: 32t
disk: 1HDD
fs: f2fs
fs2: nfsv4
fsmark:
  filesize: 8K
  test_size: 400M
  sync_method: fsyncBeforeClose
  nr_directories: 16d
  nr_files_per_directory: 256fpd
queue: cyclic
testbox: nhm4
tbox_group: nhm4
kconfig: x86_64-rhel
enqueue_time: 2015-07-29 20:54:40.248165108 +08:00
id: 0ddcb01da77a9327f2d3f9f5028f20a43a340af4
user: lkp
compiler: gcc-4.9
head_commit: ba0fe0e85621d7cf3945536fec80c0f79328c73a
base_commit: cbfe8fa6cd672011c755c3cd85c9ffd4e2d10a6f
branch: linux-devel/devel-hourly-2015073118
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/ba0fe0e85621d7cf3945536fec80c0f79328c73a/vmlinuz-4.2.0-rc4-02728-gba0fe0e"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/fsmark/performance-1x-32t-1HDD-f2fs-nfsv4-8K-400M-fsyncBeforeClose-16d-256fpd/nhm4/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ba0fe0e85621d7cf3945536fec80c0f79328c73a/0"
job_file: "/lkp/scheduled/nhm4/cyclic_fsmark-performance-1x-32t-1HDD-f2fs-nfsv4-8K-400M-fsyncBeforeClose-16d-256fpd-x86_64-rhel-CYCLIC_HEAD-ba0fe0e85621d7cf3945536fec80c0f79328c73a-20150729-97981-1pfnokn-0.yaml"
dequeue_time: 2015-08-01 00:56:11.629795788 +08:00
max_uptime: 1534.06
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/nhm4/cyclic_fsmark-performance-1x-32t-1HDD-f2fs-nfsv4-8K-400M-fsyncBeforeClose-16d-256fpd-x86_64-rhel-CYCLIC_HEAD-ba0fe0e85621d7cf3945536fec80c0f79328c73a-20150729-97981-1pfnokn-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015073118
- commit=ba0fe0e85621d7cf3945536fec80c0f79328c73a
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/ba0fe0e85621d7cf3945536fec80c0f79328c73a/vmlinuz-4.2.0-rc4-02728-gba0fe0e
- max_uptime=1534
- RESULT_ROOT=/result/fsmark/performance-1x-32t-1HDD-f2fs-nfsv4-8K-400M-fsyncBeforeClose-16d-256fpd/nhm4/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ba0fe0e85621d7cf3945536fec80c0f79328c73a/0
- LKP_SERVER=inn
- |-
  libata.force=1.5Gbps
  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal
  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/ba0fe0e85621d7cf3945536fec80c0f79328c73a/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs2.cgz,/lkp/benchmarks/fsmark.cgz"
job_state: finished
loadavg: 39.53 30.69 15.07 1/171 6861
start_time: '1438362512'
end_time: '1438362970'
version: "/lkp/lkp/.src-20150731-141803"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
mkfs -t f2fs /dev/sda1
mount -t f2fs /dev/sda1 /fs/sda1
/etc/init.d/rpcbind start
/etc/init.d/nfs-common start
/etc/init.d/nfs-kernel-server start
mount -t nfs -o vers=4 localhost:/fs/sda1 /nfs/sda1
./fs_mark -d /nfs/sda1/1 -d /nfs/sda1/2 -d /nfs/sda1/3 -d /nfs/sda1/4 -d /nfs/sda1/5 -d /nfs/sda1/6 -d /nfs/sda1/7 -d /nfs/sda1/8 -d /nfs/sda1/9 -d /nfs/sda1/10 -d /nfs/sda1/11 -d /nfs/sda1/12 -d /nfs/sda1/13 -d /nfs/sda1/14 -d /nfs/sda1/15 -d /nfs/sda1/16 -d /nfs/sda1/17 -d /nfs/sda1/18 -d /nfs/sda1/19 -d /nfs/sda1/20 -d /nfs/sda1/21 -d /nfs/sda1/22 -d /nfs/sda1/23 -d /nfs/sda1/24 -d /nfs/sda1/25 -d /nfs/sda1/26 -d /nfs/sda1/27 -d /nfs/sda1/28 -d /nfs/sda1/29 -d /nfs/sda1/30 -d /nfs/sda1/31 -d /nfs/sda1/32 -D 16 -N 256 -n 1600 -L 1 -S 1 -s 8192