[lkp] [jbd2] de92c8caf16: no primary result change, +270.9% vmstat.procs.b, -61.0% vmstat.procs.r
From: Huang Ying
Date: Wed Jun 17 2015 - 23:21:13 EST
FYI, we noticed the following changes on
git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit de92c8caf16ca84926fa31b7a5590c0fb9c0d5ca ("jbd2: speedup jbd2_journal_get_[write|undo]_access()")
It appears that after the commit more processes sit in uninterruptible sleep (vmstat.procs.b) and fewer are runnable (vmstat.procs.r).
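The two columns are the "b" and "r" fields of vmstat(8); a quick way to watch them during a run is (sketch, not part of the LKP harness):

vmstat -n 10 | awk 'NR > 2 { printf "r=%s b=%s\n", $1, $2 }'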
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/wait_disks_timeout/runtime/disk/md/iosched/fs/nr_threads:
lkp-st02/dd-write/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/300/5m/11HDD/JBOD/cfq/ext4/10dd
commit:
8b00f400eedf91d074f831077003c0d4d9147377
de92c8caf16ca84926fa31b7a5590c0fb9c0d5ca
8b00f400eedf91d0 de92c8caf16ca84926fa31b7a5
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
48241 ± 8% +139.2% 115399 ± 2% softirqs.SCHED
1565 ± 5% +12.4% 1759 ± 6% time.involuntary_context_switches
391202 ± 5% +110.7% 824392 ± 1% proc-vmstat.pgactivate
70008 ± 56% -73.1% 18828 ± 50% proc-vmstat.pgrotated
21.50 ± 10% +270.9% 79.75 ± 1% vmstat.procs.b
86.00 ± 1% -61.0% 33.50 ± 2% vmstat.procs.r
23827 ± 0% -2.7% 23187 ± 0% vmstat.system.in
2310 ± 4% -16.9% 1919 ± 10% slabinfo.ext4_io_end.active_objs
2310 ± 4% -16.9% 1919 ± 10% slabinfo.ext4_io_end.num_objs
2110 ± 18% +18.4% 2499 ± 3% slabinfo.kmalloc-96.active_objs
2163 ± 13% +15.5% 2499 ± 3% slabinfo.kmalloc-96.num_objs
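The slab figures above are sampled from /proc/slabinfo; a manual spot-check of the same caches (requires root) would be:

grep -E 'ext4_io_end|kmalloc-96' /proc/slabinfo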
1.683e+12 ± 1% -6.1% 1.58e+12 ± 0% perf-stat.L1-dcache-loads
6.332e+08 ± 2% -6.8% 5.904e+08 ± 1% perf-stat.L1-dcache-prefetches
9.849e+11 ± 1% -4.0% 9.457e+11 ± 0% perf-stat.L1-dcache-stores
6.932e+10 ± 1% +6.7% 7.397e+10 ± 0% perf-stat.L1-icache-load-misses
5.064e+12 ± 1% -5.7% 4.777e+12 ± 0% perf-stat.L1-icache-loads
1.823e+09 ± 1% +8.0% 1.969e+09 ± 0% perf-stat.LLC-load-misses
8.519e+11 ± 1% -3.5% 8.222e+11 ± 0% perf-stat.branch-instructions
8.99e+09 ± 2% -23.1% 6.916e+09 ± 0% perf-stat.branch-load-misses
8.513e+11 ± 1% -3.5% 8.216e+11 ± 0% perf-stat.branch-loads
8.967e+09 ± 2% -23.5% 6.858e+09 ± 0% perf-stat.branch-misses
8.059e+11 ± 1% -3.4% 7.786e+11 ± 0% perf-stat.bus-cycles
3.658e+09 ± 1% +5.6% 3.863e+09 ± 0% perf-stat.cache-misses
1.612e+11 ± 2% +5.6% 1.701e+11 ± 0% perf-stat.cache-references
6.452e+12 ± 1% -3.5% 6.227e+12 ± 0% perf-stat.cpu-cycles
92243 ± 6% +175.6% 254241 ± 2% perf-stat.cpu-migrations
1.305e+10 ± 12% -15.2% 1.106e+10 ± 4% perf-stat.dTLB-load-misses
1.683e+12 ± 1% -6.2% 1.579e+12 ± 0% perf-stat.dTLB-loads
9.845e+11 ± 1% -4.0% 9.446e+11 ± 0% perf-stat.dTLB-stores
31933083 ± 9% -31.3% 21923765 ± 2% perf-stat.iTLB-load-misses
4.638e+12 ± 1% -3.6% 4.471e+12 ± 0% perf-stat.iTLB-loads
4.639e+12 ± 1% -3.6% 4.471e+12 ± 0% perf-stat.instructions
6.448e+12 ± 1% -3.4% 6.229e+12 ± 0% perf-stat.ref-cycles
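The perf-stat monitor gathers these counters system-wide over the whole run; a hand-run approximation for the 5-minute window would be (sketch, event list abbreviated):

perf stat -a -e cycles,instructions,branch-misses,cache-misses,dTLB-load-misses -- sleep 300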
1.36 ± 4% +17.9% 1.60 ± 3% perf-profile.cpu-cycles.__clear_user.iov_iter_zero.read_iter_zero.__vfs_read.vfs_read
3.48 ± 4% +11.1% 3.87 ± 2% perf-profile.cpu-cycles.__ext4_get_inode_loc.ext4_get_inode_loc.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode
5.15 ± 1% +22.6% 6.31 ± 2% perf-profile.cpu-cycles.__ext4_handle_dirty_metadata.ext4_mark_iloc_dirty.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty
7.90 ± 3% -87.7% 0.98 ± 14% perf-profile.cpu-cycles.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty
8.53 ± 0% +17.7% 10.04 ± 1% perf-profile.cpu-cycles.__ext4_journal_start_sb.ext4_da_write_begin.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter
0.91 ± 3% +11.3% 1.01 ± 9% perf-profile.cpu-cycles.__ext4_journal_start_sb.ext4_dirty_inode.__mark_inode_dirty.generic_write_end.ext4_da_write_end
2.99 ± 2% +17.4% 3.50 ± 4% perf-profile.cpu-cycles.__ext4_journal_stop.ext4_da_write_end.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter
0.78 ± 6% +22.4% 0.96 ± 6% perf-profile.cpu-cycles.__ext4_journal_stop.ext4_dirty_inode.__mark_inode_dirty.generic_write_end.ext4_da_write_end
1.19 ± 3% +29.1% 1.53 ± 4% perf-profile.cpu-cycles.__find_get_block.__getblk_gfp.__ext4_get_inode_loc.ext4_get_inode_loc.ext4_reserve_inode_write
1.88 ± 4% +20.2% 2.26 ± 4% perf-profile.cpu-cycles.__getblk_gfp.__ext4_get_inode_loc.ext4_get_inode_loc.ext4_reserve_inode_write.ext4_mark_inode_dirty
26.69 ± 0% -16.5% 22.29 ± 1% perf-profile.cpu-cycles.__mark_inode_dirty.generic_write_end.ext4_da_write_end.generic_perform_write.__generic_file_write_iter
3.39 ± 3% +18.7% 4.02 ± 2% perf-profile.cpu-cycles.__vfs_read.vfs_read.sys_read.system_call_fastpath
0.99 ± 3% +36.5% 1.35 ± 5% perf-profile.cpu-cycles._raw_read_lock.jbd2__journal_start.__ext4_journal_start_sb.ext4_da_write_begin.generic_perform_write
4.13 ± 6% -99.8% 0.01 ±-10000% perf-profile.cpu-cycles.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty
34.70 ± 0% -10.1% 31.19 ± 1% perf-profile.cpu-cycles.ext4_da_write_end.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write
26.13 ± 1% -16.6% 21.80 ± 1% perf-profile.cpu-cycles.ext4_dirty_inode.__mark_inode_dirty.generic_write_end.ext4_da_write_end.generic_perform_write
3.76 ± 3% +9.2% 4.11 ± 2% perf-profile.cpu-cycles.ext4_get_inode_loc.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty
10.35 ± 0% +13.4% 11.74 ± 2% perf-profile.cpu-cycles.ext4_mark_iloc_dirty.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_write_end
23.99 ± 1% -19.4% 19.35 ± 2% perf-profile.cpu-cycles.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_write_end.ext4_da_write_end
11.94 ± 4% -53.5% 5.55 ± 3% perf-profile.cpu-cycles.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_write_end
30.83 ± 0% -13.8% 26.58 ± 1% perf-profile.cpu-cycles.generic_write_end.ext4_da_write_end.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter
7.42 ± 0% +16.8% 8.67 ± 1% perf-profile.cpu-cycles.jbd2__journal_start.__ext4_journal_start_sb.ext4_da_write_begin.generic_perform_write.__generic_file_write_iter
1.62 ± 7% -100.0% 0.00 ± -1% perf-profile.cpu-cycles.jbd2_journal_add_journal_head.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty
3.63 ± 1% +19.7% 4.34 ± 3% perf-profile.cpu-cycles.jbd2_journal_dirty_metadata.__ext4_handle_dirty_metadata.ext4_mark_iloc_dirty.ext4_mark_inode_dirty.ext4_dirty_inode
7.43 ± 3% -92.3% 0.57 ± 22% perf-profile.cpu-cycles.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode
1.01 ± 9% +45.9% 1.47 ± 5% perf-profile.cpu-cycles.jbd2_journal_grab_journal_head.jbd2_journal_dirty_metadata.__ext4_handle_dirty_metadata.ext4_mark_iloc_dirty.ext4_mark_inode_dirty
1.00 ± 6% -100.0% 0.00 ± -1% perf-profile.cpu-cycles.jbd2_journal_put_journal_head.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty
2.68 ± 1% +17.4% 3.15 ± 5% perf-profile.cpu-cycles.jbd2_journal_stop.__ext4_journal_stop.ext4_da_write_end.generic_perform_write.__generic_file_write_iter
3.89 ± 3% +18.0% 4.58 ± 2% perf-profile.cpu-cycles.start_this_handle.jbd2__journal_start.__ext4_journal_start_sb.ext4_da_write_begin.generic_perform_write
6.77 ± 1% +14.2% 7.74 ± 2% perf-profile.cpu-cycles.sys_read.system_call_fastpath
0.90 ± 10% -100.0% 0.00 ± -1% perf-profile.cpu-cycles.unlock_buffer.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write
5.98 ± 1% +16.8% 6.98 ± 1% perf-profile.cpu-cycles.vfs_read.sys_read.system_call_fastpath
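The perf-profile percentages come from sampled call-graph profiles; roughly the same view can be reproduced by hand with (sketch):

perf record -a -g -- sleep 300
perf report --stdio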
1323 ± 24% -67.3% 433.50 ± 14% sched_debug.cfs_rq[0]:/.utilization_load_avg
1153750 ± 5% +12.2% 1294517 ± 1% sched_debug.cfs_rq[1]:/.min_vruntime
-260439 ±-69% -103.8% 9885 ±199% sched_debug.cfs_rq[1]:/.spread0
1237 ± 18% -78.8% 262.50 ± 28% sched_debug.cfs_rq[1]:/.utilization_load_avg
87.25 ± 17% +165.6% 231.75 ± 56% sched_debug.cfs_rq[2]:/.load
942.25 ± 16% -78.5% 203.00 ± 36% sched_debug.cfs_rq[2]:/.utilization_load_avg
1749 ± 26% -48.3% 903.50 ± 72% sched_debug.cfs_rq[3]:/.blocked_load_avg
206.25 ± 39% -44.5% 114.50 ± 14% sched_debug.cfs_rq[3]:/.load
162.00 ± 3% -25.8% 120.25 ± 21% sched_debug.cfs_rq[3]:/.runnable_load_avg
-241296 ±-76% -102.5% 5936 ±118% sched_debug.cfs_rq[3]:/.spread0
1930 ± 22% -46.9% 1024 ± 63% sched_debug.cfs_rq[3]:/.tg_load_contrib
1203 ± 9% -72.3% 333.50 ± 46% sched_debug.cfs_rq[3]:/.utilization_load_avg
921.50 ± 95% +131.9% 2137 ± 37% sched_debug.cfs_rq[4]:/.blocked_load_avg
1080842 ± 3% +17.8% 1273061 ± 0% sched_debug.cfs_rq[4]:/.min_vruntime
-333430 ±-48% -96.5% -11621 ±-147% sched_debug.cfs_rq[4]:/.spread0
1077 ± 86% +110.7% 2269 ± 35% sched_debug.cfs_rq[4]:/.tg_load_contrib
1288 ± 19% -76.7% 300.50 ± 23% sched_debug.cfs_rq[4]:/.utilization_load_avg
1205 ± 14% -68.7% 376.75 ± 52% sched_debug.cfs_rq[5]:/.utilization_load_avg
1095954 ± 4% +17.7% 1290354 ± 0% sched_debug.cfs_rq[6]:/.min_vruntime
-318381 ±-49% -101.8% 5587 ±256% sched_debug.cfs_rq[6]:/.spread0
1265 ± 14% -71.3% 363.50 ± 46% sched_debug.cfs_rq[6]:/.utilization_load_avg
1251 ± 20% -76.9% 289.75 ± 32% sched_debug.cfs_rq[7]:/.utilization_load_avg
10.00 ± 23% -75.0% 2.50 ± 20% sched_debug.cpu#0.nr_running
10.50 ± 14% -76.2% 2.50 ± 44% sched_debug.cpu#1.nr_running
1793 ± 12% -26.9% 1311 ± 12% sched_debug.cpu#2.curr->pid
8.00 ± 26% -71.9% 2.25 ± 36% sched_debug.cpu#2.nr_running
-246.00 ±-46% -93.2% -16.75 ±-607% sched_debug.cpu#2.nr_uninterruptible
10.50 ± 14% -73.8% 2.75 ± 30% sched_debug.cpu#3.nr_running
128.50 ± 11% -20.0% 102.75 ± 8% sched_debug.cpu#4.cpu_load[0]
127.75 ± 9% -16.0% 107.25 ± 11% sched_debug.cpu#4.cpu_load[1]
10.50 ± 20% -73.8% 2.75 ± 30% sched_debug.cpu#4.nr_running
9.75 ± 15% -71.8% 2.75 ± 15% sched_debug.cpu#5.nr_running
122.00 ± 9% +19.7% 146.00 ± 8% sched_debug.cpu#6.cpu_load[1]
120.00 ± 7% +25.8% 151.00 ± 15% sched_debug.cpu#6.cpu_load[2]
10.75 ± 21% -72.1% 3.00 ± 52% sched_debug.cpu#6.nr_running
1971 ± 9% -34.0% 1300 ± 17% sched_debug.cpu#7.curr->pid
10.00 ± 23% -82.5% 1.75 ± 24% sched_debug.cpu#7.nr_running
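The sched_debug rows are periodic snapshots of /proc/sched_debug (available with CONFIG_SCHED_DEBUG=y); the same per-CPU run queue state can be inspected by hand with:

cat /proc/sched_debug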
40663 ± 2% -18.6% 33116 ± 0% latency_stats.avg.balance_dirty_pages.balance_dirty_pages_ratelimited.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write.SyS_write.system_call_fastpath
508303 ± 3% +3.3% 525071 ± 2% latency_stats.avg.blk_execute_rq.sg_io.scsi_cmd_ioctl.scsi_cmd_blk_ioctl.sd_ioctl.[sd_mod].blkdev_ioctl.block_ioctl.do_vfs_ioctl.SyS_ioctl.system_call_fastpath
65195 ± 4% -13.7% 56254 ± 3% latency_stats.avg.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_update_time.file_update_time.__generic_file_write_iter.ext4_file_write_iter.__vfs_write
3440 ± 71% -100.0% 0.00 ± -1% latency_stats.avg.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_ext_truncate.ext4_truncate.ext4_setattr.notify_change.do_truncate.do_sys_ftruncate.SyS_ftruncate
453.25 ± 4% +67.1% 757.50 ± 1% latency_stats.avg.do_wait.SyS_wait4.system_call_fastpath
968.50 ± 16% +28.9% 1248 ± 10% latency_stats.avg.ep_poll.SyS_epoll_wait.system_call_fastpath
438.50 ± 8% -32.7% 295.00 ± 1% latency_stats.avg.ring_buffer_wait.wait_on_pipe.tracing_wait_pipe.tracing_read_pipe.__vfs_read.vfs_read.SyS_read.system_call_fastpath
686.00 ± 22% -21.1% 541.50 ± 16% latency_stats.avg.wait_for_response.[cifs].SendReceive.[cifs].CIFSPOSIXCreate.[cifs].cifs_posix_open.[cifs].cifs_open.[cifs].do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_open
874.50 ± 55% -47.7% 457.75 ± 11% latency_stats.avg.wait_for_response.[cifs].SendReceive.[cifs].CIFSPOSIXCreate.[cifs].cifs_posix_open.[cifs].cifs_open.[cifs].do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_openat
1169 ±140% -86.2% 161.75 ± 5% latency_stats.avg.wait_for_response.[cifs].SendReceive.[cifs].CIFSSMBUnixQPathInfo.[cifs].cifs_get_inode_info_unix.[cifs].cifs_lookup.[cifs].lookup_real.__lookup_hash.do_unlinkat.SyS_unlink.system_call_fastpath
199.00 ± 7% -19.5% 160.25 ± 15% latency_stats.avg.wait_for_response.[cifs].SendReceive2.[cifs].CIFSSMBWrite2.[cifs].cifs_sync_write.[cifs].cifs_write.[cifs].cifs_write_end.[cifs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.cifs_strict_writev.[cifs].__vfs_write.vfs_write
107505 ± 14% +51.7% 163094 ± 11% latency_stats.avg.wait_transaction_locked.start_this_handle.jbd2__journal_start.__ext4_journal_start_sb.ext4_dirty_inode.__mark_inode_dirty.generic_update_time.file_update_time.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write
103169 ± 12% +514.6% 634103 ± 1% latency_stats.hits.balance_dirty_pages.balance_dirty_pages_ratelimited.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write.SyS_write.system_call_fastpath
1.00 ± 81% +225.0% 3.25 ± 50% latency_stats.hits.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execveat_common.SyS_execve.return_from_execve
26.00 ± 14% +30.8% 34.00 ± 13% latency_stats.hits.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_da_write_end.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write.SyS_write
2835 ± 5% +11.4% 3158 ± 3% latency_stats.hits.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_update_time.file_update_time.__generic_file_write_iter.ext4_file_write_iter.__vfs_write
663.50 ± 10% -40.2% 396.75 ± 8% latency_stats.hits.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_write_end.ext4_da_write_end.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter
1.75 ± 47% -100.0% 0.00 ± -1% latency_stats.hits.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_ext_truncate.ext4_truncate.ext4_setattr.notify_change.do_truncate.do_sys_ftruncate.SyS_ftruncate
1661 ± 0% +17.3% 1948 ± 1% latency_stats.hits.do_wait.SyS_wait4.system_call_fastpath
142.50 ± 6% +15.3% 164.25 ± 11% latency_stats.hits.ep_poll.SyS_epoll_wait.system_call_fastpath
394313 ± 0% -10.2% 354275 ± 4% latency_stats.hits.hrtimer_nanosleep.SyS_nanosleep.system_call_fastpath
465945 ± 23% -37.9% 289496 ± 4% latency_stats.hits.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
337392 ± 8% -49.8% 169230 ± 8% latency_stats.hits.ring_buffer_wait.wait_on_pipe.tracing_wait_pipe.tracing_read_pipe.__vfs_read.vfs_read.SyS_read.system_call_fastpath
91.75 ± 47% -87.6% 11.33 ±141% latency_stats.hits.wait_iff_congested.shrink_inactive_list.shrink_lruvec.shrink_zone.do_try_to_free_pages.try_to_free_pages.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc.pagecache_get_page.grab_cache_page_write_begin.ext4_da_write_begin
1123 ± 3% -41.6% 656.00 ± 6% latency_stats.hits.wait_transaction_locked.start_this_handle.jbd2__journal_start.__ext4_journal_start_sb.ext4_da_write_begin.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write.SyS_write.system_call_fastpath
4105 ± 0% -38.2% 2535 ± 3% latency_stats.hits.wait_transaction_locked.start_this_handle.jbd2__journal_start.__ext4_journal_start_sb.ext4_dirty_inode.__mark_inode_dirty.generic_update_time.file_update_time.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write
1030093 ± 9% +11.9% 1152679 ± 6% latency_stats.max.blk_execute_rq.sg_io.scsi_cmd_ioctl.scsi_cmd_blk_ioctl.sd_ioctl.[sd_mod].blkdev_ioctl.block_ioctl.do_vfs_ioctl.SyS_ioctl.system_call_fastpath
309120 ± 43% -41.6% 180418 ± 23% latency_stats.max.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_da_write_end.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write.SyS_write
4459 ± 75% -100.0% 0.00 ± -1% latency_stats.max.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_ext_truncate.ext4_truncate.ext4_setattr.notify_change.do_truncate.do_sys_ftruncate.SyS_ftruncate
4823 ± 1% +2.5% 4943 ± 0% latency_stats.max.ep_poll.SyS_epoll_wait.system_call_fastpath
1271 ± 69% -55.1% 570.75 ± 8% latency_stats.max.wait_for_response.[cifs].SendReceive.[cifs].CIFSPOSIXCreate.[cifs].cifs_posix_open.[cifs].cifs_do_create.[cifs].cifs_atomic_open.[cifs].do_last.path_openat.do_filp_open.do_sys_open.SyS_open.system_call_fastpath
11400 ±126% -91.8% 939.75 ± 37% latency_stats.max.wait_for_response.[cifs].SendReceive.[cifs].CIFSPOSIXCreate.[cifs].cifs_posix_open.[cifs].cifs_open.[cifs].do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_openat
1169 ±140% -86.2% 161.75 ± 5% latency_stats.max.wait_for_response.[cifs].SendReceive.[cifs].CIFSSMBUnixQPathInfo.[cifs].cifs_get_inode_info_unix.[cifs].cifs_lookup.[cifs].lookup_real.__lookup_hash.do_unlinkat.SyS_unlink.system_call_fastpath
4.2e+09 ± 13% +399.9% 2.1e+10 ± 1% latency_stats.sum.balance_dirty_pages.balance_dirty_pages_ratelimited.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write.SyS_write.system_call_fastpath
24398593 ± 3% +3.3% 25203443 ± 2% latency_stats.sum.blk_execute_rq.sg_io.scsi_cmd_ioctl.scsi_cmd_blk_ioctl.sd_ioctl.[sd_mod].blkdev_ioctl.block_ioctl.do_vfs_ioctl.SyS_ioctl.system_call_fastpath
10100882 ± 15% -24.3% 7648970 ± 12% latency_stats.sum.call_rwsem_down_read_failed.ext4_da_get_block_prep.__block_write_begin.ext4_da_write_begin.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write.SyS_write.system_call_fastpath
6.00 ± 81% +216.7% 19.00 ± 56% latency_stats.sum.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execveat_common.SyS_execve.return_from_execve
38146064 ± 11% -40.7% 22612285 ± 6% latency_stats.sum.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_dirty_inode.__mark_inode_dirty.generic_write_end.ext4_da_write_end.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter
7994 ±100% -100.0% 0.00 ± -1% latency_stats.sum.do_get_write_access.jbd2_journal_get_write_access.__ext4_journal_get_write_access.ext4_reserve_inode_write.ext4_mark_inode_dirty.ext4_ext_truncate.ext4_truncate.ext4_setattr.notify_change.do_truncate.do_sys_ftruncate.SyS_ftruncate
753440 ± 3% +96.0% 1476939 ± 2% latency_stats.sum.do_wait.SyS_wait4.system_call_fastpath
139159 ± 21% +49.1% 207524 ± 22% latency_stats.sum.ep_poll.SyS_epoll_wait.system_call_fastpath
22735513 ± 0% -10.0% 20470551 ± 4% latency_stats.sum.hrtimer_nanosleep.SyS_nanosleep.system_call_fastpath
2.014e+08 ± 6% -51.4% 97800459 ± 4% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
1.488e+08 ± 14% -66.4% 49982280 ± 7% latency_stats.sum.ring_buffer_wait.wait_on_pipe.tracing_wait_pipe.tracing_read_pipe.__vfs_read.vfs_read.SyS_read.system_call_fastpath
79639 ± 22% -21.2% 62767 ± 17% latency_stats.sum.wait_for_response.[cifs].SendReceive.[cifs].CIFSPOSIXCreate.[cifs].cifs_posix_open.[cifs].cifs_open.[cifs].do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_open
25189 ± 56% -49.1% 12823 ± 11% latency_stats.sum.wait_for_response.[cifs].SendReceive.[cifs].CIFSPOSIXCreate.[cifs].cifs_posix_open.[cifs].cifs_open.[cifs].do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_openat
1169 ±140% -86.2% 161.75 ± 5% latency_stats.sum.wait_for_response.[cifs].SendReceive.[cifs].CIFSSMBUnixQPathInfo.[cifs].cifs_get_inode_info_unix.[cifs].cifs_lookup.[cifs].lookup_real.__lookup_hash.do_unlinkat.SyS_unlink.system_call_fastpath
24794 ± 13% -25.1% 18564 ± 14% latency_stats.sum.wait_for_response.[cifs].SendReceive2.[cifs].CIFSSMBWrite2.[cifs].cifs_sync_write.[cifs].cifs_write.[cifs].cifs_write_end.[cifs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.cifs_strict_writev.[cifs].__vfs_write.vfs_write
108340 ± 27% -81.3% 20256 ±141% latency_stats.sum.wait_iff_congested.shrink_inactive_list.shrink_lruvec.shrink_zone.do_try_to_free_pages.try_to_free_pages.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc.pagecache_get_page.grab_cache_page_write_begin.ext4_da_write_begin
1.015e+08 ± 11% -36.3% 64626646 ± 16% latency_stats.sum.wait_transaction_locked.start_this_handle.jbd2__journal_start.__ext4_journal_start_sb.ext4_da_write_begin.generic_perform_write.__generic_file_write_iter.ext4_file_write_iter.__vfs_write.vfs_write.SyS_write.system_call_fastpath
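latency_stats depends on CONFIG_LATENCYTOP; to collect the same data outside the harness (sketch; each record carries the hit count, total and maximum latency, and the blocked backtrace):

echo 1 > /proc/sys/kernel/latencytop
cat /proc/latency_stats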
26524 ± 5% +16.1% 30791 ± 4% ftrace.balance_dirty_pages.sdb.bdi_dirty
179.00 ± 15% -46.4% 96.00 ± 4% ftrace.balance_dirty_pages.sdb.dirtied
179.25 ± 15% -45.0% 98.50 ± 4% ftrace.balance_dirty_pages.sdb.dirtied_pause
58927 ± 13% -80.8% 11327 ± 3% ftrace.balance_dirty_pages.sdb.dirty_ratelimit
-3430677 ±-173% -100.0% -11.00 ±-75% ftrace.balance_dirty_pages.sdb.pause
22.50 ± 25% +76.7% 39.75 ± 3% ftrace.balance_dirty_pages.sdb.period
80869 ± 18% -86.9% 10600 ± 2% ftrace.balance_dirty_pages.sdb.task_ratelimit
3430696 ±173% -100.0% 49.25 ± 18% ftrace.balance_dirty_pages.sdb.think
190.25 ± 13% -51.8% 91.75 ± 5% ftrace.balance_dirty_pages.sdc.dirtied
191.75 ± 13% -51.1% 93.75 ± 5% ftrace.balance_dirty_pages.sdc.dirtied_pause
58193 ± 19% -81.2% 10953 ± 5% ftrace.balance_dirty_pages.sdc.dirty_ratelimit
-424.50 ± -2% -98.4% -7.00 ±-286% ftrace.balance_dirty_pages.sdc.pause
20.00 ± 17% +448.8% 109.75 ±114% ftrace.balance_dirty_pages.sdc.period
76965 ± 21% -84.8% 11683 ± 7% ftrace.balance_dirty_pages.sdc.task_ratelimit
443.75 ± 2% -90.4% 42.75 ± 47% ftrace.balance_dirty_pages.sdc.think
23458 ± 9% +22.1% 28637 ± 7% ftrace.balance_dirty_pages.sdd.bdi_dirty
185.75 ± 14% -49.7% 93.50 ± 5% ftrace.balance_dirty_pages.sdd.dirtied
186.00 ± 14% -49.2% 94.50 ± 4% ftrace.balance_dirty_pages.sdd.dirtied_pause
61258 ± 14% -81.7% 11213 ± 3% ftrace.balance_dirty_pages.sdd.dirty_ratelimit
-416.00 ± -4% -100.1% 0.50 ±830% ftrace.balance_dirty_pages.sdd.pause
19.25 ± 22% +97.4% 38.00 ± 4% ftrace.balance_dirty_pages.sdd.period
86687 ± 20% -87.2% 11105 ± 2% ftrace.balance_dirty_pages.sdd.task_ratelimit
433.50 ± 4% -91.5% 36.75 ± 7% ftrace.balance_dirty_pages.sdd.think
186.75 ± 13% -53.1% 87.50 ± 0% ftrace.balance_dirty_pages.sde.dirtied
188.25 ± 13% -52.6% 89.25 ± 0% ftrace.balance_dirty_pages.sde.dirtied_pause
60931 ± 16% -82.8% 10461 ± 1% ftrace.balance_dirty_pages.sde.dirty_ratelimit
-3387398 ±-173% -100.0% -1.25 ±-1079% ftrace.balance_dirty_pages.sde.pause
19.25 ± 14% +94.8% 37.50 ± 1% ftrace.balance_dirty_pages.sde.period
80501 ± 23% -86.6% 10750 ± 3% ftrace.balance_dirty_pages.sde.task_ratelimit
3387416 ±173% -100.0% 37.00 ± 34% ftrace.balance_dirty_pages.sde.think
191.50 ± 8% -52.3% 91.25 ± 3% ftrace.balance_dirty_pages.sdf.dirtied
192.50 ± 8% -51.8% 92.75 ± 4% ftrace.balance_dirty_pages.sdf.dirtied_pause
59318 ± 10% -81.4% 11026 ± 1% ftrace.balance_dirty_pages.sdf.dirty_ratelimit
-425.75 ± -1% -96.6% -14.50 ±-84% ftrace.balance_dirty_pages.sdf.pause
21.50 ± 14% +77.9% 38.25 ± 3% ftrace.balance_dirty_pages.sdf.period
78892 ± 14% -85.6% 11371 ± 2% ftrace.balance_dirty_pages.sdf.task_ratelimit
446.00 ± 1% -88.5% 51.50 ± 25% ftrace.balance_dirty_pages.sdf.think
29879 ± 6% -10.9% 26610 ± 2% ftrace.balance_dirty_pages.sdg.bdi_dirty
181.75 ± 14% -51.3% 88.50 ± 0% ftrace.balance_dirty_pages.sdg.dirtied
182.50 ± 14% -50.3% 90.75 ± 1% ftrace.balance_dirty_pages.sdg.dirtied_pause
49491 ± 23% -78.7% 10564 ± 2% ftrace.balance_dirty_pages.sdg.dirty_ratelimit
-372.25 ± -5% -99.3% -2.75 ±-376% ftrace.balance_dirty_pages.sdg.pause
27.75 ± 16% +48.6% 41.25 ± 17% ftrace.balance_dirty_pages.sdg.period
63160 ± 27% -83.3% 10550 ± 5% ftrace.balance_dirty_pages.sdg.task_ratelimit
398.25 ± 3% -90.4% 38.25 ± 24% ftrace.balance_dirty_pages.sdg.think
184.00 ± 14% -50.7% 90.75 ± 5% ftrace.balance_dirty_pages.sdh.dirtied
185.50 ± 14% -50.0% 92.75 ± 4% ftrace.balance_dirty_pages.sdh.dirtied_pause
55615 ± 12% -80.6% 10769 ± 5% ftrace.balance_dirty_pages.sdh.dirty_ratelimit
-388.00 ±-10% -97.9% -8.00 ±-168% ftrace.balance_dirty_pages.sdh.pause
23.50 ± 16% +62.8% 38.25 ± 3% ftrace.balance_dirty_pages.sdh.period
70106 ± 20% -84.2% 11100 ± 6% ftrace.balance_dirty_pages.sdh.task_ratelimit
410.25 ± 9% -89.0% 45.00 ± 32% ftrace.balance_dirty_pages.sdh.think
176.25 ± 14% -47.1% 93.25 ± 2% ftrace.balance_dirty_pages.sdi.dirtied
177.25 ± 14% -46.3% 95.25 ± 2% ftrace.balance_dirty_pages.sdi.dirtied_pause
45550 ± 19% -75.7% 11062 ± 3% ftrace.balance_dirty_pages.sdi.dirty_ratelimit
-363.75 ± -8% -95.3% -17.25 ±-85% ftrace.balance_dirty_pages.sdi.pause
2.00 ± 50% -87.5% 0.25 ±173% ftrace.balance_dirty_pages.sdi.paused
29.25 ± 4% +35.0% 39.50 ± 1% ftrace.balance_dirty_pages.sdi.period
54404 ± 24% -80.4% 10681 ± 6% ftrace.balance_dirty_pages.sdi.task_ratelimit
391.75 ± 7% -85.8% 55.75 ± 26% ftrace.balance_dirty_pages.sdi.think
180.50 ± 11% -48.2% 93.50 ± 4% ftrace.balance_dirty_pages.sdk.dirtied
181.75 ± 12% -47.5% 95.50 ± 3% ftrace.balance_dirty_pages.sdk.dirtied_pause
53838 ± 14% -79.4% 11111 ± 3% ftrace.balance_dirty_pages.sdk.dirty_ratelimit
-383.50 ± -8% -97.1% -11.25 ±-84% ftrace.balance_dirty_pages.sdk.pause
23.75 ± 22% +65.3% 39.25 ± 3% ftrace.balance_dirty_pages.sdk.period
73059 ± 22% -84.8% 11122 ± 6% ftrace.balance_dirty_pages.sdk.task_ratelimit
406.50 ± 6% -88.0% 48.75 ± 20% ftrace.balance_dirty_pages.sdk.think
179.75 ± 14% -49.5% 90.75 ± 3% ftrace.balance_dirty_pages.sdl.dirtied
180.50 ± 14% -48.2% 93.50 ± 3% ftrace.balance_dirty_pages.sdl.dirtied_pause
59262 ± 9% -81.7% 10868 ± 4% ftrace.balance_dirty_pages.sdl.dirty_ratelimit
-401.25 ± -2% -98.0% -8.00 ±-115% ftrace.balance_dirty_pages.sdl.pause
21.00 ± 10% +82.1% 38.25 ± 2% ftrace.balance_dirty_pages.sdl.period
77465 ± 15% -86.1% 10789 ± 4% ftrace.balance_dirty_pages.sdl.task_ratelimit
421.75 ± 1% -89.4% 44.75 ± 21% ftrace.balance_dirty_pages.sdl.think
185.75 ± 13% -49.0% 94.75 ± 4% ftrace.balance_dirty_pages.sdm.dirtied
186.75 ± 13% -48.7% 95.75 ± 4% ftrace.balance_dirty_pages.sdm.dirtied_pause
59214 ± 18% -81.0% 11276 ± 3% ftrace.balance_dirty_pages.sdm.dirty_ratelimit
-402.75 ± -3% -98.8% -5.00 ±-183% ftrace.balance_dirty_pages.sdm.pause
1.50 ± 57% -100.0% 0.00 ± 0% ftrace.balance_dirty_pages.sdm.paused
21.25 ± 20% +84.7% 39.25 ± 2% ftrace.balance_dirty_pages.sdm.period
80010 ± 25% -85.6% 11485 ± 7% ftrace.balance_dirty_pages.sdm.task_ratelimit
422.00 ± 3% -89.9% 42.50 ± 20% ftrace.balance_dirty_pages.sdm.think
60203 ± 14% -81.5% 11163 ± 4% ftrace.bdi_dirty_ratelimit.sdb.balanced_dirty_ratelimit
58856 ± 13% -80.9% 11269 ± 3% ftrace.bdi_dirty_ratelimit.sdb.dirty_ratelimit
80299 ± 18% -87.2% 10313 ± 3% ftrace.bdi_dirty_ratelimit.sdb.task_ratelimit
58684 ± 18% -81.5% 10842 ± 3% ftrace.bdi_dirty_ratelimit.sdc.balanced_dirty_ratelimit
58173 ± 19% -81.3% 10902 ± 5% ftrace.bdi_dirty_ratelimit.sdc.dirty_ratelimit
77036 ± 21% -85.1% 11441 ± 9% ftrace.bdi_dirty_ratelimit.sdc.task_ratelimit
63739 ± 14% -82.8% 10955 ± 1% ftrace.bdi_dirty_ratelimit.sdd.balanced_dirty_ratelimit
61299 ± 14% -81.8% 11184 ± 3% ftrace.bdi_dirty_ratelimit.sdd.dirty_ratelimit
86718 ± 20% -87.4% 10969 ± 1% ftrace.bdi_dirty_ratelimit.sdd.task_ratelimit
62431 ± 16% -82.8% 10760 ± 3% ftrace.bdi_dirty_ratelimit.sde.balanced_dirty_ratelimit
60940 ± 16% -82.9% 10415 ± 1% ftrace.bdi_dirty_ratelimit.sde.dirty_ratelimit
80144 ± 22% -86.9% 10513 ± 4% ftrace.bdi_dirty_ratelimit.sde.task_ratelimit
61030 ± 11% -81.6% 11236 ± 1% ftrace.bdi_dirty_ratelimit.sdf.balanced_dirty_ratelimit
59341 ± 10% -81.5% 10997 ± 1% ftrace.bdi_dirty_ratelimit.sdf.dirty_ratelimit
78573 ± 14% -85.5% 11382 ± 2% ftrace.bdi_dirty_ratelimit.sdf.task_ratelimit
49805 ± 24% -78.4% 10782 ± 5% ftrace.bdi_dirty_ratelimit.sdg.balanced_dirty_ratelimit
49586 ± 23% -78.8% 10508 ± 2% ftrace.bdi_dirty_ratelimit.sdg.dirty_ratelimit
62941 ± 28% -83.5% 10409 ± 6% ftrace.bdi_dirty_ratelimit.sdg.task_ratelimit
56232 ± 13% -80.5% 10962 ± 7% ftrace.bdi_dirty_ratelimit.sdh.balanced_dirty_ratelimit
55658 ± 12% -80.7% 10737 ± 6% ftrace.bdi_dirty_ratelimit.sdh.dirty_ratelimit
69833 ± 20% -84.4% 10906 ± 7% ftrace.bdi_dirty_ratelimit.sdh.task_ratelimit
44050 ± 17% -75.7% 10685 ± 4% ftrace.bdi_dirty_ratelimit.sdi.balanced_dirty_ratelimit
45546 ± 19% -75.8% 11013 ± 3% ftrace.bdi_dirty_ratelimit.sdi.dirty_ratelimit
54325 ± 24% -80.6% 10513 ± 7% ftrace.bdi_dirty_ratelimit.sdi.task_ratelimit
54470 ± 16% -80.2% 10809 ± 2% ftrace.bdi_dirty_ratelimit.sdk.balanced_dirty_ratelimit
53799 ± 14% -79.4% 11069 ± 4% ftrace.bdi_dirty_ratelimit.sdk.dirty_ratelimit
72874 ± 22% -85.2% 10787 ± 6% ftrace.bdi_dirty_ratelimit.sdk.task_ratelimit
60161 ± 10% -82.0% 10837 ± 2% ftrace.bdi_dirty_ratelimit.sdl.balanced_dirty_ratelimit
59266 ± 9% -81.7% 10819 ± 5% ftrace.bdi_dirty_ratelimit.sdl.dirty_ratelimit
77010 ± 16% -86.3% 10563 ± 2% ftrace.bdi_dirty_ratelimit.sdl.task_ratelimit
60632 ± 18% -81.0% 11544 ± 3% ftrace.bdi_dirty_ratelimit.sdm.balanced_dirty_ratelimit
59157 ± 18% -81.0% 11246 ± 3% ftrace.bdi_dirty_ratelimit.sdm.dirty_ratelimit
79666 ± 25% -86.0% 11179 ± 6% ftrace.bdi_dirty_ratelimit.sdm.task_ratelimit
263022 ± 7% -10.6% 235031 ± 4% ftrace.writeback_single_inode.sde.index
2635 ± 5% -12.1% 2317 ± 2% ftrace.writeback_single_inode.sde.wrote
2810 ± 8% -12.2% 2466 ± 4% ftrace.writeback_single_inode.sdf.wrote
1.00 ± 0% -100.0% 0.00 ± 0% ftrace.writeback_single_inode.sdg.age
3173 ± 10% -25.6% 2360 ± 4% ftrace.writeback_single_inode.sdg.wrote
1.00 ± 0% -100.0% 0.00 ± 0% ftrace.writeback_single_inode.sdh.age
3521 ± 14% -26.7% 2581 ± 7% ftrace.writeback_single_inode.sdi.wrote
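The ftrace.* rows are parsed from the writeback tracepoints that the job enables (see the echo lines in the reproduce script below); the raw events can be watched live with:

cat /sys/kernel/debug/tracing/trace_pipe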
lkp-st02: Core2
Memory: 8G
softirqs.SCHED
200000 ++-----------------------------------------------------------------+
180000 ++ O |
| O O |
160000 ++ O O |
140000 O+ O O O O O O O O |
| O O O O O |
120000 ++ O O O O O O O
100000 ++ |
80000 ++ |
| |
60000 ++ .*.*.. .*. .*.. .*. |
40000 ++ *.*..*.*. *. *..*.*. *.*..*.*. * |
| + |
20000 ++ |
0 *+-O---------------------------------------------------------------+
perf-stat.cpu-migrations
400000 ++-------------------O---------------------------------------------+
| O |
350000 ++ O O O |
O O O O O O |
300000 ++ O O O O O O O O |
250000 ++ O O O O O O O
| |
200000 ++ |
| |
150000 ++ |
100000 ++ .*.*.. .*. .*.. |
| *.*..*.*. *. *..*.*. *.*..*.*..*.* |
50000 +++ |
|+ |
0 *+-O---------------------------------------------------------------+
vmstat.procs.r
100 ++--------------------------------------------------------------------+
90 ++ *. .*..*. .*.. |
| : *..*..*. .*.. .*. *. *.*..*..*.* |
80 ++ : *..*. * |
70 ++: |
| : |
60 ++: |
50 ++: |
40 ++ |
|: O O O O O O
30 O+ O O O O O O O O O O O O O O O O O |
20 ++ O O |
| |
10 ++ |
0 *+-O------------------------------------------------------------------+
vmstat.procs.b
100 ++--------------------------------------------------------------------+
90 ++ O O O O |
O O O O O O O O O O O O |
80 ++ O O O O O O O O O O
70 ++ |
| |
60 ++ |
50 ++ |
40 ++ |
| .*.. * |
30 ++ .*. .. + .*.. |
20 ++ .*..*..* * *..*.. .*.. .*.*. *.* |
| * * *. |
10 ++. |
0 *+-O------------------------------------------------------------------+
proc-vmstat.pgactivate
900000 ++----------------O--O------O--O----------------------O--O---------+
O O O O O O O O O O O O O O O O O O O O
800000 ++ |
700000 ++ |
| |
600000 ++ |
500000 ++ |
| .*.*.. .*. |
400000 ++ .*.. .*. *. *.. .*..*.*..*.* |
300000 ++ * * *.*..*..* |
| : |
200000 ++: |
100000 ++ |
|: |
0 *+-O---------------------------------------------------------------+
ftrace.balance_dirty_pages.sdc.pause
50 ++--------------------------------------------------------------------+
0 O+ O O O O O O O O O O O O O O O O O
| O O O O O O O O O |
-50 ++ |
-100 ++ |
|: |
-150 ++ |
-200 ++: |
-250 ++: |
| : |
-300 ++: *. .* |
-350 ++ : * * .. *. + |
| : .. + + + .* + |
-400 ++ *.*.. .*.* + + *. *. .*..*. |
-450 ++------*----------*--------------------*-------*---------------------+
ftrace.balance_dirty_pages.sdc.think
500 ++--------------------------------------------------------------------+
450 ++ .*..*. *.. .*.. .* |
| *.*. *.. + .*.. * *..* |
400 ++ : + * *.. + |
350 ++ : * .*.. + |
| : * * |
300 ++: |
250 ++: |
200 ++: |
|: |
150 ++ |
100 ++ |
|: O O O |
50 O+ O O O O O O O O O O O O O O O O O O O O O
0 *+-O--------------------------O---------------------------------------+
ftrace.balance_dirty_pages.sdg.pause
50 ++--------------------------------------------------------------------+
0 O+ O O O O O O O O O O O O O O O O O O
| O O O O O O O O |
-50 ++ |
-100 ++ |
|: |
-150 ++ |
-200 ++: |
-250 ++: |
| : * * |
-300 ++: .* : : + + |
-350 ++ : *. + : : + *..*.. .*.. * |
| : + + : : .* *.*. + |
-400 ++ *.*..*..* * *. * |
-450 ++--------------------------------------------------------------------+
ftrace.balance_dirty_pages.sdg.think
450 ++------*--*-------*----*---------------------------------------------+
| *.*. : + : : .*. .*. |
400 ++ : : + : : * .*. *..*. * |
350 ++ : *..* : : + *. |
| : * + + |
300 ++: * |
250 ++: |
| : |
200 ++ |
150 ++ |
|: |
100 ++ |
50 ++ O O O O |
O O O O O O O O O O O O O O O O O O O O O O
0 *+-O------------------------------------------------------------------+
ftrace.balance_dirty_pages.sdl.pause
50 ++--------------------------------------------------------------------+
0 O+ O O O O O O O O O O O O O O
| O O O O O O O O O O O O |
-50 ++ |
-100 ++ |
|: |
-150 ++ |
-200 ++: |
-250 ++: * |
| : : : |
-300 ++: : : * .*. |
-350 ++ : : : .. : *. *..*.. |
| *. : * : .. .*.. |
-400 ++ *.. .*.* * *.*. *.* |
-450 ++------*-------------------------------------------------------------+
ftrace.balance_dirty_pages.sdl.think
500 ++--------------------------------------------------------------------+
450 ++ .*.. |
| .*. *.* *.. *.. *.*.. .*.* |
400 ++ * : : + .. *. |
350 ++ : : : * *.. .*..* |
| : : : * |
300 ++: * |
250 ++: |
200 ++: |
|: |
150 ++ |
100 ++ |
|: O O O |
50 O+ O O O O O O O O O O O O O O O O O O O O O
0 *+-O---------------O--------------------------------------------------+
[*] bisect-good sample
[O] bisect-bad sample
To reproduce:
apt-get install ruby
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
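To inspect the suspected commit itself in a local kernel tree (sketch):
git show de92c8caf16ca84926fa31b7a5590c0fb9c0d5ca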
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: dd-write
default-monitors:
wait: pre-test
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
interval: 10
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 60
default-watchdogs:
watch-oom:
watchdog:
cpufreq_governor:
commit: a3aca33b3af0f6961719c68a5b1b561206eaa4b8
model: Core2
memory: 8G
nr_hdd_partitions: 12
wait_disks_timeout: 300
hdd_partitions: "/dev/disk/by-id/scsi-35000c5000???????"
swap_partitions:
runtime: 5m
disk: 11HDD
md: JBOD
iosched: cfq
fs: ext4
fs2:
monitors:
perf-stat:
perf-profile:
ftrace:
events: balance_dirty_pages bdi_dirty_ratelimit global_dirty_state writeback_single_inode
nr_threads: 10dd
dd:
testbox: lkp-st02
tbox_group: lkp-st02
kconfig: x86_64-rhel
enqueue_time: 2015-06-15 11:39:27.015808126 +08:00
user: lkp
queue: cyclic
compiler: gcc-4.9
head_commit: a3aca33b3af0f6961719c68a5b1b561206eaa4b8
base_commit: 0f57d86787d8b1076ea8f9cbdddda2a46d534a27
branch: linux-devel/devel-hourly-2015061609
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/a3aca33b3af0f6961719c68a5b1b561206eaa4b8/vmlinuz-4.1.0-rc8-wl-04178-ga3aca33"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/dd-write/300-5m-11HDD-JBOD-cfq-ext4-10dd/lkp-st02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/a3aca33b3af0f6961719c68a5b1b561206eaa4b8/0"
job_file: "/lkp/scheduled/lkp-st02/cyclic_dd-write-300-5m-11HDD-JBOD-cfq-ext4-10dd-x86_64-rhel-CYCLIC_HEAD-a3aca33b3af0f6961719c68a5b1b561206eaa4b8-0-20150615-96200-tjp09g.yaml"
dequeue_time: 2015-06-16 09:49:33.945435115 +08:00
nr_cpu: "$(nproc)"
max_uptime: 1500
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-st02/cyclic_dd-write-300-5m-11HDD-JBOD-cfq-ext4-10dd-x86_64-rhel-CYCLIC_HEAD-a3aca33b3af0f6961719c68a5b1b561206eaa4b8-0-20150615-96200-tjp09g.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015061609
- commit=a3aca33b3af0f6961719c68a5b1b561206eaa4b8
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/a3aca33b3af0f6961719c68a5b1b561206eaa4b8/vmlinuz-4.1.0-rc8-wl-04178-ga3aca33
- max_uptime=1500
- RESULT_ROOT=/result/dd-write/300-5m-11HDD-JBOD-cfq-ext4-10dd/lkp-st02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/a3aca33b3af0f6961719c68a5b1b561206eaa4b8/0
- LKP_SERVER=inn
- |2-
earlyprintk=ttyS0,115200 systemd.log_level=err
debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
console=ttyS0,115200 console=tty0 vga=normal
rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/a3aca33b3af0f6961719c68a5b1b561206eaa4b8/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs2.cgz"
job_state: finished
loadavg: 65.04 66.52 31.46 1/169 5575
start_time: '1434419421'
end_time: '1434419742'
version: "/lkp/lkp/.src-20150616-071222"
time_delta: '1434419386.307633460'
mkfs -t ext4 -q -F /dev/sdh
mkfs -t ext4 -q -F /dev/sdi
mount -t ext4 /dev/sdb /fs/sdb
mount -t ext4 /dev/sdg /fs/sdg
mount -t ext4 /dev/sdi /fs/sdi
mount -t ext4 /dev/sdh /fs/sdh
mount -t ext4 /dev/sdl /fs/sdl
mount -t ext4 /dev/sdf /fs/sdf
mount -t ext4 /dev/sdm /fs/sdm
mount -t ext4 /dev/sdk /fs/sdk
mount -t ext4 /dev/sdd /fs/sdd
mount -t ext4 /dev/sde /fs/sde
mount -t ext4 /dev/sdc /fs/sdc
echo 1 > /sys/kernel/debug/tracing/events/writeback/balance_dirty_pages/enable
echo 1 > /sys/kernel/debug/tracing/events/writeback/bdi_dirty_ratelimit/enable
echo 1 > /sys/kernel/debug/tracing/events/writeback/global_dirty_state/enable
echo 1 > /sys/kernel/debug/tracing/events/writeback/writeback_single_inode/enable
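# Note: the four writeback tracepoints enabled above feed the ftrace.* rows
# (balance_dirty_pages, bdi_dirty_ratelimit, writeback_single_inode) in the
# comparison table earlier in this report.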
dd if=/dev/zero of=/fs/sdb/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-1 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-2 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-3 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-4 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-5 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-6 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-7 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-8 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-9 status=noxfer &
dd if=/dev/zero of=/fs/sdb/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdg/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdi/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdh/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdl/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdf/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdm/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdk/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdd/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sde/zero-10 status=noxfer &
dd if=/dev/zero of=/fs/sdc/zero-10 status=noxfer &
sleep 289
killall -9 dd
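For reference, the dd fan-out above is equivalent to this compact form (sketch):

for disk in sdb sdc sdd sde sdf sdg sdh sdi sdk sdl sdm; do
    for i in $(seq 1 10); do
        dd if=/dev/zero of=/fs/$disk/zero-$i status=noxfer &
    done
done
sleep 289
killall -9 dd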