[lkp] [thp] 79553da293d: +1.8% fileio.time.file_system_inputs
From: Huang Ying
Date: Tue Jul 07 2015 - 21:58:13 EST
FYI, we noticed the following changes on
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit 79553da293d38d63097278de13e28a3b371f43c1 ("thp: cleanup khugepaged startup")
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/disk/iosched/fs/nr_threads:
bay/dd-write/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1HDD/cfq/xfs/10dd
commit:
e39155ea11eac6da056b04669d7c9fc612e2065a
79553da293d38d63097278de13e28a3b371f43c1
e39155ea11eac6da 79553da293d38d63097278de13
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
30460 ± 0% +34.3% 40920 ± 0% softirqs.BLOCK
231.27 ± 4% -17.3% 191.30 ± 3% uptime.idle
765.75 ± 7% +19.7% 916.50 ± 4% slabinfo.kmalloc-512.active_objs
972.25 ± 8% +17.4% 1141 ± 5% slabinfo.kmalloc-512.num_objs
74.00 ± 0% +15.9% 85.75 ± 10% vmstat.memory.buff
91092 ± 0% -62.3% 34370 ± 1% vmstat.memory.free
22460 ± 1% +29.2% 29026 ± 1% ftrace.global_dirty_state.dirty
35516 ± 1% -11.0% 31615 ± 0% ftrace.global_dirty_state.writeback
2.00 ± 0% +50.0% 3.00 ± 0% ftrace.writeback_single_inode.sda.age
4913 ± 1% +35.7% 6666 ± 1% ftrace.writeback_single_inode.sda.wrote
36958 ± 4% -67.3% 12083 ± 28% meminfo.AnonHugePages
89634 ± 0% +29.2% 115815 ± 1% meminfo.Dirty
1315242 ± 0% +10.4% 1451507 ± 0% meminfo.MemAvailable
87870 ± 0% -63.5% 32046 ± 2% meminfo.MemFree
142291 ± 1% -11.2% 126331 ± 0% meminfo.Writeback
2.76 ± 14% +22.8% 3.39 ± 8% perf-profile.cpu-cycles.__clear_user.iov_iter_zero.read_iter_zero.new_sync_read.__vfs_read
4.94 ± 16% +31.2% 6.48 ± 6% perf-profile.cpu-cycles.__memset.xfs_vm_write_begin.generic_perform_write.xfs_file_buffered_aio_write.xfs_file_write_iter
1.31 ± 34% -50.4% 0.65 ± 36% perf-profile.cpu-cycles.end_page_writeback.end_buffer_async_write.xfs_destroy_ioend.xfs_end_io.process_one_work
0.86 ± 24% -44.1% 0.48 ± 42% perf-profile.cpu-cycles.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
1.15 ± 27% -50.4% 0.57 ± 39% perf-profile.cpu-cycles.test_clear_page_writeback.end_page_writeback.end_buffer_async_write.xfs_destroy_ioend.xfs_end_io
0.76 ± 39% +65.6% 1.26 ± 23% perf-profile.cpu-cycles.try_to_free_buffers.xfs_vm_releasepage.try_to_release_page.shrink_page_list.shrink_inactive_list
0.00 ± -1% +Inf% 9581 ± 43% latency_stats.avg.do_truncate.do_sys_ftruncate.SyS_ftruncate.system_call_fastpath
0.00 ± -1% +Inf% 500330 ± 58% latency_stats.avg.xfs_file_buffered_aio_write.xfs_file_write_iter.new_sync_write.vfs_write.SyS_write.system_call_fastpath
163371 ± 3% +26.6% 206775 ± 1% latency_stats.hits.ring_buffer_wait.wait_on_pipe.tracing_wait_pipe.tracing_read_pipe.__vfs_read.vfs_read.SyS_read.system_call_fastpath
0.00 ± -1% +Inf% 32695 ± 10% latency_stats.max.do_truncate.do_sys_ftruncate.SyS_ftruncate.system_call_fastpath
0.00 ± -1% +Inf% 669208 ± 18% latency_stats.max.xfs_file_buffered_aio_write.xfs_file_write_iter.new_sync_write.vfs_write.SyS_write.system_call_fastpath
0.00 ± -1% +Inf% 53331 ± 28% latency_stats.sum.do_truncate.do_sys_ftruncate.SyS_ftruncate.system_call_fastpath
0.00 ± -1% +Inf% 706140 ± 12% latency_stats.sum.xfs_file_buffered_aio_write.xfs_file_write_iter.new_sync_write.vfs_write.SyS_write.system_call_fastpath
394.50 ± 24% -53.2% 184.50 ± 37% sched_debug.cfs_rq[3]:/.load
37.00 ± 34% +51.4% 56.00 ± 17% sched_debug.cpu#1.cpu_load[1]
84.75 ± 19% -52.8% 40.00 ± 68% sched_debug.cpu#3.cpu_load[0]
69.25 ± 20% -50.5% 34.25 ± 34% sched_debug.cpu#3.cpu_load[1]
50.25 ± 11% -45.8% 27.25 ± 23% sched_debug.cpu#3.cpu_load[2]
36.25 ± 5% -37.9% 22.50 ± 22% sched_debug.cpu#3.cpu_load[3]
394.50 ± 24% -53.2% 184.50 ± 37% sched_debug.cpu#3.load
2.138e+11 ± 0% -1.4% 2.108e+11 ± 0% perf-stat.L1-dcache-loads
2.483e+08 ± 0% -4.3% 2.376e+08 ± 0% perf-stat.L1-dcache-prefetches
4.605e+09 ± 0% +7.8% 4.962e+09 ± 0% perf-stat.L1-icache-load-misses
5.684e+11 ± 0% +2.0% 5.797e+11 ± 0% perf-stat.L1-icache-loads
1.637e+08 ± 0% +15.8% 1.895e+08 ± 1% perf-stat.LLC-load-misses
1.089e+11 ± 0% +1.3% 1.103e+11 ± 0% perf-stat.branch-loads
8.163e+10 ± 0% +2.9% 8.396e+10 ± 1% perf-stat.bus-cycles
3.116e+08 ± 1% +9.6% 3.415e+08 ± 0% perf-stat.cache-misses
9306030 ± 1% -1.4% 9171655 ± 0% perf-stat.context-switches
151544 ± 2% +8.3% 164060 ± 1% perf-stat.cpu-migrations
2.299e+08 ± 12% +25.5% 2.886e+08 ± 10% perf-stat.dTLB-load-misses
8302411 ± 3% +17.4% 9747172 ± 2% perf-stat.iTLB-load-misses
7.348e+11 ± 0% +2.3% 7.515e+11 ± 0% perf-stat.ref-cycles
52222 ± 6% -72.6% 14324 ± 28% proc-vmstat.compact_isolated
42280 ± 10% -71.6% 11991 ± 35% proc-vmstat.compact_migrate_scanned
5320 ± 1% -99.9% 6.50 ± 35% proc-vmstat.kswapd_high_wmark_hit_quickly
3377 ± 13% +602.5% 23725 ± 1% proc-vmstat.kswapd_low_wmark_hit_quickly
1446 ± 0% -86.9% 190.00 ± 4% proc-vmstat.nr_alloc_batch
22517 ± 0% +28.9% 29013 ± 1% proc-vmstat.nr_dirty
21984 ± 0% -63.5% 8021 ± 2% proc-vmstat.nr_free_pages
1142 ± 15% -24.3% 864.75 ± 4% proc-vmstat.nr_vmscan_immediate_reclaim
35464 ± 1% -11.0% 31557 ± 0% proc-vmstat.nr_writeback
8844 ± 4% +169.2% 23813 ± 1% proc-vmstat.pageoutrun
17473 ± 0% -91.7% 1447 ± 33% proc-vmstat.pgalloc_dma
24252 ± 5% -74.5% 6188 ± 25% proc-vmstat.pgmigrate_success
16476 ± 0% -94.4% 925.00 ± 48% proc-vmstat.pgrefill_dma
8267 ± 9% +816.4% 75755 ± 8% proc-vmstat.pgscan_direct_dma32
16728 ± 0% -94.5% 927.50 ± 38% proc-vmstat.pgscan_kswapd_dma
8267 ± 9% +128.8% 18917 ± 8% proc-vmstat.pgsteal_direct_dma32
15954 ± 0% -97.0% 483.75 ± 59% proc-vmstat.pgsteal_kswapd_dma
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/disk/iosched/fs/nr_threads:
bay/dd-write/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1HDD/cfq/xfs/1dd
commit:
e39155ea11eac6da056b04669d7c9fc612e2065a
79553da293d38d63097278de13e28a3b371f43c1
e39155ea11eac6da 79553da293d38d63097278de13
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
3.50 ± 14% +1750.0% 64.75 ± 15% ftrace.writeback_single_inode.sda.age
471.00 ± 2% +28.9% 607.00 ± 5% slabinfo.blkdev_requests.active_objs
476.25 ± 1% +29.6% 617.25 ± 4% slabinfo.blkdev_requests.num_objs
10396 ± 1% +45.5% 15129 ± 0% softirqs.BLOCK
37406 ± 3% +18.5% 44317 ± 4% softirqs.RCU
25121 ± 5% -59.5% 10170 ± 12% meminfo.AnonHugePages
1332448 ± 0% +10.1% 1466476 ± 0% meminfo.MemAvailable
89924 ± 0% -63.0% 33289 ± 0% meminfo.MemFree
69.25 ± 2% +32.9% 92.00 ± 11% vmstat.memory.buff
90542 ± 1% -62.6% 33856 ± 0% vmstat.memory.free
17946 ± 2% -8.6% 16406 ± 4% vmstat.system.cs
6455037 ±100% -99.7% 19975 ±173% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.new_sync_write.vfs_write.SyS_write.system_call_fastpath
108228 ± 3% +34.7% 145793 ± 5% latency_stats.hits.pipe_wait.pipe_read.new_sync_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
52101 ± 1% +140.2% 125153 ± 2% latency_stats.hits.ring_buffer_wait.wait_on_pipe.tracing_wait_pipe.tracing_read_pipe.__vfs_read.vfs_read.SyS_read.system_call_fastpath
6737920 ±100% -99.7% 19975 ±173% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.new_sync_write.vfs_write.SyS_write.system_call_fastpath
572233 ± 87% -41.2% 336545 ± 3% latency_stats.max.wait_on_page_bit.truncate_inode_pages_range.truncate_pagecache.truncate_setsize.xfs_setattr_size.xfs_vn_setattr.notify_change.do_truncate.do_sys_ftruncate.SyS_ftruncate.system_call_fastpath
9804520 ±112% -99.8% 19975 ±173% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.new_sync_write.vfs_write.SyS_write.system_call_fastpath
5007 ± 0% -99.8% 8.50 ± 35% proc-vmstat.kswapd_high_wmark_hit_quickly
4012 ± 5% +584.2% 27456 ± 2% proc-vmstat.kswapd_low_wmark_hit_quickly
1441 ± 0% -87.2% 184.75 ± 2% proc-vmstat.nr_alloc_batch
22489 ± 0% -63.0% 8329 ± 0% proc-vmstat.nr_free_pages
853.75 ± 6% +31.1% 1119 ± 8% proc-vmstat.nr_vmscan_immediate_reclaim
9282 ± 2% +196.4% 27510 ± 2% proc-vmstat.pageoutrun
15361 ± 12% -95.3% 717.00 ± 37% proc-vmstat.pgalloc_dma
14328 ± 12% -98.3% 249.75 ± 79% proc-vmstat.pgrefill_dma
14670 ± 12% -97.8% 321.50 ± 2% proc-vmstat.pgscan_kswapd_dma
13886 ± 13% -100.0% 0.00 ± -1% proc-vmstat.pgsteal_kswapd_dma
108.75 ± 29% -43.2% 61.75 ± 31% sched_debug.cfs_rq[2]:/.runnable_load_avg
396.25 ± 20% -45.9% 214.50 ± 18% sched_debug.cfs_rq[2]:/.utilization_load_avg
664.00 ± 34% +144.8% 1625 ± 59% sched_debug.cfs_rq[3]:/.blocked_load_avg
738.75 ± 31% +132.6% 1718 ± 57% sched_debug.cfs_rq[3]:/.tg_load_contrib
34.75 ± 36% +86.3% 64.75 ± 22% sched_debug.cpu#0.cpu_load[2]
29.00 ± 23% +92.2% 55.75 ± 28% sched_debug.cpu#0.cpu_load[3]
69144 ± 1% +17.7% 81411 ± 13% sched_debug.cpu#1.nr_load_updates
107.75 ± 22% -39.9% 64.75 ± 23% sched_debug.cpu#2.cpu_load[0]
2359 ± 26% -57.2% 1009 ± 34% sched_debug.cpu#2.curr->pid
414.50 ± 10% -32.8% 278.75 ± 23% sched_debug.cpu#2.load
184007 ± 12% +299.5% 735051 ± 61% sched_debug.cpu#3.sched_goidle
1.55 ± 12% +27.2% 1.97 ± 9% perf-profile.cpu-cycles.__xfs_get_blocks.xfs_get_blocks.__block_write_begin.xfs_vm_write_begin.generic_perform_write
1.34 ± 22% +46.9% 1.97 ± 24% perf-profile.cpu-cycles.cpu_startup_entry.rest_init.start_kernel.x86_64_start_reservations.x86_64_start_kernel
1.30 ± 20% -34.7% 0.85 ± 8% perf-profile.cpu-cycles.down_write.xfs_ilock.xfs_file_buffered_aio_write.xfs_file_write_iter.new_sync_write
0.30 ± 16% +109.0% 0.62 ± 50% perf-profile.cpu-cycles.fsnotify.vfs_read.sys_read.system_call_fastpath
12.11 ± 9% +19.3% 14.44 ± 9% perf-profile.cpu-cycles.kthread.ret_from_fork
1.34 ± 22% +46.9% 1.97 ± 24% perf-profile.cpu-cycles.rest_init.start_kernel.x86_64_start_reservations.x86_64_start_kernel
12.13 ± 9% +19.0% 14.44 ± 9% perf-profile.cpu-cycles.ret_from_fork
1.36 ± 21% -49.5% 0.69 ± 11% perf-profile.cpu-cycles.rw_verify_area.vfs_write.sys_write.system_call_fastpath
0.93 ± 27% -57.1% 0.40 ± 30% perf-profile.cpu-cycles.security_file_permission.rw_verify_area.vfs_write.sys_write.system_call_fastpath
1.34 ± 22% +46.9% 1.97 ± 24% perf-profile.cpu-cycles.start_kernel.x86_64_start_reservations.x86_64_start_kernel
0.80 ± 31% +76.3% 1.41 ± 15% perf-profile.cpu-cycles.tracing_read_pipe.__vfs_read.vfs_read.sys_read.system_call_fastpath
1.34 ± 22% +46.9% 1.97 ± 24% perf-profile.cpu-cycles.x86_64_start_kernel
1.34 ± 22% +46.9% 1.97 ± 24% perf-profile.cpu-cycles.x86_64_start_reservations.x86_64_start_kernel
2.39e+11 ± 1% -2.5% 2.33e+11 ± 1% perf-stat.L1-dcache-loads
2.559e+08 ± 2% +4.3% 2.669e+08 ± 1% perf-stat.L1-dcache-prefetches
4.836e+09 ± 2% +7.5% 5.197e+09 ± 0% perf-stat.L1-icache-load-misses
6.191e+11 ± 1% +3.2% 6.387e+11 ± 1% perf-stat.L1-icache-loads
1.187e+08 ± 1% +29.7% 1.54e+08 ± 2% perf-stat.LLC-load-misses
1.355e+09 ± 4% +9.5% 1.484e+09 ± 4% perf-stat.LLC-loads
1.104e+08 ± 1% +12.9% 1.246e+08 ± 1% perf-stat.LLC-store-misses
1.244e+09 ± 3% -6.7% 1.161e+09 ± 1% perf-stat.branch-misses
2.697e+08 ± 0% +25.1% 3.373e+08 ± 2% perf-stat.cache-misses
10825819 ± 2% -8.6% 9891678 ± 4% perf-stat.context-switches
63968 ± 1% +54.6% 98882 ± 2% perf-stat.cpu-migrations
2.385e+11 ± 1% -3.0% 2.314e+11 ± 1% perf-stat.dTLB-loads
5457543 ± 7% +22.7% 6698136 ± 5% perf-stat.iTLB-load-misses
=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/period/nr_threads/disk/fs/size/filenum/rwmode/iomode:
lkp-ws02/fileio/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/600s/100%/1HDD/btrfs/64G/1024f/rndrd/sync
commit:
e39155ea11eac6da056b04669d7c9fc612e2065a
79553da293d38d63097278de13e28a3b371f43c1
e39155ea11eac6da 79553da293d38d63097278de13
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
6708548 ± 0% +1.8% 6832358 ± 0% fileio.time.file_system_inputs
27192978 ± 10% +24.2% 33781232 ± 9% cpuidle.C1-NHM.time
291264 ± 3% -19.2% 235461 ± 2% cpuidle.C1-NHM.usage
256.25 ± 5% -8.5% 234.50 ± 4% cpuidle.POLL.usage
6927467 ±122% -77.4% 1563952 ±100% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.new_sync_write.vfs_write.SyS_write.system_call_fastpath
126552 ± 73% -100.0% 0.00 ± -1% latency_stats.avg.wait_on_page_bit.extent_write_cache_pages.[btrfs].extent_writepages.[btrfs].btrfs_writepages.[btrfs].do_writepages.__filemap_fdatawrite_range.filemap_fdatawrite_range.btrfs_fdatawrite_range.[btrfs].start_ordered_ops.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync
304943 ± 3% -18.4% 248685 ± 2% latency_stats.hits.pipe_wait.pipe_read.new_sync_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
31145 ±133% -24.8% 23416 ± 0% latency_stats.max.blk_execute_rq.scsi_execute.scsi_execute_req_flags.ses_recv_diag.[ses].ses_enclosure_data_process.[ses].ses_match_to_enclosure.[ses].ses_intf_add.[ses].class_interface_register.scsi_register_interface.0xffffffffa0006013.do_one_initcall.do_init_module
242127 ± 74% -100.0% 0.00 ± -1% latency_stats.max.wait_on_page_bit.extent_write_cache_pages.[btrfs].extent_writepages.[btrfs].btrfs_writepages.[btrfs].do_writepages.__filemap_fdatawrite_range.filemap_fdatawrite_range.btrfs_fdatawrite_range.[btrfs].start_ordered_ops.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync
253104 ± 73% -100.0% 0.00 ± -1% latency_stats.sum.wait_on_page_bit.extent_write_cache_pages.[btrfs].extent_writepages.[btrfs].btrfs_writepages.[btrfs].do_writepages.__filemap_fdatawrite_range.filemap_fdatawrite_range.btrfs_fdatawrite_range.[btrfs].start_ordered_ops.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync
1452770 ± 9% +519.6% 9001196 ± 47% numa-numastat.node0.local_node
166456 ±143% +3090.2% 5310291 ± 56% numa-numastat.node0.numa_foreign
1452773 ± 9% +519.6% 9001200 ± 47% numa-numastat.node0.numa_hit
7153013 ± 3% -75.4% 1761824 ±164% numa-numastat.node0.numa_miss
7153016 ± 3% -75.4% 1761828 ±164% numa-numastat.node0.other_node
7153013 ± 3% -75.4% 1761824 ±164% numa-numastat.node1.numa_foreign
166456 ±143% +3090.2% 5310291 ± 56% numa-numastat.node1.numa_miss
166473 ±143% +3089.9% 5310296 ± 56% numa-numastat.node1.other_node
1128 ± 2% -39.1% 687.00 ± 35% slabinfo.bio-1.active_objs
1128 ± 2% -36.2% 719.75 ± 30% slabinfo.bio-1.num_objs
206.75 ± 4% -43.4% 117.00 ± 34% slabinfo.btrfs_transaction.active_objs
206.75 ± 4% -43.4% 117.00 ± 34% slabinfo.btrfs_transaction.num_objs
614.00 ± 5% -18.0% 503.25 ± 11% slabinfo.ext4_free_data.active_objs
614.00 ± 5% -18.0% 503.25 ± 11% slabinfo.ext4_free_data.num_objs
1241 ± 5% -13.2% 1076 ± 10% slabinfo.file_lock_ctx.active_objs
1241 ± 5% -13.2% 1076 ± 10% slabinfo.file_lock_ctx.num_objs
3644 ± 2% -9.0% 3317 ± 5% slabinfo.kmalloc-1024.active_objs
1505 ± 1% -59.8% 605.75 ± 5% proc-vmstat.kswapd_high_wmark_hit_quickly
53.00 ± 15% +19436.8% 10354 ± 1% proc-vmstat.kswapd_low_wmark_hit_quickly
3246 ± 2% -82.2% 577.50 ± 0% proc-vmstat.nr_alloc_batch
905.25 ± 54% -72.2% 251.25 ± 38% proc-vmstat.numa_hint_faults
836.50 ± 63% -72.8% 227.50 ± 28% proc-vmstat.numa_hint_faults_local
1003 ± 42% -72.3% 278.25 ± 15% proc-vmstat.numa_pte_updates
3357 ± 0% +296.0% 13296 ± 1% proc-vmstat.pageoutrun
412.00 ± 40% +666.0% 3155 ± 18% proc-vmstat.pgscan_direct_dma32
1192 ± 24% +1926.9% 24174 ± 17% proc-vmstat.pgscan_direct_normal
1673 ± 3% +13.6% 1900 ± 6% proc-vmstat.workingset_activate
186101 ± 1% +39.2% 259051 ± 15% numa-meminfo.node0.Active
175144 ± 0% +43.7% 251597 ± 16% numa-meminfo.node0.Active(file)
8061 ± 34% -34.6% 5270 ± 0% numa-meminfo.node0.Mapped
874427 ± 0% -46.8% 464786 ± 46% numa-meminfo.node0.MemFree
88387 ± 0% +20.4% 106421 ± 8% numa-meminfo.node0.Slab
286764 ± 0% -25.1% 214834 ± 18% numa-meminfo.node1.Active
275866 ± 0% -27.5% 200006 ± 21% numa-meminfo.node1.Active(file)
1072 ± 8% -32.2% 727.50 ± 24% numa-meminfo.node1.Dirty
87375 ± 1% -18.7% 71059 ± 13% numa-meminfo.node1.SReclaimable
21629 ± 5% -11.0% 19250 ± 2% numa-meminfo.node1.SUnreclaim
109004 ± 0% -17.1% 90310 ± 11% numa-meminfo.node1.Slab
43785 ± 0% +43.7% 62898 ± 16% numa-vmstat.node0.nr_active_file
1837 ± 1% -85.3% 270.25 ± 0% numa-vmstat.node0.nr_alloc_batch
218616 ± 0% -46.8% 116208 ± 46% numa-vmstat.node0.nr_free_pages
2015 ± 34% -34.6% 1317 ± 0% numa-vmstat.node0.nr_mapped
34180 ±144% +10851.6% 3743275 ± 57% numa-vmstat.node0.numa_foreign
767794 ± 3% +689.8% 6064164 ± 50% numa-vmstat.node0.numa_hit
707070 ± 3% +749.1% 6003501 ± 50% numa-vmstat.node0.numa_local
5022883 ± 1% -75.4% 1236569 ±170% numa-vmstat.node0.numa_miss
5083607 ± 1% -74.5% 1297231 ±162% numa-vmstat.node0.numa_other
605.25 ± 3% +78.0% 1077 ± 24% numa-vmstat.node0.workingset_activate
68966 ± 0% -27.5% 50001 ± 21% numa-vmstat.node1.nr_active_file
1433 ± 2% -81.5% 265.75 ± 1% numa-vmstat.node1.nr_alloc_batch
21843 ± 1% -18.7% 17764 ± 13% numa-vmstat.node1.nr_slab_reclaimable
5406 ± 5% -11.0% 4812 ± 2% numa-vmstat.node1.nr_slab_unreclaimable
5022889 ± 1% -75.4% 1236573 ±170% numa-vmstat.node1.numa_foreign
34182 ±144% +10851.0% 3743283 ± 57% numa-vmstat.node1.numa_miss
38048 ±129% +9748.4% 3747130 ± 57% numa-vmstat.node1.numa_other
1696 ± 30% -70.6% 498.25 ± 18% sched_debug.cfs_rq[10]:/.avg->runnable_avg_sum
10699 ± 18% -48.9% 5471 ± 57% sched_debug.cfs_rq[10]:/.exec_clock
31648 ± 36% -58.8% 13052 ± 59% sched_debug.cfs_rq[10]:/.min_vruntime
493.00 ± 16% -29.9% 345.75 ± 6% sched_debug.cfs_rq[10]:/.tg->runnable_avg
36.00 ± 32% -72.9% 9.75 ± 16% sched_debug.cfs_rq[10]:/.tg_runnable_contrib
166.75 ± 32% -60.0% 66.75 ± 47% sched_debug.cfs_rq[11]:/.blocked_load_avg
9322 ± 21% -53.0% 4380 ± 41% sched_debug.cfs_rq[11]:/.exec_clock
23106 ± 28% -57.7% 9765 ± 34% sched_debug.cfs_rq[11]:/.min_vruntime
11778 ± 54% -791.7% -81472 ±-61% sched_debug.cfs_rq[11]:/.spread0
493.50 ± 14% -29.7% 346.75 ± 5% sched_debug.cfs_rq[11]:/.tg->runnable_avg
168.25 ± 32% -59.9% 67.50 ± 47% sched_debug.cfs_rq[11]:/.tg_load_contrib
498.50 ± 15% -29.6% 351.00 ± 4% sched_debug.cfs_rq[12]:/.tg->runnable_avg
1718 ± 13% +28.9% 2214 ± 9% sched_debug.cfs_rq[13]:/.exec_clock
501.25 ± 14% -28.7% 357.25 ± 5% sched_debug.cfs_rq[13]:/.tg->runnable_avg
506.50 ± 15% -30.6% 351.75 ± 4% sched_debug.cfs_rq[14]:/.tg->runnable_avg
1210 ± 9% +42.4% 1723 ± 10% sched_debug.cfs_rq[15]:/.exec_clock
6566 ± 9% +17.6% 7721 ± 10% sched_debug.cfs_rq[15]:/.min_vruntime
511.25 ± 14% -30.6% 355.00 ± 3% sched_debug.cfs_rq[15]:/.tg->runnable_avg
515.50 ± 14% -30.4% 358.75 ± 3% sched_debug.cfs_rq[16]:/.tg->runnable_avg
-3588 ±-25% +2233.9% -83744 ±-54% sched_debug.cfs_rq[17]:/.spread0
520.25 ± 13% -30.7% 360.75 ± 3% sched_debug.cfs_rq[17]:/.tg->runnable_avg
2859 ± 73% -51.2% 1394 ± 14% sched_debug.cfs_rq[18]:/.exec_clock
11966 ± 67% -45.0% 6585 ± 9% sched_debug.cfs_rq[18]:/.min_vruntime
524.75 ± 13% -30.7% 363.50 ± 3% sched_debug.cfs_rq[18]:/.tg->runnable_avg
523.50 ± 12% -29.7% 368.00 ± 4% sched_debug.cfs_rq[19]:/.tg->runnable_avg
2926 ± 5% +48.9% 4356 ± 18% sched_debug.cfs_rq[1]:/.exec_clock
8401 ± 6% +30.1% 10932 ± 6% sched_debug.cfs_rq[1]:/.min_vruntime
527.50 ± 11% -29.5% 372.00 ± 3% sched_debug.cfs_rq[20]:/.tg->runnable_avg
2098 ± 46% -35.1% 1362 ± 3% sched_debug.cfs_rq[21]:/.exec_clock
532.00 ± 11% -28.9% 378.00 ± 3% sched_debug.cfs_rq[21]:/.tg->runnable_avg
537.25 ± 12% -28.7% 383.00 ± 3% sched_debug.cfs_rq[22]:/.tg->runnable_avg
3184 ±291% -2728.3% -83710 ±-57% sched_debug.cfs_rq[23]:/.spread0
541.50 ± 11% -28.9% 385.25 ± 3% sched_debug.cfs_rq[23]:/.tg->runnable_avg
3163 ± 3% +19.4% 3776 ± 8% sched_debug.cfs_rq[2]:/.exec_clock
0.75 ±110% +400.0% 3.75 ± 47% sched_debug.cfs_rq[2]:/.runnable_load_avg
555.50 ± 18% -20.2% 443.25 ± 14% sched_debug.cfs_rq[2]:/.tg->runnable_avg
8390 ± 2% +23.8% 10384 ± 8% sched_debug.cfs_rq[3]:/.min_vruntime
549.75 ± 18% -19.5% 442.75 ± 14% sched_debug.cfs_rq[3]:/.tg->runnable_avg
68.00 ± 40% +171.3% 184.50 ± 26% sched_debug.cfs_rq[4]:/.blocked_load_avg
2824 ± 14% +30.4% 3682 ± 9% sched_debug.cfs_rq[4]:/.exec_clock
545.50 ± 16% -22.6% 422.25 ± 11% sched_debug.cfs_rq[4]:/.tg->runnable_avg
69.75 ± 39% +171.0% 189.00 ± 25% sched_debug.cfs_rq[4]:/.tg_load_contrib
7824 ± 17% +24.4% 9733 ± 7% sched_debug.cfs_rq[5]:/.min_vruntime
518.25 ± 16% -22.7% 400.50 ± 11% sched_debug.cfs_rq[5]:/.tg->runnable_avg
1599 ± 51% -62.1% 605.50 ± 25% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
15577 ± 38% -69.7% 4721 ± 67% sched_debug.cfs_rq[7]:/.exec_clock
37096 ± 35% -68.1% 11832 ± 56% sched_debug.cfs_rq[7]:/.min_vruntime
25768 ± 49% -408.1% -79404 ±-67% sched_debug.cfs_rq[7]:/.spread0
491.50 ± 16% -27.1% 358.50 ± 10% sched_debug.cfs_rq[7]:/.tg->runnable_avg
34.00 ± 52% -64.0% 12.25 ± 27% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
1949 ± 23% -55.6% 865.75 ± 70% sched_debug.cfs_rq[8]:/.avg->runnable_avg_sum
25183 ± 51% -54.8% 11375 ± 44% sched_debug.cfs_rq[8]:/.min_vruntime
13855 ± 94% -676.4% -79862 ±-64% sched_debug.cfs_rq[8]:/.spread0
492.25 ± 16% -28.9% 349.75 ± 6% sched_debug.cfs_rq[8]:/.tg->runnable_avg
41.25 ± 24% -57.0% 17.75 ± 73% sched_debug.cfs_rq[8]:/.tg_runnable_contrib
494.00 ± 16% -28.6% 352.50 ± 6% sched_debug.cfs_rq[9]:/.tg->runnable_avg
46021 ± 28% +155.5% 117598 ± 31% sched_debug.cpu#0.nr_load_updates
-52.75 ± -8% +26.5% -66.75 ± -6% sched_debug.cpu#0.nr_uninterruptible
77978 ± 37% +70.5% 132942 ± 22% sched_debug.cpu#0.ttwu_count
33164 ± 42% +141.3% 80030 ± 30% sched_debug.cpu#0.ttwu_local
29131 ± 2% +18.7% 34578 ± 4% sched_debug.cpu#1.nr_load_updates
34945 ± 55% +82.2% 63667 ± 22% sched_debug.cpu#1.ttwu_count
53444 ± 11% -32.0% 36339 ± 18% sched_debug.cpu#10.nr_load_updates
254928 ± 19% -37.0% 160497 ± 36% sched_debug.cpu#10.nr_switches
255022 ± 19% -37.0% 160586 ± 36% sched_debug.cpu#10.sched_count
124863 ± 20% -37.9% 77546 ± 37% sched_debug.cpu#10.sched_goidle
62350 ± 6% -35.1% 40434 ± 25% sched_debug.cpu#10.ttwu_count
24043 ± 13% -39.6% 14532 ± 27% sched_debug.cpu#10.ttwu_local
48349 ± 12% -25.1% 36207 ± 17% sched_debug.cpu#11.nr_load_updates
220073 ± 20% -43.2% 125091 ± 24% sched_debug.cpu#11.nr_switches
220176 ± 20% -43.1% 125175 ± 24% sched_debug.cpu#11.sched_count
107249 ± 20% -43.9% 60128 ± 25% sched_debug.cpu#11.sched_goidle
54997 ± 15% -39.9% 33028 ± 15% sched_debug.cpu#11.ttwu_count
11715 ± 2% +14.5% 13414 ± 3% sched_debug.cpu#14.nr_load_updates
11624 ± 2% +14.1% 13262 ± 6% sched_debug.cpu#15.nr_load_updates
26743 ± 35% +214.0% 83986 ± 33% sched_debug.cpu#15.ttwu_count
11460 ± 3% +13.2% 12974 ± 4% sched_debug.cpu#16.nr_load_updates
11441 ± 5% +10.9% 12687 ± 3% sched_debug.cpu#17.nr_load_updates
3016 ± 3% -7.4% 2793 ± 1% sched_debug.cpu#17.ttwu_local
29953 ± 6% +22.4% 36676 ± 2% sched_debug.cpu#2.nr_load_updates
7674 ± 8% +32.4% 10159 ± 4% sched_debug.cpu#2.ttwu_local
12920 ± 13% +206.5% 39603 ± 17% sched_debug.cpu#20.nr_switches
12984 ± 13% +205.5% 39667 ± 17% sched_debug.cpu#20.sched_count
5279 ± 16% +249.0% 18423 ± 19% sched_debug.cpu#20.sched_goidle
80631 ± 35% -54.4% 36729 ± 33% sched_debug.cpu#21.ttwu_count
11221 ± 2% +10.5% 12400 ± 1% sched_debug.cpu#22.nr_load_updates
9.00 ± 78% -61.1% 3.50 ±173% sched_debug.cpu#23.cpu_load[2]
7.50 ± 86% -73.3% 2.00 ±173% sched_debug.cpu#23.cpu_load[3]
29370 ± 4% +29.7% 38100 ± 6% sched_debug.cpu#3.nr_load_updates
7498 ± 7% +37.2% 10286 ± 7% sched_debug.cpu#3.ttwu_local
29911 ± 12% +24.2% 37158 ± 4% sched_debug.cpu#4.nr_load_updates
140120 ± 24% +59.9% 224002 ± 18% sched_debug.cpu#4.nr_switches
140205 ± 24% +59.8% 224099 ± 18% sched_debug.cpu#4.sched_count
68431 ± 24% +60.7% 109965 ± 18% sched_debug.cpu#4.sched_goidle
7544 ± 7% +31.1% 9887 ± 4% sched_debug.cpu#4.ttwu_local
29295 ± 7% +21.4% 35577 ± 6% sched_debug.cpu#5.nr_load_updates
23076 ± 21% +43.1% 33020 ± 25% sched_debug.cpu#5.ttwu_count
7228 ± 4% +34.2% 9700 ± 6% sched_debug.cpu#5.ttwu_local
6.00 ± 83% -75.0% 1.50 ±173% sched_debug.cpu#6.cpu_load[2]
60164 ± 14% -40.5% 35793 ± 19% sched_debug.cpu#7.nr_load_updates
3.75 ±175% -240.0% -5.25 ±-69% sched_debug.cpu#7.nr_uninterruptible
53640 ± 18% -21.4% 42139 ± 16% sched_debug.cpu#7.ttwu_count
28043 ± 19% -51.0% 13744 ± 29% sched_debug.cpu#7.ttwu_local
6.75 ± 58% -77.8% 1.50 ±173% sched_debug.cpu#8.cpu_load[4]
51965 ± 15% -31.7% 35508 ± 9% sched_debug.cpu#8.nr_load_updates
22902 ± 25% -40.6% 13615 ± 12% sched_debug.cpu#8.ttwu_local
51893 ± 13% -29.9% 36371 ± 23% sched_debug.cpu#9.nr_load_updates
56687 ± 12% -40.8% 33540 ± 43% sched_debug.cpu#9.ttwu_count
22884 ± 18% -39.3% 13901 ± 39% sched_debug.cpu#9.ttwu_local
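
For reference, the %change column is the relative difference between the two per-commit mean values, so the headline +1.8% can be re-derived from the fileio.time.file_system_inputs means in the table above. A quick illustrative check (not part of the lkp output; the two constants are simply copied from the table):

awk 'BEGIN { old = 6708548; new = 6832358; printf "%+.1f%%\n", (new - old) / old * 100 }'
# prints +1.8%
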
bay: Pentium D
Memory: 2G
lkp-ws02: Westmere-EP
Memory: 16G
To reproduce:
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: fileio
default-monitors:
  wait: pre-test
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
    interval: 10
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 60
default-watchdogs:
  oom-killer:
  watchdog:
cpufreq_governor:
commit: b953c0d234bc72e8489d3bf51a276c5c4ec85345
model: Westmere-EP
memory: 16G
nr_hdd_partitions: 11
hdd_partitions: "/dev/disk/by-id/scsi-35000c500*-part1"
swap_partitions:
rootfs_partition: "/dev/disk/by-id/ata-WDC_WD1002FAEX-00Z3A0_WD-WCATR5408564-part3"
period: 600s
nr_threads: 100%
disk: 1HDD
fs: btrfs
size: 64G
fileio:
  filenum: 1024f
  rwmode: rndrd
  iomode: sync
queue: cyclic
testbox: lkp-ws02
tbox_group: lkp-ws02
kconfig: x86_64-rhel
enqueue_time: 2015-06-22 04:17:17.376633513 +08:00
user: lkp
compiler: gcc-4.9
head_commit: e81a8edc8c9fef345ef5336c171ee81de14a6c5a
base_commit: b953c0d234bc72e8489d3bf51a276c5c4ec85345
branch: linux-devel/devel-hourly-2015062222
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/b953c0d234bc72e8489d3bf51a276c5c4ec85345/vmlinuz-4.1.0"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/fileio/600s-100%-1HDD-btrfs-64G-1024f-rndrd-sync/lkp-ws02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/b953c0d234bc72e8489d3bf51a276c5c4ec85345/0"
job_file: "/lkp/scheduled/lkp-ws02/cyclic_fileio-600s-100%-1HDD-btrfs-64G-1024f-rndrd-sync-x86_64-rhel-CYCLIC_BASE-b953c0d234bc72e8489d3bf51a276c5c4ec85345-0-20150622-81972-7aszmx.yaml"
dequeue_time: 2015-06-22 23:10:55.828231637 +08:00
nr_cpu: "$(nproc)"
max_uptime: 3526.1800000000003
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-ws02/cyclic_fileio-600s-100%-1HDD-btrfs-64G-1024f-rndrd-sync-x86_64-rhel-CYCLIC_BASE-b953c0d234bc72e8489d3bf51a276c5c4ec85345-0-20150622-81972-7aszmx.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015062222
- commit=b953c0d234bc72e8489d3bf51a276c5c4ec85345
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/b953c0d234bc72e8489d3bf51a276c5c4ec85345/vmlinuz-4.1.0
- max_uptime=3526
- RESULT_ROOT=/result/fileio/600s-100%-1HDD-btrfs-64G-1024f-rndrd-sync/lkp-ws02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/b953c0d234bc72e8489d3bf51a276c5c4ec85345/0
- LKP_SERVER=inn
- |-
  ipmi_watchdog.start_now=1
  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal
  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/b953c0d234bc72e8489d3bf51a276c5c4ec85345/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fileio.cgz"
job_state: finished
loadavg: 22.08 20.58 11.71 1/316 3825
start_time: '1434985915'
end_time: '1434987332'
version: "/lkp/lkp/.src-20150622-101052"
mkfs -t btrfs /dev/sdd1
mount -t btrfs /dev/sdd1 /fs/sdd1
cd /fs/sdd1
sysbench --test=fileio --max-requests=0 --num-threads=24 --max-time=600 --file-test-mode=rndrd --file-total-size=68719476736 --file-io-mode=sync --file-num=1024 prepare
sysbench --test=fileio --max-requests=0 --num-threads=24 --max-time=600 --file-test-mode=rndrd --file-total-size=68719476736 --file-io-mode=sync --file-num=1024 run