[LKP] [mm] c8c06efa8b5: -7.6% unixbench.score

From: Huang Ying
Date: Wed Jan 07 2015 - 21:27:53 EST


FYI, we noticed the following changes on

commit c8c06efa8b552608493b7066c234cfa82c47fcea ("mm: convert i_mmap_mutex to rwsem")
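
For reference, the commit essentially turns the i_mmap interval-tree lock in struct address_space from a mutex into an rw_semaphore, so the write-side lock/unlock helpers now go through down_write()/up_write(). A simplified sketch of the change (not the exact upstream diff):

 struct address_space {
 	...
-	struct mutex		i_mmap_mutex;	/* protects the i_mmap tree */
+	struct rw_semaphore	i_mmap_rwsem;	/* protects the i_mmap tree */
 	...
 };

 static inline void i_mmap_lock_write(struct address_space *mapping)
 {
-	mutex_lock(&mapping->i_mmap_mutex);
+	down_write(&mapping->i_mmap_rwsem);
 }

 static inline void i_mmap_unlock_write(struct address_space *mapping)
 {
-	mutex_unlock(&mapping->i_mmap_mutex);
+	up_write(&mapping->i_mmap_rwsem);
 }

Writers that fail to take the rwsem now block in rwsem_down_write_failed(), which lines up with the new call_rwsem_down_write_failed entries and the roughly 4x increase in voluntary context switches below.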


testbox/testcase/testparams: lituya/unixbench/performance-execl

83cde9e8ba95d180 c8c06efa8b552608493b7066c2
---------------- --------------------------
%stddev %change %stddev
\ | \
721721 ± 1% +303.6% 2913110 ± 3% unixbench.time.voluntary_context_switches
11767 ± 0% -7.6% 10867 ± 1% unixbench.score
2.323e+08 ± 0% -7.2% 2.157e+08 ± 1% unixbench.time.minor_page_faults
207 ± 0% -7.0% 192 ± 1% unixbench.time.user_time
4923450 ± 0% -5.7% 4641672 ± 0% unixbench.time.involuntary_context_switches
584 ± 0% -5.2% 554 ± 0% unixbench.time.percent_of_cpu_this_job_got
948 ± 0% -4.9% 902 ± 0% unixbench.time.system_time
0 ± 0% +Inf% 672942 ± 2% latency_stats.hits.call_rwsem_down_write_failed.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.system_call_fastpath
0 ± 0% +Inf% 703126 ± 6% latency_stats.hits.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 ± 0% +Inf% 317363 ± 7% latency_stats.hits.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 ± 0% +Inf% 173298 ± 8% latency_stats.hits.call_rwsem_down_write_failed.vma_adjust.__split_vma.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 ± 0% +Inf% 50444 ± 1% latency_stats.hits.call_rwsem_down_write_failed.vma_adjust.__split_vma.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 ± 0% +Inf% 104157 ± 0% latency_stats.hits.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.vm_mmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 ± 0% +Inf% 51342 ± 1% latency_stats.hits.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 ± 0% +Inf% 1474 ± 7% latency_stats.hits.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.SyS_munmap.system_call_fastpath
0 ± 0% +Inf% 311514 ± 8% latency_stats.hits.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 ± 0% +Inf% 194 ± 10% latency_stats.hits.call_rwsem_down_write_failed.copy_process.do_fork.SyS_clone.stub_clone
40292 ± 2% -100.0% 0 ± 0% latency_stats.hits.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
19886 ± 3% -100.0% 0 ± 0% latency_stats.hits.unlink_file_vma.free_pgtables.unmap_region.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
18370 ± 3% -100.0% 0 ± 0% latency_stats.hits.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
5863 ± 5% -100.0% 0 ± 0% latency_stats.hits.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
9374 ± 4% -100.0% 0 ± 0% latency_stats.hits.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.vm_mmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
39987 ± 2% -100.0% 0 ± 0% latency_stats.hits.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.system_call_fastpath
11332 ± 3% -100.0% 0 ± 0% latency_stats.hits.vma_adjust.__split_vma.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
5540 ± 6% -100.0% 0 ± 0% latency_stats.hits.vma_adjust.__split_vma.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
9 ± 39% -100.0% 0 ± 0% latency_stats.hits.copy_process.do_fork.SyS_clone.stub_clone
490 Â 5% +301.2% 1967 Â 12% sched_debug.cpu#10.nr_uninterruptible
498 Â 4% +297.0% 1978 Â 10% sched_debug.cpu#13.nr_uninterruptible
528 Â 11% +284.4% 2032 Â 9% sched_debug.cpu#8.nr_uninterruptible
510 Â 5% +278.0% 1930 Â 8% sched_debug.cpu#12.nr_uninterruptible
502 Â 3% +279.9% 1910 Â 8% sched_debug.cpu#15.nr_uninterruptible
488 Â 4% +297.4% 1942 Â 7% sched_debug.cpu#11.nr_uninterruptible
61 Â 17% -51.0% 30 Â 42% sched_debug.cpu#4.cpu_load[0]
537 Â 14% +267.6% 1977 Â 8% sched_debug.cpu#9.nr_uninterruptible
549 Â 14% +256.2% 1958 Â 11% sched_debug.cpu#14.nr_uninterruptible
721721 Â 1% +303.6% 2913110 Â 3% time.voluntary_context_switches
32 Â 23% +41.4% 45 Â 15% sched_debug.cpu#0.cpu_load[1]
138118 Â 15% +22.8% 169551 Â 10% sched_debug.cpu#7.sched_goidle
110150 Â 13% +35.9% 149694 Â 10% sched_debug.cpu#11.sched_goidle
125451 Â 12% +34.5% 168781 Â 9% sched_debug.cpu#11.ttwu_count
30 Â 15% +29.3% 39 Â 15% sched_debug.cpu#12.cpu_load[3]
126828 Â 3% +29.1% 163741 Â 4% sched_debug.cpu#6.sched_goidle
160802 Â 12% +21.5% 195384 Â 8% sched_debug.cpu#7.ttwu_count
123454 Â 4% +30.5% 161069 Â 4% sched_debug.cpu#14.ttwu_count
148649 Â 2% +32.6% 197144 Â 4% sched_debug.cpu#2.ttwu_count
114238 Â 6% +31.1% 149740 Â 6% sched_debug.cpu#8.sched_goidle
128654 Â 5% +27.1% 163499 Â 7% sched_debug.cpu#15.ttwu_count
38 Â 7% -16.1% 32 Â 6% sched_debug.cpu#2.cpu_load[3]
378291 Â 11% +16.3% 440023 Â 7% sched_debug.cpu#7.sched_count
378057 Â 11% +16.0% 438383 Â 7% sched_debug.cpu#7.nr_switches
159246 Â 6% +20.6% 192115 Â 7% sched_debug.cpu#4.ttwu_count
39 Â 11% -17.6% 32 Â 10% sched_debug.cpu#4.cpu_load[3]
355445 Â 2% +20.5% 428445 Â 3% sched_debug.cpu#6.sched_count
321621 Â 9% +23.6% 397380 Â 7% sched_debug.cpu#11.sched_count
355193 Â 2% +20.1% 426758 Â 3% sched_debug.cpu#6.nr_switches
321376 Â 9% +23.2% 396022 Â 7% sched_debug.cpu#11.nr_switches
152398 Â 3% +28.3% 195489 Â 10% sched_debug.cpu#3.ttwu_count
125919 Â 3% +34.8% 169795 Â 4% sched_debug.cpu#2.sched_goidle
330747 Â 4% +20.2% 397424 Â 4% sched_debug.cpu#8.sched_count
36 Â 5% -13.1% 31 Â 6% sched_debug.cpu#2.cpu_load[4]
330460 Â 4% +19.9% 396085 Â 4% sched_debug.cpu#8.nr_switches
31 Â 11% +14.4% 35 Â 6% sched_debug.cpu#8.cpu_load[3]
159635 Â 4% +20.6% 192490 Â 10% sched_debug.cpu#1.ttwu_count
155809 Â 5% +16.3% 181259 Â 5% sched_debug.cpu#5.ttwu_count
133028 Â 6% +17.2% 155920 Â 5% sched_debug.cpu#5.sched_goidle
354261 Â 2% +24.5% 440990 Â 3% sched_debug.cpu#2.sched_count
1399 Â 16% +24.6% 1744 Â 4% sched_debug.cpu#12.curr->pid
354028 Â 2% +24.1% 439371 Â 3% sched_debug.cpu#2.nr_switches
165045 Â 9% +14.2% 188419 Â 3% sched_debug.cpu#0.ttwu_count
129864 Â 4% +30.1% 168934 Â 12% sched_debug.cpu#3.sched_goidle
361169 Â 2% +21.1% 437280 Â 9% sched_debug.cpu#3.nr_switches
361390 Â 2% +21.4% 438886 Â 9% sched_debug.cpu#3.sched_count
113836 Â 5% +26.8% 144302 Â 8% sched_debug.cpu#15.sched_goidle
223075 Â 4% +34.9% 300833 Â 8% cpuidle.C3-HSW.usage
109472 Â 5% +29.5% 141802 Â 4% sched_debug.cpu#14.sched_goidle
320251 Â 3% +19.3% 382084 Â 3% sched_debug.cpu#14.sched_count
328264 Â 4% +17.5% 385651 Â 6% sched_debug.cpu#15.nr_switches
319985 Â 3% +19.0% 380734 Â 3% sched_debug.cpu#14.nr_switches
32 Â 16% +41.9% 45 Â 20% sched_debug.cpu#12.cpu_load[2]
37 Â 6% -12.8% 32 Â 6% sched_debug.cpu#1.cpu_load[4]
328504 Â 4% +17.8% 386960 Â 6% sched_debug.cpu#15.sched_count
37 Â 35% +83.2% 68 Â 20% sched_debug.cpu#12.cpu_load[0]
129752 Â 6% +30.6% 169483 Â 6% sched_debug.cpu#8.ttwu_count
282972 Â 0% -15.8% 238308 Â 1% sched_debug.cfs_rq[9]:/.min_vruntime
284112 Â 0% -16.0% 238794 Â 1% sched_debug.cfs_rq[11]:/.min_vruntime
282666 Â 0% -16.2% 236884 Â 1% sched_debug.cfs_rq[10]:/.min_vruntime
285118 Â 0% -15.3% 241361 Â 1% sched_debug.cfs_rq[2]:/.min_vruntime
285813 Â 0% -15.5% 241598 Â 1% sched_debug.cfs_rq[1]:/.min_vruntime
286458 Â 1% -15.5% 242012 Â 1% sched_debug.cfs_rq[0]:/.min_vruntime
891 Â 33% +71.6% 1530 Â 9% sched_debug.cpu#15.curr->pid
283618 Â 0% -15.9% 238477 Â 1% sched_debug.cfs_rq[14]:/.min_vruntime
286794 Â 0% -15.4% 242695 Â 1% sched_debug.cfs_rq[3]:/.min_vruntime
282507 Â 0% -15.9% 237672 Â 1% sched_debug.cfs_rq[15]:/.min_vruntime
283428 Â 0% -16.2% 237542 Â 1% sched_debug.cfs_rq[12]:/.min_vruntime
284953 Â 0% -16.6% 237517 Â 1% sched_debug.cfs_rq[13]:/.min_vruntime
1270469 Â 1% +24.5% 1581318 Â 3% cpuidle.C6-HSW.usage
282363 Â 1% -16.2% 236514 Â 1% sched_debug.cfs_rq[8]:/.min_vruntime
286105 Â 0% -15.9% 240483 Â 1% sched_debug.cfs_rq[4]:/.min_vruntime
136240 Â 7% +21.8% 165966 Â 8% sched_debug.cpu#4.sched_goidle
137467 Â 5% +21.6% 167200 Â 12% sched_debug.cpu#1.sched_goidle
285463 Â 0% -15.6% 241015 Â 1% sched_debug.cfs_rq[5]:/.min_vruntime
287745 Â 0% -15.6% 242855 Â 2% sched_debug.cfs_rq[7]:/.min_vruntime
148602 Â 3% +27.3% 189130 Â 3% sched_debug.cpu#6.ttwu_count
377362 Â 4% +15.4% 435571 Â 9% sched_debug.cpu#1.sched_count
284598 Â 0% -15.2% 241230 Â 1% sched_debug.cfs_rq[6]:/.min_vruntime
377157 Â 4% +15.1% 433927 Â 9% sched_debug.cpu#1.nr_switches
374585 Â 5% +15.0% 430900 Â 6% sched_debug.cpu#4.nr_switches
374823 Â 5% +15.4% 432576 Â 6% sched_debug.cpu#4.sched_count
2069241 Â 0% +75.3% 3628295 Â 0% cpuidle.C1-HSW.usage
41 Â 47% +122.3% 92 Â 31% sched_debug.cfs_rq[3]:/.load
1.475e+08 Â 2% +46.1% 2.155e+08 Â 1% cpuidle.C1-HSW.time
2000 Â 27% -36.0% 1279 Â 17% sched_debug.cpu#4.curr->pid
91 Â 23% -51.1% 44 Â 26% sched_debug.cpu#4.load
20440136 Â 3% +13.5% 23206516 Â 10% cpuidle.C3-HSW.time
42 Â 46% +82.8% 77 Â 20% sched_debug.cpu#3.load
334868 Â 0% -40.1% 200692 Â 5% latency_stats.hits.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.do_cow_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault.clear_user.padzero.load_elf_binary
195507 Â 0% -40.0% 117305 Â 8% latency_stats.hits.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.do_cow_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
541 Â 1% +24.4% 672 Â 1% cpuidle.POLL.usage
124903 Â 8% -22.8% 96485 Â 7% sched_debug.cpu#10.ttwu_local
840 Â 5% -26.7% 616 Â 4% latency_stats.hits.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.do_read_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
127712 Â 11% -10.3% 114520 Â 6% sched_debug.cpu#9.ttwu_local
620351 Â 3% -14.2% 532554 Â 5% sched_debug.cpu#3.avg_idle
607039 Â 4% -12.6% 530814 Â 3% sched_debug.cpu#6.avg_idle
669680 Â 2% -11.6% 591883 Â 5% sched_debug.cpu#15.avg_idle
9349 Â 1% +9.2% 10211 Â 3% slabinfo.vm_area_struct.active_objs
9411 Â 1% +8.7% 10227 Â 3% slabinfo.vm_area_struct.num_objs
0 Â 0% +Inf% 51 Â 9% latency_stats.avg.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
932 Â 10% -100.0% 0 Â 0% latency_stats.max.unlink_file_vma.free_pgtables.unmap_region.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
146109 Â 5% -100.0% 0 Â 0% latency_stats.sum.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
341728 Â 1% -100.0% 0 Â 0% latency_stats.sum.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 10487 Â 13% latency_stats.sum.call_rwsem_down_write_failed.copy_process.do_fork.SyS_clone.stub_clone
0 Â 0% +Inf% 54 Â 19% latency_stats.avg.call_rwsem_down_write_failed.copy_process.do_fork.SyS_clone.stub_clone
7 Â 6% -100.0% 0 Â 0% latency_stats.avg.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 Â 0% +Inf% 573 Â 10% latency_stats.max.call_rwsem_down_write_failed.copy_process.do_fork.SyS_clone.stub_clone
2 Â 15% -100.0% 0 Â 0% latency_stats.max.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.SyS_munmap.system_call_fastpath
8 Â 0% -100.0% 0 Â 0% latency_stats.avg.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 1 Â 33% latency_stats.avg.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.SyS_munmap.system_call_fastpath
1 Â 24% -100.0% 0 Â 0% latency_stats.avg.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.SyS_munmap.system_call_fastpath
0 Â 0% +Inf% 3059 Â 20% latency_stats.sum.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.SyS_munmap.system_call_fastpath
0 Â 0% +Inf% 2178 Â 11% latency_stats.max.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 14 Â 3% latency_stats.avg.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
878 Â 10% -100.0% 0 Â 0% latency_stats.max.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 1915 Â 8% latency_stats.max.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.vm_mmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 12 Â 5% latency_stats.avg.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.vm_mmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 1327137 Â 5% latency_stats.sum.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.vm_mmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
518 Â 11% -100.0% 0 Â 0% latency_stats.max.vma_adjust.__split_vma.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 1944 Â 16% latency_stats.max.call_rwsem_down_write_failed.vma_adjust.__split_vma.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 10 Â 8% latency_stats.avg.call_rwsem_down_write_failed.vma_adjust.__split_vma.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 548178 Â 5% latency_stats.sum.call_rwsem_down_write_failed.vma_adjust.__split_vma.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 2247 Â 10% latency_stats.max.call_rwsem_down_write_failed.vma_adjust.__split_vma.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
2 Â 0% -100.0% 0 Â 0% latency_stats.avg.vma_adjust.__split_vma.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 61 Â 5% latency_stats.avg.call_rwsem_down_write_failed.vma_adjust.__split_vma.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 Â 0% +Inf% 10731426 Â 14% latency_stats.sum.call_rwsem_down_write_failed.vma_adjust.__split_vma.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
13994 Â 7% -100.0% 0 Â 0% latency_stats.sum.vma_adjust.__split_vma.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
966 Â 8% -100.0% 0 Â 0% latency_stats.max.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
867 Â 19% -100.0% 0 Â 0% latency_stats.max.vma_adjust.__split_vma.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 Â 0% +Inf% 2175 Â 4% latency_stats.max.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 Â 0% +Inf% 62 Â 5% latency_stats.avg.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 Â 0% +Inf% 19888298 Â 13% latency_stats.sum.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
15115 Â 6% -100.0% 0 Â 0% latency_stats.sum.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
2 Â 0% -100.0% 0 Â 0% latency_stats.avg.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
536 Â 16% -100.0% 0 Â 0% latency_stats.max.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
118730 Â 3% -100.0% 0 Â 0% latency_stats.sum.unlink_file_vma.free_pgtables.unmap_region.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 Â 0% +Inf% 2269 Â 2% latency_stats.max.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 771346 Â 3% latency_stats.sum.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.vm_munmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 37036112 Â 15% latency_stats.sum.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 2316 Â 5% latency_stats.max.call_rwsem_down_write_failed.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.system_call_fastpath
0 Â 0% +Inf% 54 Â 10% latency_stats.avg.call_rwsem_down_write_failed.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.system_call_fastpath
0 Â 0% +Inf% 36764596 Â 13% latency_stats.sum.call_rwsem_down_write_failed.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.system_call_fastpath
5 Â 0% -100.0% 0 Â 0% latency_stats.avg.vma_adjust.__split_vma.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
36701 Â 2% -100.0% 0 Â 0% latency_stats.sum.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.vm_mmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
63412 Â 3% -100.0% 0 Â 0% latency_stats.sum.vma_adjust.__split_vma.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 Â 0% +Inf% 60 Â 5% latency_stats.avg.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
0 Â 0% +Inf% 19176722 Â 14% latency_stats.sum.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
5 Â 9% -100.0% 0 Â 0% latency_stats.avg.unlink_file_vma.free_pgtables.unmap_region.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
929 Â 15% -100.0% 0 Â 0% latency_stats.max.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.system_call_fastpath
6 Â 6% -100.0% 0 Â 0% latency_stats.avg.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.system_call_fastpath
283534 Â 1% -100.0% 0 Â 0% latency_stats.sum.vma_adjust.__split_vma.split_vma.mprotect_fixup.SyS_mprotect.system_call_fastpath
676 Â 6% -100.0% 0 Â 0% latency_stats.max.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.vm_mmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
3 Â 11% -100.0% 0 Â 0% latency_stats.avg.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.vm_mmap.load_elf_binary.search_binary_handler.do_execve_common.SyS_execve.stub_execve
0 Â 0% +Inf% 2156 Â 4% latency_stats.max.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.unmap_region.do_munmap.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
38.04 Â 0% -5.4% 35.98 Â 0% turbostat.%c0
26 Â 31% -50.5% 13 Â 3% latency_stats.avg.kthread_stop.smpboot_destroy_threads.smpboot_unregister_percpu_thread.proc_dowatchdog.proc_sys_call_handler.proc_sys_write.vfs_write.SyS_write.system_call_fastpath
168 Â 30% -43.3% 95 Â 0% latency_stats.max.kthread_stop.smpboot_destroy_threads.smpboot_unregister_percpu_thread.proc_dowatchdog.proc_sys_call_handler.proc_sys_write.vfs_write.SyS_write.system_call_fastpath
408 Â 32% -48.9% 208 Â 2% latency_stats.sum.kthread_stop.smpboot_destroy_threads.smpboot_unregister_percpu_thread.proc_dowatchdog.proc_sys_call_handler.proc_sys_write.vfs_write.SyS_write.system_call_fastpath
2 Â 0% -50.0% 1 Â 0% latency_stats.avg.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.do_read_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
567580 Â 0% -49.5% 286885 Â 5% latency_stats.sum.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.do_cow_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault.clear_user.padzero.load_elf_binary
85194 Â 0% +20.5% 102669 Â 0% vmstat.system.cs
336221 Â 1% -46.7% 179256 Â 9% latency_stats.sum.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.do_cow_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
1729 Â 6% -36.7% 1094 Â 3% latency_stats.sum.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.do_read_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
20 Â 4% -34.1% 13 Â 23% latency_stats.max.stop_one_cpu.sched_exec.do_execve_common.SyS_execve.stub_execve
71302 Â 11% -25.9% 52817 Â 7% latency_stats.sum.path_lookupat.filename_lookup.user_path_at_empty.user_path_at.SyS_access.system_call_fastpath
57.77 Â 0% -1.2% 57.06 Â 0% turbostat.Pkg_W
206 Â 6% +19.0% 245 Â 6% latency_stats.sum.sync_inodes_sb.sync_inodes_one_sb.iterate_supers.sys_sync.system_call_fastpath
1756 Â 5% +10.5% 1940 Â 5% latency_stats.max.pipe_wait.wait_for_partner.fifo_open.do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_open.system_call_fastpath
2148 Â 4% -6.4% 2010 Â 5% latency_stats.avg.wait_woken.inotify_read.vfs_read.SyS_read.system_call_fastpath
10744 Â 4% -6.5% 10050 Â 5% latency_stats.sum.wait_woken.inotify_read.vfs_read.SyS_read.system_call_fastpath
109 Â 1% +8.0% 117 Â 2% latency_stats.avg.do_wait.SyS_wait4.system_call_fastpath
403 Â 3% -7.4% 373 Â 3% latency_stats.sum.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.rpc_call_sync.nfs3_rpc_wrapper.nfs3_proc_setattr.nfs3_proc_create.nfs_create.vfs_create.do_last.path_openat
137032 Â 1% +6.5% 145948 Â 2% latency_stats.sum.do_wait.SyS_wait4.system_call_fastpath
141 Â 0% +6.6% 150 Â 2% latency_stats.avg.pipe_wait.pipe_read.new_sync_read.vfs_read.SyS_read.system_call_fastpath
4898 Â 2% -6.0% 4604 Â 4% latency_stats.max.wait_woken.n_tty_read.tty_read.vfs_read.SyS_read.system_call_fastpath
5797 Â 2% -7.0% 5393 Â 1% latency_stats.sum.stop_one_cpu.sched_exec.do_execve_common.SyS_execve.stub_execve
4650 Â 0% -3.3% 4497 Â 1% latency_stats.max.wait_woken.inotify_read.vfs_read.SyS_read.system_call_fastpath


testbox/testcase/testparams: ivb43/vm-scalability/performance-300s-mmap-xread-seq-mt

83cde9e8ba95d180 c8c06efa8b552608493b7066c2
---------------- --------------------------
%stddev %change %stddev
\ | \
17 Â 48% -80.0% 3 Â 14% sched_debug.cpu#35.cpu_load[2]
1.20 Â 30% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.generic_smp_call_function_single_interrupt.smp_call_function_interrupt.call_function_interrupt.mutex_optimistic_spin.__mutex_lock_slowpath
1.34 Â 30% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.smp_call_function_interrupt.call_function_interrupt.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock
441348 Â 47% -76.0% 105848 Â 15% sched_debug.cpu#29.nr_switches
441662 Â 47% -75.8% 107048 Â 14% sched_debug.cpu#29.sched_count
219946 Â 47% -76.1% 52633 Â 15% sched_debug.cpu#29.sched_goidle
0.99 Â 13% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.__mutex_unlock_slowpath.mutex_unlock.rmap_walk.try_to_unmap.shrink_page_list
0.99 Â 13% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.mutex_unlock.rmap_walk.try_to_unmap.shrink_page_list.shrink_inactive_list
1.89 Â 30% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.call_function_interrupt.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.rmap_walk
27.22 Â 38% -79.5% 5.59 Â 1% perf-profile.cpu-cycles.page_referenced.shrink_page_list.shrink_inactive_list.shrink_lruvec.shrink_zone
27.14 Â 39% -79.6% 5.53 Â 1% perf-profile.cpu-cycles.rmap_walk.page_referenced.shrink_page_list.shrink_inactive_list.shrink_lruvec
5.75 Â 49% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.rmap_walk.try_to_unmap
5.87 Â 48% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.__mutex_lock_slowpath.mutex_lock.rmap_walk.try_to_unmap.shrink_page_list
5.91 Â 47% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.mutex_lock.rmap_walk.try_to_unmap.shrink_page_list.shrink_inactive_list
0.00 Â 0% +Inf% 4.16 Â 2% perf-profile.cpu-cycles.call_rwsem_down_write_failed.rmap_walk.page_referenced.shrink_page_list.shrink_inactive_list
0.00 Â 0% +Inf% 4.15 Â 2% perf-profile.cpu-cycles.rwsem_down_write_failed.call_rwsem_down_write_failed.rmap_walk.page_referenced.shrink_page_list
4.94 Â 4% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.mutex_spin_on_owner.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.rmap_walk
0.00 Â 0% +Inf% 3.18 Â 2% perf-profile.cpu-cycles.rwsem_spin_on_owner.rwsem_down_write_failed.call_rwsem_down_write_failed.rmap_walk.page_referenced
26.20 Â 40% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.rmap_walk.page_referenced
26.80 Â 39% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.__mutex_lock_slowpath.mutex_lock.rmap_walk.page_referenced.shrink_page_list
26.88 Â 39% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.mutex_lock.rmap_walk.page_referenced.shrink_page_list.shrink_inactive_list
0.00 Â 0% +Inf% 1.49 Â 0% perf-profile.cpu-cycles.call_rwsem_wake.rmap_walk.try_to_unmap.shrink_page_list.shrink_inactive_list
0.00 Â 0% +Inf% 1.49 Â 0% perf-profile.cpu-cycles.rwsem_wake.call_rwsem_wake.rmap_walk.try_to_unmap.shrink_page_list
0.00 Â 0% +Inf% 1.39 Â 1% perf-profile.cpu-cycles.wake_up_process.__rwsem_do_wake.rwsem_wake.call_rwsem_wake.rmap_walk
0.00 Â 0% +Inf% 1.28 Â 0% perf-profile.cpu-cycles.__rwsem_do_wake.rwsem_wake.call_rwsem_wake.rmap_walk.try_to_unmap
0.00 Â 0% +Inf% 1.22 Â 0% perf-profile.cpu-cycles.try_to_wake_up.wake_up_process.__rwsem_do_wake.rwsem_wake.call_rwsem_wake
0.00 Â 0% +Inf% 1.17 Â 2% perf-profile.cpu-cycles.call_rwsem_down_write_failed.rmap_walk.try_to_unmap.shrink_page_list.shrink_inactive_list
0.00 Â 0% +Inf% 1.16 Â 2% perf-profile.cpu-cycles.rwsem_down_write_failed.call_rwsem_down_write_failed.rmap_walk.try_to_unmap.shrink_page_list
0.00 Â 0% +Inf% 1.03 Â 1% perf-profile.cpu-cycles.call_rwsem_wake.rmap_walk.page_referenced.shrink_page_list.shrink_inactive_list
0.00 Â 0% +Inf% 1.03 Â 0% perf-profile.cpu-cycles.rwsem_wake.call_rwsem_wake.rmap_walk.page_referenced.shrink_page_list
32 Â 10% -96.2% 1 Â 34% sched_debug.cfs_rq[17]:/.nr_spread_over
42 Â 0% -84.5% 6 Â 41% sched_debug.cpu#31.load
44 Â 4% -85.2% 6 Â 41% sched_debug.cfs_rq[31]:/.load
534530 Â 21% -79.3% 110651 Â 35% sched_debug.cpu#32.nr_switches
535267 Â 20% -79.1% 111666 Â 35% sched_debug.cpu#32.sched_count
266369 Â 20% -79.4% 54763 Â 36% sched_debug.cpu#32.sched_goidle
116443.25 Â 37% -100.0% 0.00 Â 0% sched_debug.cfs_rq[11]:/.max_vruntime
116443.25 Â 37% -100.0% 0.00 Â 0% sched_debug.cfs_rq[11]:/.MIN_vruntime
18 Â 44% -72.2% 5 Â 14% sched_debug.cpu#35.cpu_load[0]
18 Â 44% -76.4% 4 Â 25% sched_debug.cpu#35.cpu_load[1]
1.02 Â 29% -100.0% 0.00 Â 0% perf-profile.cpu-cycles.flush_smp_call_function_queue.generic_smp_call_function_single_interrupt.smp_call_function_interrupt.call_function_interrupt.mutex_optimistic_spin
141571.82 Â 19% -100.0% 0.00 Â 0% sched_debug.cfs_rq[0]:/.MIN_vruntime
141571.82 Â 19% -100.0% 0.00 Â 0% sched_debug.cfs_rq[0]:/.max_vruntime
17 Â 48% -75.7% 4 Â 34% sched_debug.cfs_rq[35]:/.runnable_load_avg
823 Â 43% -72.0% 230 Â 14% sched_debug.cpu#35.ttwu_local
961 Â 38% -63.8% 347 Â 38% sched_debug.cpu#43.ttwu_local
135338 Â 38% -59.8% 54354 Â 43% sched_debug.cpu#27.ttwu_count
20 Â 20% -77.5% 4 Â 36% sched_debug.cpu#46.cpu_load[0]
3 Â 14% -64.3% 1 Â 34% sched_debug.cfs_rq[33]:/.nr_spread_over
4944 Â 15% -69.0% 1533 Â 8% sched_debug.cpu#19.ttwu_local
244794 Â 46% -64.2% 87628 Â 17% sched_debug.cpu#45.nr_switches
121511 Â 46% -64.2% 43527 Â 17% sched_debug.cpu#45.sched_goidle
248225 Â 47% -64.0% 89438 Â 17% sched_debug.cpu#45.sched_count
2.06 Â 25% -65.1% 0.72 Â 3% perf-profile.cpu-cycles.shrink_page_list.shrink_inactive_list.shrink_lruvec.shrink_zone.kswapd_shrink_zone
2.07 Â 25% -64.9% 0.73 Â 3% perf-profile.cpu-cycles.shrink_zone.kswapd_shrink_zone.kswapd.kthread.ret_from_fork
2.07 Â 25% -64.9% 0.73 Â 3% perf-profile.cpu-cycles.kswapd_shrink_zone.kswapd.kthread.ret_from_fork
2.07 Â 25% -64.9% 0.73 Â 3% perf-profile.cpu-cycles.shrink_lruvec.shrink_zone.kswapd_shrink_zone.kswapd.kthread
2.07 Â 25% -64.9% 0.73 Â 3% perf-profile.cpu-cycles.kswapd.kthread.ret_from_fork
2.07 Â 25% -64.9% 0.73 Â 3% perf-profile.cpu-cycles.shrink_inactive_list.shrink_lruvec.shrink_zone.kswapd_shrink_zone.kswapd
9 Â 47% -63.2% 3 Â 24% sched_debug.cpu#45.cpu_load[4]
38.51 Â 31% -60.4% 15.25 Â 0% perf-profile.cpu-cycles.shrink_page_list.shrink_inactive_list.shrink_lruvec.shrink_zone.shrink_zones
38.58 Â 31% -60.2% 15.35 Â 0% perf-profile.cpu-cycles.shrink_inactive_list.shrink_lruvec.shrink_zone.shrink_zones.do_try_to_free_pages
38.59 Â 31% -60.2% 15.36 Â 0% perf-profile.cpu-cycles.shrink_lruvec.shrink_zone.shrink_zones.do_try_to_free_pages.try_to_free_pages
38.59 Â 31% -60.2% 15.37 Â 0% perf-profile.cpu-cycles.shrink_zone.shrink_zones.do_try_to_free_pages.try_to_free_pages.__alloc_pages_nodemask
38.05 Â 30% -60.1% 15.17 Â 0% perf-profile.cpu-cycles.shrink_zones.do_try_to_free_pages.try_to_free_pages.__alloc_pages_nodemask.alloc_pages_current
36.81 Â 31% -60.0% 14.74 Â 1% perf-profile.cpu-cycles.do_try_to_free_pages.try_to_free_pages.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc
36.81 Â 31% -60.0% 14.74 Â 1% perf-profile.cpu-cycles.try_to_free_pages.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc.__do_page_cache_readahead
25.59 Â 30% -60.5% 10.10 Â 3% perf-profile.cpu-cycles.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc.__do_page_cache_readahead.ondemand_readahead
25.61 Â 30% -60.5% 10.12 Â 3% perf-profile.cpu-cycles.alloc_pages_current.__page_cache_alloc.__do_page_cache_readahead.ondemand_readahead.page_cache_async_readahead
25.61 Â 30% -60.4% 10.13 Â 3% perf-profile.cpu-cycles.__page_cache_alloc.__do_page_cache_readahead.ondemand_readahead.page_cache_async_readahead.filemap_fault
646 Â 31% -56.3% 282 Â 20% sched_debug.cpu#24.ttwu_local
110062 Â 35% -58.0% 46222 Â 26% sched_debug.cpu#41.sched_goidle
2.17 Â 23% -61.4% 0.83 Â 2% perf-profile.cpu-cycles.ret_from_fork
2.17 Â 23% -61.4% 0.83 Â 2% perf-profile.cpu-cycles.kthread.ret_from_fork
221912 Â 35% -57.9% 93448 Â 26% sched_debug.cpu#41.nr_switches
235539 Â 28% -59.6% 95119 Â 25% sched_debug.cpu#41.sched_count
113095 Â 48% -55.7% 50108 Â 19% sched_debug.cpu#25.ttwu_count
689 Â 25% -62.8% 256 Â 12% sched_debug.cpu#29.ttwu_local
101269 Â 36% -57.0% 43518 Â 16% sched_debug.cpu#42.sched_goidle
203648 Â 36% -57.0% 87618 Â 15% sched_debug.cpu#42.nr_switches
205852 Â 35% -56.8% 88943 Â 15% sched_debug.cpu#42.sched_count
270505 Â 43% -57.0% 116240 Â 14% sched_debug.cpu#33.sched_count
6 Â 38% -42.3% 3 Â 34% sched_debug.cpu#44.cpu_load[4]
140 Â 45% -56.6% 61 Â 43% sched_debug.cfs_rq[13]:/.tg_load_contrib
177724 Â 9% +217.1% 563500 Â 2% sched_debug.cpu#2.sched_goidle
132568 Â 42% -56.8% 57335 Â 14% sched_debug.cpu#33.sched_goidle
266176 Â 42% -56.8% 115093 Â 14% sched_debug.cpu#33.nr_switches
361283 Â 9% +219.0% 1152534 Â 1% sched_debug.cpu#2.sched_count
358987 Â 9% +214.5% 1129044 Â 3% sched_debug.cpu#2.nr_switches
357519 Â 43% -56.8% 154299 Â 24% sched_debug.cpu#22.avg_idle
11.50 Â 31% -56.1% 5.05 Â 9% perf-profile.cpu-cycles.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc.__do_page_cache_readahead.filemap_fault
11.50 Â 31% -56.0% 5.06 Â 9% perf-profile.cpu-cycles.alloc_pages_current.__page_cache_alloc.__do_page_cache_readahead.filemap_fault.__do_fault
11.50 Â 31% -56.1% 5.05 Â 9% perf-profile.cpu-cycles.__page_cache_alloc.__do_page_cache_readahead.filemap_fault.__do_fault.do_read_fault
11.52 Â 31% -56.0% 5.07 Â 9% perf-profile.cpu-cycles.__do_page_cache_readahead.filemap_fault.__do_fault.do_read_fault.handle_mm_fault
158 Â 17% -64.5% 56 Â 30% sched_debug.cfs_rq[17]:/.blocked_load_avg
916 Â 16% -63.9% 330 Â 20% sched_debug.cpu#44.ttwu_local
140398 Â 14% -62.8% 52291 Â 13% sched_debug.cpu#44.sched_goidle
282255 Â 14% -62.7% 105251 Â 13% sched_debug.cpu#44.nr_switches
283232 Â 13% -61.3% 109484 Â 10% sched_debug.cpu#44.sched_count
722 Â 30% -53.2% 337 Â 16% sched_debug.cpu#38.ttwu_local
4035 Â 47% -36.9% 2548 Â 31% meminfo.AnonHugePages
104439 Â 13% -60.3% 41412 Â 17% sched_debug.cpu#35.ttwu_count
40.27 Â 27% -54.5% 18.31 Â 0% perf-profile.cpu-cycles.handle_mm_fault.__do_page_fault.do_page_fault.page_fault.do_unit
39.04 Â 28% -54.5% 17.78 Â 0% perf-profile.cpu-cycles.__do_fault.do_read_fault.handle_mm_fault.__do_page_fault.do_page_fault
39.04 Â 28% -54.5% 17.78 Â 0% perf-profile.cpu-cycles.filemap_fault.__do_fault.do_read_fault.handle_mm_fault.__do_page_fault
40.29 Â 27% -54.5% 18.34 Â 0% perf-profile.cpu-cycles.__do_page_fault.do_page_fault.page_fault.do_unit
40.30 Â 27% -54.5% 18.34 Â 0% perf-profile.cpu-cycles.do_page_fault.page_fault.do_unit
144634 Â 29% -61.9% 55063 Â 25% sched_debug.cpu#34.sched_goidle
40.31 Â 27% -54.4% 18.37 Â 0% perf-profile.cpu-cycles.page_fault.do_unit
290308 Â 29% -61.9% 110535 Â 25% sched_debug.cpu#34.nr_switches
678 Â 36% -49.6% 341 Â 31% sched_debug.cpu#42.ttwu_local
291037 Â 29% -61.7% 111505 Â 25% sched_debug.cpu#34.sched_count
39.17 Â 28% -54.2% 17.96 Â 0% perf-profile.cpu-cycles.do_read_fault.isra.58.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
127105 Â 42% -56.0% 55930 Â 28% sched_debug.cpu#34.ttwu_count
571 Â 37% -50.8% 281 Â 22% sched_debug.cpu#31.ttwu_local
8 Â 29% -50.0% 4 Â 42% sched_debug.cpu#24.cpu_load[4]
27.52 Â 27% -53.8% 12.71 Â 2% perf-profile.cpu-cycles.page_cache_async_readahead.filemap_fault.__do_fault.do_read_fault.handle_mm_fault
27.52 Â 27% -53.8% 12.70 Â 2% perf-profile.cpu-cycles.ondemand_readahead.page_cache_async_readahead.filemap_fault.__do_fault.do_read_fault
27.51 Â 27% -53.8% 12.70 Â 2% perf-profile.cpu-cycles.__do_page_cache_readahead.ondemand_readahead.page_cache_async_readahead.filemap_fault.__do_fault
198359 Â 18% +216.5% 627757 Â 2% sched_debug.cpu#3.sched_goidle
399579 Â 18% +214.7% 1257445 Â 2% sched_debug.cpu#3.nr_switches
37058 Â 24% -48.5% 19099 Â 37% proc-vmstat.pgactivate
252555 Â 24% -45.8% 136964 Â 45% sched_debug.cpu#8.avg_idle
785 Â 18% -59.5% 318 Â 28% sched_debug.cpu#30.ttwu_local
3604 Â 19% -55.9% 1587 Â 7% sched_debug.cpu#17.ttwu_local
13 Â 23% -63.5% 4 Â 40% sched_debug.cpu#46.cpu_load[1]
411392 Â 19% +218.1% 1308463 Â 6% sched_debug.cpu#3.sched_count
11248 Â 44% -46.7% 5996 Â 8% sched_debug.cfs_rq[29]:/.avg->runnable_avg_sum
38 Â 18% -53.9% 17 Â 19% sched_debug.cpu#16.cpu_load[3]
243 Â 44% -46.6% 130 Â 8% sched_debug.cfs_rq[29]:/.tg_runnable_contrib
124035 Â 22% -56.1% 54473 Â 31% sched_debug.cpu#32.ttwu_count
120002 Â 28% -54.7% 54347 Â 10% sched_debug.cpu#26.ttwu_count
41 Â 19% -53.0% 19 Â 26% sched_debug.cfs_rq[1]:/.runnable_load_avg
21 Â 16% +151.2% 54 Â 29% sched_debug.cpu#7.load
8200 Â 41% +207.2% 25186 Â 18% cpuidle.C3-IVT.usage
179 Â 13% -57.9% 75 Â 30% sched_debug.cfs_rq[17]:/.tg_load_contrib
266 Â 25% -52.1% 127 Â 11% sched_debug.cfs_rq[27]:/.tg_runnable_contrib
12234 Â 25% -52.0% 5868 Â 11% sched_debug.cfs_rq[27]:/.avg->runnable_avg_sum
36 Â 19% -52.1% 17 Â 21% sched_debug.cpu#16.cpu_load[4]
57 Â 35% -53.5% 26 Â 23% sched_debug.cpu#1.load
911 Â 41% -45.6% 495 Â 24% sched_debug.cpu#29.curr->pid
192 Â 19% -62.8% 71 Â 42% sched_debug.cfs_rq[34]:/.blocked_load_avg
100556 Â 29% -53.5% 46739 Â 32% sched_debug.cpu#46.sched_goidle
202682 Â 29% -53.4% 94439 Â 32% sched_debug.cpu#46.nr_switches
203033 Â 29% -51.1% 99370 Â 30% sched_debug.cpu#46.sched_count
112925 Â 6% -58.7% 46633 Â 33% sched_debug.cpu#30.ttwu_count
197 Â 17% -60.8% 77 Â 40% sched_debug.cfs_rq[34]:/.tg_load_contrib
57 Â 35% -37.3% 35 Â 26% sched_debug.cfs_rq[1]:/.load
488 Â 5% -56.1% 214 Â 12% sched_debug.cpu#34.ttwu_local
42 Â 19% -49.4% 21 Â 18% sched_debug.cpu#3.cpu_load[4]
108113 Â 28% -50.1% 53902 Â 19% sched_debug.cpu#28.ttwu_count
83738 Â 36% -24.2% 63466 Â 46% sched_debug.cpu#47.sched_goidle
20 Â 25% +136.2% 47 Â 36% sched_debug.cfs_rq[21]:/.load
395212 Â 1% -59.5% 159962 Â 16% sched_debug.cpu#21.avg_idle
34 Â 33% -36.2% 22 Â 34% sched_debug.cpu#15.cpu_load[4]
113163 Â 11% -53.6% 52505 Â 12% sched_debug.cpu#29.ttwu_count
169129 Â 36% -24.1% 128298 Â 45% sched_debug.cpu#47.nr_switches
169646 Â 36% -23.5% 129736 Â 45% sched_debug.cpu#47.sched_count
33 Â 34% -39.6% 20 Â 12% sched_debug.cpu#15.cpu_load[0]
11699 Â 13% -45.7% 6354 Â 20% sched_debug.cfs_rq[28]:/.avg->runnable_avg_sum
43 Â 9% -48.8% 22 Â 26% sched_debug.cpu#1.cpu_load[1]
470 Â 12% -52.7% 222 Â 8% sched_debug.cpu#33.ttwu_local
3739 Â 23% -39.4% 2265 Â 38% sched_debug.cpu#13.ttwu_local
254 Â 13% -45.6% 138 Â 19% sched_debug.cfs_rq[28]:/.tg_runnable_contrib
44 Â 11% -50.0% 22 Â 21% sched_debug.cpu#1.cpu_load[0]
3607 Â 14% -48.6% 1854 Â 17% sched_debug.cpu#23.ttwu_local
394541 Â 1% -51.8% 190213 Â 29% sched_debug.cpu#18.avg_idle
232039 Â 49% +206.2% 710604 Â 10% sched_debug.cpu#32.avg_idle
33 Â 31% -37.3% 21 Â 19% sched_debug.cpu#15.cpu_load[1]
9 Â 15% -50.0% 4 Â 45% sched_debug.cpu#24.cpu_load[3]
42 Â 9% -53.0% 19 Â 21% sched_debug.cpu#16.cpu_load[0]
34 Â 33% -35.5% 22 Â 31% sched_debug.cpu#15.cpu_load[3]
39 Â 16% -53.8% 18 Â 18% sched_debug.cpu#16.cpu_load[2]
41 Â 12% -53.0% 19 Â 21% sched_debug.cpu#16.cpu_load[1]
325127 Â 11% -50.1% 162114 Â 13% sched_debug.cpu#19.avg_idle
41 Â 17% -47.6% 21 Â 11% sched_debug.cpu#3.cpu_load[3]
210 Â 26% -39.8% 126 Â 9% sched_debug.cfs_rq[30]:/.tg_runnable_contrib
9654 Â 26% -39.5% 5844 Â 9% sched_debug.cfs_rq[30]:/.avg->runnable_avg_sum
42 Â 7% -48.2% 21 Â 26% sched_debug.cpu#1.cpu_load[2]
34 Â 32% -35.3% 22 Â 27% sched_debug.cpu#15.cpu_load[2]
264389 Â 32% -42.1% 153208 Â 30% sched_debug.cpu#17.avg_idle
32 Â 31% -38.3% 19 Â 7% sched_debug.cfs_rq[15]:/.runnable_load_avg
41 Â 7% -54.3% 18 Â 24% sched_debug.cfs_rq[16]:/.runnable_load_avg
225 Â 17% -44.1% 126 Â 8% sched_debug.cfs_rq[26]:/.tg_runnable_contrib
348597 Â 0% +103.0% 707663 Â 5% sched_debug.cpu#34.avg_idle
449999 Â 22% +147.2% 1112495 Â 5% sched_debug.cpu#14.sched_count
77241 Â 24% -41.3% 45332 Â 4% numa-meminfo.node1.Active(anon)
458252 Â 14% +134.4% 1074159 Â 3% sched_debug.cpu#15.nr_switches
143667 Â 8% -47.3% 75686 Â 34% sched_debug.cpu#36.nr_switches
198599 Â 12% -40.4% 118410 Â 26% sched_debug.cpu#24.sched_count
10367 Â 17% -44.1% 5797 Â 8% sched_debug.cfs_rq[26]:/.avg->runnable_avg_sum
226163 Â 15% +136.4% 534622 Â 3% sched_debug.cpu#15.sched_goidle
71145 Â 8% -47.8% 37141 Â 34% sched_debug.cpu#36.sched_goidle
18829 Â 25% -40.4% 11220 Â 4% numa-vmstat.node1.nr_active_anon
65 Â 10% -48.5% 33 Â 18% sched_debug.cfs_rq[16]:/.load
145293 Â 7% -46.4% 77894 Â 33% sched_debug.cpu#36.sched_count
97284 Â 2% -49.6% 49014 Â 24% sched_debug.cpu#25.sched_goidle
195675 Â 2% -49.6% 98683 Â 24% sched_debug.cpu#25.nr_switches
550098 Â 9% +121.5% 1218282 Â 1% cpuidle.C6-IVT.usage
199 Â 27% -37.4% 124 Â 6% sched_debug.cfs_rq[47]:/.tg_runnable_contrib
195734 Â 2% -48.6% 100523 Â 25% sched_debug.cpu#25.sched_count
307889 Â 24% -43.3% 174487 Â 18% sched_debug.cpu#20.avg_idle
216214 Â 21% +140.6% 520172 Â 2% sched_debug.cpu#14.sched_goidle
5 Â 20% -40.0% 3 Â 23% sched_debug.cpu#36.cpu_load[4]
40 Â 3% -46.3% 21 Â 26% sched_debug.cpu#1.cpu_load[3]
10 Â 20% -42.5% 5 Â 25% sched_debug.cfs_rq[42]:/.load
5 Â 9% -40.9% 3 Â 33% sched_debug.cpu#36.cpu_load[3]
0.02 Â 0% +125.0% 0.04 Â 45% turbostat.%c3
436214 Â 21% +139.0% 1042659 Â 2% sched_debug.cpu#14.nr_switches
9142 Â 27% -37.1% 5751 Â 6% sched_debug.cfs_rq[47]:/.avg->runnable_avg_sum
250 Â 10% -46.2% 134 Â 11% sched_debug.cfs_rq[31]:/.tg_runnable_contrib
185 Â 25% -36.0% 118 Â 9% sched_debug.cfs_rq[36]:/.tg_runnable_contrib
68 Â 6% -45.3% 37 Â 11% sched_debug.cpu#16.load
11467 Â 10% -46.1% 6181 Â 11% sched_debug.cfs_rq[31]:/.avg->runnable_avg_sum
8527 Â 25% -36.1% 5451 Â 9% sched_debug.cfs_rq[36]:/.avg->runnable_avg_sum
2757 Â 16% -39.8% 1658 Â 5% sched_debug.cpu#15.curr->pid
3100 Â 17% -28.6% 2212 Â 32% sched_debug.cpu#18.ttwu_local
24 Â 2% +141.8% 59 Â 44% sched_debug.cpu#18.load
39 Â 18% -44.3% 22 Â 11% sched_debug.cpu#3.cpu_load[2]
10432 Â 8% -42.1% 6036 Â 9% sched_debug.cfs_rq[38]:/.avg->runnable_avg_sum
227 Â 7% -42.4% 130 Â 9% sched_debug.cfs_rq[38]:/.tg_runnable_contrib
93977 Â 8% -39.9% 56508 Â 30% sched_debug.cpu#24.sched_goidle
189361 Â 8% -40.0% 113550 Â 30% sched_debug.cpu#24.nr_switches
2759 Â 8% -47.3% 1454 Â 19% sched_debug.cpu#1.curr->pid
37 Â 17% -41.3% 22 Â 14% sched_debug.cpu#3.cpu_load[1]
35 Â 18% -38.0% 22 Â 21% sched_debug.cpu#3.cpu_load[0]
39 Â 1% -45.6% 21 Â 30% sched_debug.cpu#1.cpu_load[4]
1186 Â 23% -31.6% 811 Â 30% sched_debug.cpu#47.curr->pid
354895 Â 13% +105.7% 730183 Â 8% sched_debug.cpu#35.avg_idle
10495 Â 27% -39.9% 6305 Â 14% sched_debug.cfs_rq[24]:/.avg->runnable_avg_sum
87380 Â 17% -37.4% 54692 Â 10% sched_debug.cpu#33.ttwu_count
228 Â 27% -39.8% 137 Â 14% sched_debug.cfs_rq[24]:/.tg_runnable_contrib
208 Â 24% -38.4% 128 Â 9% sched_debug.cfs_rq[34]:/.tg_runnable_contrib
819 Â 24% -39.3% 497 Â 21% sched_debug.cpu#41.ttwu_local
944 Â 5% -43.2% 536 Â 7% cpuidle.POLL.usage
9573 Â 24% -38.2% 5920 Â 9% sched_debug.cfs_rq[34]:/.avg->runnable_avg_sum
90 Â 4% -35.1% 58 Â 35% sched_debug.cfs_rq[11]:/.blocked_load_avg
378417 Â 8% +94.6% 736481 Â 9% sched_debug.cpu#33.avg_idle
20 Â 25% +128.8% 45 Â 24% sched_debug.cfs_rq[7]:/.load
61938 Â 12% -39.4% 37530 Â 5% sched_debug.cfs_rq[32]:/.exec_clock
521095 Â 24% +117.9% 1135582 Â 6% sched_debug.cpu#15.sched_count
9 Â 15% -52.6% 4 Â 36% sched_debug.cpu#46.cpu_load[2]
100874 Â 20% -34.5% 66025 Â 1% meminfo.Active(anon)
292119 Â 28% -43.7% 164431 Â 27% sched_debug.cpu#13.avg_idle
113098 Â 4% -50.5% 56017 Â 22% sched_debug.cpu#31.ttwu_count
333765 Â 0% +81.9% 607170 Â 1% sched_debug.cpu#20.ttwu_count
29688 Â 15% -36.9% 18721 Â 7% sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
57876 Â 17% -35.5% 37332 Â 6% sched_debug.cfs_rq[27]:/.exec_clock
647 Â 15% -36.9% 408 Â 7% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
33982 Â 9% -39.3% 20638 Â 9% sched_debug.cfs_rq[3]:/.avg->runnable_avg_sum
1211 Â 25% -40.3% 723 Â 21% sched_debug.cpu#42.curr->pid
742 Â 9% -39.3% 450 Â 9% sched_debug.cfs_rq[3]:/.tg_runnable_contrib
674 Â 21% -39.0% 411 Â 11% sched_debug.cpu#39.ttwu_local
24927 Â 20% -33.6% 16551 Â 1% proc-vmstat.nr_active_anon
8 Â 5% -41.2% 5 Â 24% sched_debug.cfs_rq[28]:/.load
8 Â 12% -37.5% 5 Â 42% sched_debug.cpu#42.cpu_load[0]
8 Â 5% -41.2% 5 Â 24% sched_debug.cpu#28.load
8 Â 12% -37.5% 5 Â 42% sched_debug.cfs_rq[42]:/.runnable_load_avg
769 Â 26% -38.0% 476 Â 18% sched_debug.cpu#40.ttwu_local
59399 Â 13% -37.5% 37105 Â 2% sched_debug.cfs_rq[29]:/.exec_clock
593 Â 19% -38.4% 365 Â 25% sched_debug.cpu#45.ttwu_local
73311 Â 25% -34.4% 48092 Â 11% sched_debug.cpu#37.sched_goidle
58720 Â 15% -36.9% 37068 Â 4% sched_debug.cfs_rq[28]:/.exec_clock
58563 Â 13% -37.3% 36691 Â 4% sched_debug.cfs_rq[30]:/.exec_clock
3205188 Â 12% -35.1% 2079297 Â 27% cpuidle.C1E-IVT.time
147981 Â 25% -34.2% 97422 Â 11% sched_debug.cpu#37.nr_switches
58567 Â 13% -36.4% 37225 Â 4% sched_debug.cfs_rq[34]:/.exec_clock
223 Â 1% -46.0% 120 Â 9% sched_debug.cfs_rq[35]:/.tg_runnable_contrib
58949 Â 9% -37.0% 37126 Â 1% sched_debug.cfs_rq[31]:/.exec_clock
6 Â 7% -38.5% 4 Â 35% sched_debug.cpu#46.cpu_load[4]
20 Â 17% +119.5% 45 Â 24% sched_debug.cfs_rq[18]:/.load
148106 Â 25% -33.6% 98405 Â 11% sched_debug.cpu#37.sched_count
101872 Â 21% -34.0% 67261 Â 6% numa-meminfo.node1.Shmem
10276 Â 1% -46.0% 5553 Â 9% sched_debug.cfs_rq[35]:/.avg->runnable_avg_sum
852 Â 5% -33.3% 568 Â 30% sched_debug.cpu#27.curr->pid
56228 Â 14% -33.9% 37144 Â 2% sched_debug.cfs_rq[25]:/.exec_clock
8322 Â 15% -34.2% 5477 Â 8% sched_debug.cfs_rq[43]:/.avg->runnable_avg_sum
180 Â 15% -33.9% 119 Â 8% sched_debug.cfs_rq[43]:/.tg_runnable_contrib
3142 Â 5% -35.1% 2041 Â 13% sched_debug.cpu#20.ttwu_local
79503 Â 13% -36.2% 50683 Â 24% sched_debug.cpu#41.ttwu_count
11075 Â 13% -34.2% 7290 Â 23% sched_debug.cfs_rq[32]:/.avg->runnable_avg_sum
107991 Â 18% -31.5% 73987 Â 0% meminfo.Shmem
241 Â 13% -34.5% 158 Â 23% sched_debug.cfs_rq[32]:/.tg_runnable_contrib
243694 Â 17% -34.0% 160859 Â 21% sched_debug.cpu#16.avg_idle
24984 Â 21% -33.1% 16716 Â 6% numa-vmstat.node1.nr_shmem
34 Â 14% -37.5% 21 Â 17% sched_debug.cfs_rq[3]:/.runnable_load_avg
26661 Â 17% -30.5% 18541 Â 1% proc-vmstat.nr_shmem
263599 Â 4% -32.6% 177657 Â 18% sched_debug.cpu#1.avg_idle
176 Â 19% -29.8% 123 Â 3% sched_debug.cfs_rq[44]:/.tg_runnable_contrib
56734 Â 11% -34.4% 37240 Â 1% sched_debug.cfs_rq[26]:/.exec_clock
54635 Â 13% -33.1% 36526 Â 1% sched_debug.cfs_rq[24]:/.exec_clock
46 Â 36% +129.9% 105 Â 8% sched_debug.cfs_rq[5]:/.tg_load_contrib
8104 Â 19% -29.7% 5697 Â 3% sched_debug.cfs_rq[44]:/.avg->runnable_avg_sum
9 Â 11% -38.9% 5 Â 45% sched_debug.cpu#34.cpu_load[0]
9 Â 11% -41.7% 5 Â 41% sched_debug.cpu#42.cpu_load[1]
10 Â 0% -47.5% 5 Â 36% sched_debug.cfs_rq[31]:/.runnable_load_avg
4 Â 11% -38.9% 2 Â 30% sched_debug.cpu#42.cpu_load[4]
393940 Â 1% -35.9% 252557 Â 15% sched_debug.cpu#0.avg_idle
55119 Â 8% -34.2% 36278 Â 3% sched_debug.cfs_rq[35]:/.exec_clock
204 Â 0% -40.2% 122 Â 2% sched_debug.cfs_rq[45]:/.tg_runnable_contrib
878 Â 11% -30.9% 607 Â 18% sched_debug.cpu#46.curr->pid
9421 Â 0% -40.5% 5602 Â 3% sched_debug.cfs_rq[45]:/.avg->runnable_avg_sum
71278 Â 19% -34.3% 46823 Â 14% sched_debug.cpu#45.ttwu_count
706 Â 2% -37.3% 443 Â 16% sched_debug.cfs_rq[1]:/.tg_runnable_contrib
640 Â 21% -27.5% 464 Â 11% sched_debug.cfs_rq[9]:/.tg_runnable_contrib
32384 Â 2% -37.2% 20347 Â 16% sched_debug.cfs_rq[1]:/.avg->runnable_avg_sum
29418 Â 21% -27.6% 21302 Â 11% sched_debug.cfs_rq[9]:/.avg->runnable_avg_sum
115950 Â 7% -32.3% 78456 Â 3% sched_debug.cfs_rq[14]:/.exec_clock
12 Â 4% +50.0% 18 Â 16% sched_debug.cfs_rq[19]:/.runnable_load_avg
337262 Â 38% +115.2% 725624 Â 5% sched_debug.cpu#30.avg_idle
509 Â 14% -37.4% 318 Â 40% sched_debug.cpu#25.ttwu_local
415228 Â 3% +60.6% 666835 Â 10% sched_debug.cpu#24.avg_idle
122127 Â 8% -33.8% 80901 Â 1% sched_debug.cfs_rq[3]:/.exec_clock
7 Â 6% -46.7% 4 Â 39% sched_debug.cpu#31.cpu_load[4]
7 Â 6% -40.0% 4 Â 24% sched_debug.cpu#42.cpu_load[2]
7 Â 14% -32.1% 4 Â 31% sched_debug.cpu#47.cpu_load[3]
8 Â 0% -46.9% 4 Â 34% sched_debug.cpu#46.cpu_load[3]
7 Â 6% -40.0% 4 Â 40% sched_debug.cfs_rq[28]:/.runnable_load_avg
2828 Â 5% -33.2% 1890 Â 18% sched_debug.cpu#16.ttwu_local
973 Â 2% -38.1% 602 Â 30% sched_debug.cpu#38.curr->pid
12.72 Â 16% -26.7% 9.33 Â 0% perf-profile.cpu-cycles.try_to_unmap.shrink_page_list.shrink_inactive_list.shrink_lruvec.shrink_zone
120434 Â 16% -28.4% 86248 Â 3% numa-meminfo.node1.Active
115868 Â 7% -32.1% 78675 Â 1% sched_debug.cfs_rq[15]:/.exec_clock
2730 Â 1% -35.4% 1764 Â 5% sched_debug.cpu#3.curr->pid
12.49 Â 16% -25.8% 9.27 Â 0% perf-profile.cpu-cycles.rmap_walk.try_to_unmap.shrink_page_list.shrink_inactive_list.shrink_lruvec
76589 Â 20% -29.7% 53841 Â 13% sched_debug.cpu#37.ttwu_count
9 Â 15% -42.1% 5 Â 48% sched_debug.cpu#31.cpu_load[2]
124110 Â 4% -33.1% 82971 Â 2% sched_debug.cfs_rq[2]:/.exec_clock
75681 Â 15% -37.4% 47341 Â 22% sched_debug.cpu#43.ttwu_count
2937556 Â 14% -26.6% 2157455 Â 1% sched_debug.cfs_rq[9]:/.min_vruntime
351956 Â 41% +113.5% 751528 Â 3% sched_debug.cpu#29.avg_idle
2893 Â 10% -31.3% 1988 Â 11% sched_debug.cpu#2.curr->pid
333679 Â 9% +64.6% 549118 Â 3% sched_debug.cpu#8.ttwu_count
16 Â 33% +95.5% 32 Â 8% sched_debug.cpu#7.cpu_load[0]
55033 Â 6% -32.1% 37382 Â 3% sched_debug.cfs_rq[33]:/.exec_clock
2912268 Â 14% -24.9% 2186689 Â 1% sched_debug.cfs_rq[11]:/.min_vruntime
103839 Â 17% -23.2% 79742 Â 2% sched_debug.cfs_rq[9]:/.exec_clock
221835 Â 3% -36.6% 140546 Â 14% sched_debug.cpu#5.avg_idle
39 Â 18% -24.7% 29 Â 9% sched_debug.cfs_rq[15]:/.load
49071 Â 9% -26.8% 35934 Â 4% sched_debug.cfs_rq[36]:/.exec_clock
2709012 Â 16% -22.3% 2104190 Â 1% sched_debug.cfs_rq[21]:/.min_vruntime
6 Â 0% -45.8% 3 Â 25% sched_debug.cpu#42.cpu_load[3]
8 Â 12% -40.6% 4 Â 40% sched_debug.cpu#31.cpu_load[3]
11 Â 4% -47.8% 6 Â 40% sched_debug.cpu#38.cpu_load[0]
8 Â 12% -50.0% 4 Â 50% sched_debug.cpu#27.cpu_load[4]
386952 Â 23% +83.0% 708259 Â 2% sched_debug.cpu#26.avg_idle
7326 Â 9% -23.5% 5608 Â 9% sched_debug.cfs_rq[42]:/.avg->runnable_avg_sum
3191103 Â 2% -31.4% 2188471 Â 1% sched_debug.cfs_rq[2]:/.min_vruntime
82086 Â 6% -33.0% 54958 Â 8% sched_debug.cpu#44.ttwu_count
2890142 Â 13% -24.9% 2171243 Â 1% sched_debug.cfs_rq[10]:/.min_vruntime
2457 Â 18% -29.9% 1723 Â 19% sched_debug.cpu#9.curr->pid
2727512 Â 14% -23.3% 2091338 Â 1% sched_debug.cfs_rq[23]:/.min_vruntime
159 Â 9% -23.3% 122 Â 9% sched_debug.cfs_rq[42]:/.tg_runnable_contrib
2462 Â 13% -27.1% 1795 Â 10% sched_debug.cpu#16.curr->pid
2701103 Â 15% -22.1% 2102936 Â 2% sched_debug.cfs_rq[22]:/.min_vruntime
3034169 Â 10% -25.7% 2254798 Â 1% sched_debug.cfs_rq[0]:/.min_vruntime
3155734 Â 1% -31.3% 2168304 Â 0% sched_debug.cfs_rq[3]:/.min_vruntime
2281498 Â 10% -26.4% 1680208 Â 2% sched_debug.cfs_rq[32]:/.min_vruntime
3057557 Â 6% -27.7% 2209313 Â 1% sched_debug.cfs_rq[1]:/.min_vruntime
2254079 Â 9% -25.8% 1671750 Â 2% sched_debug.cfs_rq[28]:/.min_vruntime
92291 Â 18% -19.2% 74589 Â 1% sched_debug.cfs_rq[23]:/.exec_clock
3063883 Â 1% -30.1% 2142059 Â 3% sched_debug.cfs_rq[14]:/.min_vruntime
50151 Â 6% -26.7% 36747 Â 3% sched_debug.cfs_rq[39]:/.exec_clock
35769 Â 8% -29.9% 25064 Â 6% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
3068829 Â 1% -30.9% 2120576 Â 2% sched_debug.cfs_rq[15]:/.min_vruntime
781 Â 8% -30.0% 547 Â 6% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
2256242 Â 9% -25.7% 1675498 Â 1% sched_debug.cfs_rq[29]:/.min_vruntime
50205 Â 6% -26.9% 36707 Â 3% sched_debug.cfs_rq[37]:/.exec_clock
3250759 Â 5% -27.5% 2358220 Â 0% softirqs.TIMER
2244501 Â 10% -25.3% 1676302 Â 2% sched_debug.cfs_rq[27]:/.min_vruntime
2232461 Â 9% -25.1% 1671122 Â 2% sched_debug.cfs_rq[30]:/.min_vruntime
412796 Â 25% +75.6% 724762 Â 10% sched_debug.cpu#25.avg_idle
101241 Â 15% -20.2% 80832 Â 1% sched_debug.cfs_rq[10]:/.exec_clock
2880339 Â 10% -22.6% 2228081 Â 2% sched_debug.cfs_rq[12]:/.min_vruntime
3.77e+09 Â 12% +62.4% 6.121e+09 Â 1% cpuidle.C6-IVT.time
31003 Â 16% -29.6% 21815 Â 16% sched_debug.cfs_rq[14]:/.avg->runnable_avg_sum
676 Â 16% -29.5% 476 Â 16% sched_debug.cfs_rq[14]:/.tg_runnable_contrib
2240325 Â 7% -25.3% 1672621 Â 1% sched_debug.cfs_rq[31]:/.min_vruntime
2228382 Â 9% -24.7% 1677834 Â 2% sched_debug.cfs_rq[34]:/.min_vruntime
236748 Â 3% -34.0% 156159 Â 12% sched_debug.cpu#4.avg_idle
2888090 Â 7% -25.1% 2163595 Â 2% sched_debug.cfs_rq[13]:/.min_vruntime
9 Â 5% -36.8% 6 Â 20% sched_debug.cpu#38.cpu_load[2]
19 Â 2% -30.8% 13 Â 3% vmstat.procs.r
2206299 Â 9% -24.0% 1676888 Â 1% sched_debug.cfs_rq[25]:/.min_vruntime
48946 Â 4% -26.4% 36012 Â 1% sched_debug.cfs_rq[44]:/.exec_clock
2169012 Â 9% -23.0% 1670950 Â 1% sched_debug.cfs_rq[24]:/.min_vruntime
267987 Â 3% -39.6% 161758 Â 25% sched_debug.cpu#7.avg_idle
2220319 Â 7% -24.5% 1675674 Â 1% sched_debug.cfs_rq[26]:/.min_vruntime
1614 Â 6% -28.2% 1160 Â 10% sched_debug.cpu#9.ttwu_local
49240 Â 4% -26.6% 36165 Â 4% sched_debug.cfs_rq[43]:/.exec_clock
2351 Â 13% -21.0% 1857 Â 14% sched_debug.cpu#14.curr->pid
2148224 Â 8% -22.8% 1659387 Â 2% sched_debug.cfs_rq[35]:/.min_vruntime
50310 Â 6% -22.7% 38883 Â 5% sched_debug.cfs_rq[47]:/.exec_clock
50877 Â 4% -25.5% 37897 Â 1% sched_debug.cfs_rq[38]:/.exec_clock
2185406 Â 7% -23.1% 1681305 Â 2% sched_debug.cfs_rq[33]:/.min_vruntime
48668 Â 5% -25.3% 36375 Â 3% sched_debug.cfs_rq[45]:/.exec_clock
2735251 Â 8% -21.3% 2151871 Â 0% sched_debug.cfs_rq[8]:/.min_vruntime
892 Â 1% -30.4% 621 Â 9% sched_debug.cpu#28.curr->pid
18745 Â 4% -24.6% 14130 Â 2% sched_debug.cfs_rq[11]:/.tg->runnable_avg
18738 Â 4% -24.6% 14127 Â 2% sched_debug.cfs_rq[10]:/.tg->runnable_avg
18727 Â 4% -24.6% 14123 Â 2% sched_debug.cfs_rq[8]:/.tg->runnable_avg
18732 Â 4% -24.6% 14125 Â 2% sched_debug.cfs_rq[9]:/.tg->runnable_avg
18722 Â 4% -24.6% 14120 Â 2% sched_debug.cfs_rq[7]:/.tg->runnable_avg
25479 Â 3% -27.5% 18484 Â 14% sched_debug.cfs_rq[16]:/.avg->runnable_avg_sum
49186 Â 8% -24.7% 37045 Â 4% sched_debug.cfs_rq[46]:/.exec_clock
10 Â 4% -42.9% 6 Â 45% sched_debug.cpu#38.cpu_load[1]
556 Â 3% -27.4% 404 Â 14% sched_debug.cfs_rq[16]:/.tg_runnable_contrib
184300 Â 10% -19.5% 148390 Â 1% meminfo.Active
2906500 Â 2% -25.4% 2167333 Â 0% sched_debug.cfs_rq[4]:/.min_vruntime
18732 Â 4% -24.6% 14125 Â 2% sched_debug.cfs_rq[1]:/.tg->runnable_avg
18734 Â 4% -24.6% 14131 Â 2% sched_debug.cfs_rq[2]:/.tg->runnable_avg
18722 Â 4% -24.6% 14114 Â 2% sched_debug.cfs_rq[0]:/.tg->runnable_avg
48754 Â 3% -25.8% 36196 Â 3% sched_debug.cfs_rq[41]:/.exec_clock
2342 Â 3% -29.4% 1654 Â 13% sched_debug.cpu#14.ttwu_local
86444 Â 8% -20.5% 68719 Â 2% sched_debug.cpu#28.nr_load_updates
2117189 Â 6% -21.0% 1672016 Â 2% sched_debug.cfs_rq[37]:/.min_vruntime
89642 Â 6% -20.9% 70889 Â 2% sched_debug.cpu#32.nr_load_updates
18719 Â 4% -24.4% 14155 Â 2% sched_debug.cfs_rq[5]:/.tg->runnable_avg
18719 Â 4% -24.4% 14158 Â 2% sched_debug.cfs_rq[6]:/.tg->runnable_avg
18746 Â 4% -24.3% 14194 Â 2% sched_debug.cfs_rq[12]:/.tg->runnable_avg
18705 Â 4% -24.4% 14147 Â 2% sched_debug.cfs_rq[3]:/.tg->runnable_avg
18711 Â 4% -24.4% 14151 Â 2% sched_debug.cfs_rq[4]:/.tg->runnable_avg
18747 Â 4% -24.3% 14200 Â 2% sched_debug.cfs_rq[13]:/.tg->runnable_avg
18750 Â 4% -24.2% 14209 Â 2% sched_debug.cfs_rq[14]:/.tg->runnable_avg
18757 Â 4% -24.2% 14220 Â 2% sched_debug.cfs_rq[16]:/.tg->runnable_avg
18763 Â 4% -24.2% 14223 Â 2% sched_debug.cfs_rq[17]:/.tg->runnable_avg
2566671 Â 10% -18.9% 2081331 Â 2% sched_debug.cfs_rq[20]:/.min_vruntime
18753 Â 4% -24.2% 14220 Â 2% sched_debug.cfs_rq[15]:/.tg->runnable_avg
113665 Â 2% -24.8% 85481 Â 0% sched_debug.cfs_rq[1]:/.exec_clock
18680 Â 5% -23.6% 14275 Â 3% sched_debug.cfs_rq[22]:/.tg->runnable_avg
17 Â 31% +75.7% 30 Â 7% sched_debug.cpu#7.cpu_load[1]
18766 Â 4% -24.1% 14241 Â 2% sched_debug.cfs_rq[18]:/.tg->runnable_avg
18767 Â 4% -24.2% 14228 Â 2% sched_debug.cfs_rq[19]:/.tg->runnable_avg
18665 Â 5% -23.8% 14231 Â 2% sched_debug.cfs_rq[20]:/.tg->runnable_avg
18674 Â 5% -23.8% 14235 Â 2% sched_debug.cfs_rq[21]:/.tg->runnable_avg
18674 Â 5% -23.5% 14280 Â 3% sched_debug.cfs_rq[23]:/.tg->runnable_avg
104108 Â 6% -22.1% 81073 Â 3% sched_debug.cfs_rq[13]:/.exec_clock
1481 Â 13% -19.1% 1198 Â 7% sched_debug.cpu#10.ttwu_local
18677 Â 5% -23.8% 14223 Â 3% sched_debug.cfs_rq[24]:/.tg->runnable_avg
18677 Â 5% -23.8% 14223 Â 3% sched_debug.cfs_rq[25]:/.tg->runnable_avg
18680 Â 5% -23.8% 14228 Â 3% sched_debug.cfs_rq[26]:/.tg->runnable_avg
675646 Â 28% +71.2% 1156997 Â 2% sched_debug.cpu#1.sched_count
18627 Â 5% -23.6% 14228 Â 3% sched_debug.cfs_rq[27]:/.tg->runnable_avg
18631 Â 5% -23.2% 14308 Â 3% sched_debug.cfs_rq[30]:/.tg->runnable_avg
18627 Â 5% -23.6% 14230 Â 3% sched_debug.cfs_rq[28]:/.tg->runnable_avg
18628 Â 5% -23.6% 14231 Â 3% sched_debug.cfs_rq[29]:/.tg->runnable_avg
165 Â 2% -25.1% 124 Â 8% sched_debug.cfs_rq[41]:/.tg_runnable_contrib
18636 Â 5% -23.2% 14310 Â 3% sched_debug.cfs_rq[31]:/.tg->runnable_avg
7637 Â 2% -25.5% 5692 Â 8% sched_debug.cfs_rq[41]:/.avg->runnable_avg_sum
18621 Â 5% -23.2% 14310 Â 3% sched_debug.cfs_rq[32]:/.tg->runnable_avg
117808 Â 7% -20.2% 93969 Â 1% sched_debug.cfs_rq[0]:/.exec_clock
28849 ± 12% -19.8% 23134 ± 6% sched_debug.cfs_rq[11]:/.avg->runnable_avg_sum
18603 ± 4% -23.1% 14313 ± 3% sched_debug.cfs_rq[33]:/.tg->runnable_avg
18736 ± 4% -22.6% 14498 ± 1% sched_debug.cfs_rq[37]:/.tg->runnable_avg
18730 ± 4% -22.6% 14494 ± 1% sched_debug.cfs_rq[36]:/.tg->runnable_avg
18722 ± 4% -22.6% 14491 ± 1% sched_debug.cfs_rq[35]:/.tg->runnable_avg
18736 ± 4% -22.6% 14501 ± 1% sched_debug.cfs_rq[39]:/.tg->runnable_avg
18734 ± 4% -22.6% 14499 ± 1% sched_debug.cfs_rq[38]:/.tg->runnable_avg
18603 ± 4% -22.1% 14485 ± 1% sched_debug.cfs_rq[34]:/.tg->runnable_avg
103545 ± 5% -21.5% 81273 ± 1% sched_debug.cfs_rq[4]:/.exec_clock
47941 ± 2% -24.4% 36242 ± 2% sched_debug.cfs_rq[42]:/.exec_clock
2070791 ± 6% -19.1% 1675576 ± 2% sched_debug.cfs_rq[47]:/.min_vruntime
629 ± 12% -19.6% 505 ± 6% sched_debug.cfs_rq[11]:/.tg_runnable_contrib
331579 ± 27% +70.3% 564606 ± 2% sched_debug.cpu#1.sched_goidle
2132511 ± 5% -20.7% 1691609 ± 1% sched_debug.cfs_rq[38]:/.min_vruntime
2072109 ± 7% -19.8% 1662238 ± 1% sched_debug.cfs_rq[36]:/.min_vruntime
667043 ± 27% +69.7% 1132176 ± 2% sched_debug.cpu#1.nr_switches
15 ± 46% +95.0% 29 ± 10% sched_debug.cfs_rq[7]:/.runnable_load_avg
18629 ± 3% -22.2% 14489 ± 1% sched_debug.cfs_rq[41]:/.tg->runnable_avg
18628 ± 3% -22.2% 14487 ± 1% sched_debug.cfs_rq[40]:/.tg->runnable_avg
18631 ± 3% -22.2% 14492 ± 1% sched_debug.cfs_rq[43]:/.tg->runnable_avg
18630 ± 3% -22.2% 14490 ± 1% sched_debug.cfs_rq[42]:/.tg->runnable_avg
2075199 ± 7% -19.2% 1675770 ± 2% sched_debug.cfs_rq[46]:/.min_vruntime
18632 ± 3% -22.2% 14496 ± 1% sched_debug.cfs_rq[44]:/.tg->runnable_avg
2313 ± 3% -24.9% 1737 ± 13% sched_debug.cpu#4.curr->pid
18625 ± 3% -22.2% 14497 ± 1% sched_debug.cfs_rq[45]:/.tg->runnable_avg
18627 ± 3% -22.2% 14501 ± 1% sched_debug.cfs_rq[46]:/.tg->runnable_avg
18627 ± 3% -22.1% 14503 ± 1% sched_debug.cfs_rq[47]:/.tg->runnable_avg
87154 ± 8% -19.7% 70009 ± 2% sched_debug.cpu#30.nr_load_updates
2066890 ± 5% -19.4% 1665872 ± 1% sched_debug.cfs_rq[44]:/.min_vruntime
22 ± 11% -13.3% 19 ± 13% sched_debug.cpu#20.cpu_load[2]
33 ± 6% +35.6% 44 ± 12% sched_debug.cpu#0.load
2070604 ± 6% -19.2% 1673355 ± 2% sched_debug.cfs_rq[45]:/.min_vruntime
365951 ± 33% +72.9% 632720 ± 3% sched_debug.cpu#18.ttwu_count
87470 ± 7% -19.3% 70583 ± 1% sched_debug.cpu#29.nr_load_updates
2780140 ± 2% -21.6% 2178979 ± 0% sched_debug.cfs_rq[5]:/.min_vruntime
2113140 ± 5% -20.9% 1671691 ± 1% sched_debug.cfs_rq[39]:/.min_vruntime
495 ± 2% -22.0% 386 ± 3% numa-vmstat.node0.nr_isolated_file
87282 ± 5% -19.9% 69905 ± 2% sched_debug.cpu#31.nr_load_updates
2776275 ± 2% -21.6% 2177359 ± 1% sched_debug.cfs_rq[7]:/.min_vruntime
2797293 ± 0% -22.4% 2171180 ± 1% sched_debug.cfs_rq[6]:/.min_vruntime
2067716 ± 4% -19.8% 1659081 ± 2% sched_debug.cfs_rq[41]:/.min_vruntime
183 ± 4% -21.3% 144 ± 11% sched_debug.cfs_rq[46]:/.tg_runnable_contrib
8446 ± 4% -21.6% 6624 ± 11% sched_debug.cfs_rq[46]:/.avg->runnable_avg_sum
2068500 ± 4% -19.5% 1665943 ± 2% sched_debug.cfs_rq[43]:/.min_vruntime
2064957 ± 4% -19.1% 1669565 ± 1% sched_debug.cfs_rq[42]:/.min_vruntime
84917 ± 9% -16.7% 70727 ± 3% sched_debug.cpu#27.nr_load_updates
49411 ± 1% -22.9% 38114 ± 4% sched_debug.cfs_rq[40]:/.exec_clock
2097900 ± 4% -19.4% 1690936 ± 2% sched_debug.cfs_rq[40]:/.min_vruntime
3.13 ± 12% +46.1% 4.57 ± 1% perf-profile.cpu-cycles.cpu_startup_entry.start_secondary
102310 ± 10% -14.3% 87646 ± 3% sched_debug.cfs_rq[12]:/.exec_clock
3.15 ± 12% +46.0% 4.59 ± 1% perf-profile.cpu-cycles.start_secondary
625 ± 1% -23.5% 478 ± 4% sched_debug.cfs_rq[5]:/.tg_runnable_contrib
1.34 ± 12% +45.0% 1.95 ± 3% perf-profile.cpu-cycles.cpuidle_enter.cpu_startup_entry.start_secondary
5393 ± 4% -17.7% 4439 ± 12% numa-vmstat.node0.nr_anon_pages
21562 ± 4% -17.5% 17782 ± 12% numa-meminfo.node0.AnonPages
2670003 ± 1% -21.2% 2104927 ± 2% sched_debug.cfs_rq[16]:/.min_vruntime
28696 ± 1% -23.6% 21938 ± 4% sched_debug.cfs_rq[5]:/.avg->runnable_avg_sum
86063 ± 5% -18.8% 69919 ± 3% sched_debug.cpu#34.nr_load_updates
676 ± 2% -28.5% 483 ± 20% sched_debug.cpu#46.ttwu_local
0.98 ± 15% +46.9% 1.44 ± 0% perf-profile.cpu-cycles.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
0.92 ± 15% +46.4% 1.34 ± 1% perf-profile.cpu-cycles.intel_idle.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
18 ± 18% +48.6% 27 ± 5% sched_debug.cfs_rq[20]:/.load
2616968 ± 2% -19.7% 2100626 ± 1% sched_debug.cfs_rq[17]:/.min_vruntime
88866 ± 6% -15.5% 75092 ± 3% sched_debug.cfs_rq[16]:/.exec_clock
5.32 ± 23% +56.5% 8.34 ± 2% turbostat.%c6
83944 ± 6% -17.1% 69586 ± 1% sched_debug.cpu#25.nr_load_updates
96567 ± 7% -16.2% 80964 ± 2% sched_debug.cfs_rq[6]:/.exec_clock
2337 ± 4% -17.8% 1922 ± 12% sched_debug.cpu#8.curr->pid
644 ± 2% -18.4% 526 ± 4% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
173 ± 10% -19.0% 140 ± 10% sched_debug.cfs_rq[40]:/.tg_runnable_contrib
17 ± 8% +37.1% 24 ± 12% sched_debug.cpu#21.cpu_load[4]
18 ± 5% +29.2% 23 ± 13% sched_debug.cpu#21.cpu_load[3]
29479 ± 2% -18.3% 24085 ± 4% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
2312 ± 4% -27.3% 1680 ± 16% sched_debug.cpu#17.curr->pid
7988 ± 10% -19.0% 6468 ± 10% sched_debug.cfs_rq[40]:/.avg->runnable_avg_sum
30 ± 1% +36.9% 41 ± 27% sched_debug.cpu#5.load
442320 ± 20% +50.1% 663933 ± 3% sched_debug.cpu#4.sched_goidle
18 ± 29% +58.1% 29 ± 2% sched_debug.cpu#7.cpu_load[3]
18 ± 33% +63.9% 29 ± 3% sched_debug.cpu#7.cpu_load[2]
431501 ± 36% +65.8% 715220 ± 9% sched_debug.cpu#27.avg_idle
83829 ± 5% -15.7% 70681 ± 1% sched_debug.cpu#26.nr_load_updates
889245 ± 20% +49.7% 1331482 ± 4% sched_debug.cpu#4.nr_switches
3472067 ± 5% -16.8% 2889475 ± 3% numa-meminfo.node0.MemFree
591 ± 3% -18.0% 485 ± 5% sched_debug.cfs_rq[13]:/.tg_runnable_contrib
83450 ± 3% -17.9% 68547 ± 2% sched_debug.cpu#35.nr_load_updates
2571432 ± 1% -17.6% 2119533 ± 1% sched_debug.cfs_rq[18]:/.min_vruntime
27116 ± 3% -17.9% 22268 ± 5% sched_debug.cfs_rq[13]:/.avg->runnable_avg_sum
82841 ± 5% -15.4% 70118 ± 2% sched_debug.cpu#24.nr_load_updates
1.82 ± 11% +35.2% 2.47 ± 0% perf-profile.cpu-cycles.xfs_vm_readpages.__do_page_cache_readahead.ondemand_readahead.page_cache_async_readahead.filemap_fault
1.81 ± 11% +35.7% 2.46 ± 0% perf-profile.cpu-cycles.mpage_readpages.xfs_vm_readpages.__do_page_cache_readahead.ondemand_readahead.page_cache_async_readahead
867184 ± 5% -15.0% 736731 ± 2% numa-vmstat.node0.nr_free_pages
555 ± 8% -21.3% 436 ± 17% sched_debug.cfs_rq[4]:/.tg_runnable_contrib
924672 ± 19% +47.3% 1362330 ± 3% sched_debug.cpu#4.sched_count
1.21 ± 18% +44.6% 1.74 ± 0% perf-profile.cpu-cycles.do_mpage_readpage.mpage_readpages.xfs_vm_readpages.__do_page_cache_readahead.ondemand_readahead
25390 ± 8% -21.1% 20035 ± 17% sched_debug.cfs_rq[4]:/.avg->runnable_avg_sum
2557617 ± 0% -17.2% 2118082 ± 1% sched_debug.cfs_rq[19]:/.min_vruntime
351375 ± 1% -12.9% 306106 ± 6% softirqs.SCHED
2370 ± 6% -13.2% 2058 ± 6% sched_debug.cpu#13.curr->pid
91070 ± 7% -12.8% 79415 ± 2% sched_debug.cfs_rq[8]:/.exec_clock
95294 ± 4% -14.1% 81823 ± 0% sched_debug.cfs_rq[5]:/.exec_clock
238317 ± 5% -30.9% 164683 ± 34% sched_debug.cpu#6.avg_idle
83424 ± 2% -16.2% 69867 ± 2% sched_debug.cpu#33.nr_load_updates
8294 ± 2% -23.4% 6355 ± 18% sched_debug.cfs_rq[33]:/.avg->runnable_avg_sum
180 ± 2% -23.2% 138 ± 18% sched_debug.cfs_rq[33]:/.tg_runnable_contrib
95628 ± 4% -14.2% 82063 ± 1% sched_debug.cfs_rq[7]:/.exec_clock
131027 ± 4% -11.9% 115387 ± 1% sched_debug.cpu#14.nr_load_updates
398962 ± 35% +58.2% 631265 ± 2% sched_debug.cpu#19.ttwu_count
133714 ± 5% -11.4% 118513 ± 0% sched_debug.cpu#3.nr_load_updates
85431 ± 5% -12.5% 74779 ± 1% sched_debug.cfs_rq[17]:/.exec_clock
78487 ± 3% -12.9% 68362 ± 3% sched_debug.cpu#36.nr_load_updates
572 ± 1% -18.1% 468 ± 9% sched_debug.cfs_rq[8]:/.tg_runnable_contrib
135275 ± 3% -12.7% 118072 ± 0% sched_debug.cpu#2.nr_load_updates
26171 ± 1% -17.8% 21516 ± 9% sched_debug.cfs_rq[8]:/.avg->runnable_avg_sum
30 ± 6% -11.7% 26 ± 10% sched_debug.cpu#5.cpu_load[4]
131222 ± 5% -11.6% 116049 ± 0% sched_debug.cpu#15.nr_load_updates
4.739e+09 ± 3% -13.1% 4.119e+09 ± 2% cpuidle.C1-IVT.time
32 ± 3% +25.8% 40 ± 10% sched_debug.cpu#8.load
486160 ± 8% +28.9% 626649 ± 3% sched_debug.cpu#13.ttwu_count
79004 ± 2% -13.1% 68621 ± 2% sched_debug.cpu#37.nr_load_updates
78588 ± 3% -11.5% 69531 ± 2% sched_debug.cpu#39.nr_load_updates
97 ± 2% -20.4% 77 ± 10% sched_debug.cfs_rq[16]:/.tg_load_contrib
2.50 ± 30% +53.0% 3.82 ± 0% perf-profile.cpu-cycles.generic_smp_call_function_single_interrupt.smp_call_function_interrupt.call_function_interrupt.do_unit
2233 ± 4% -15.2% 1893 ± 14% sched_debug.cpu#5.curr->pid
2.79 ± 29% +51.9% 4.24 ± 0% perf-profile.cpu-cycles.smp_call_function_interrupt.call_function_interrupt.do_unit
410822 ± 32% +53.0% 628526 ± 3% sched_debug.cpu#17.ttwu_count
2.17 ± 30% +51.0% 3.27 ± 0% perf-profile.cpu-cycles.flush_smp_call_function_queue.generic_smp_call_function_single_interrupt.smp_call_function_interrupt.call_function_interrupt.do_unit
6181 ± 2% -14.9% 5262 ± 8% numa-vmstat.node0.nr_active_anon
378032 ± 30% +50.5% 568832 ± 2% sched_debug.cpu#6.ttwu_count
4.48 ± 29% +50.2% 6.74 ± 0% perf-profile.cpu-cycles.call_function_interrupt.do_unit.runtime_exceeded
78723 ± 2% -11.9% 69385 ± 2% sched_debug.cpu#46.nr_load_updates
19 ± 28% +47.4% 28 ± 2% sched_debug.cpu#7.cpu_load[4]
24780 ± 2% -14.8% 21123 ± 8% numa-meminfo.node0.Active(anon)
79208 ± 2% -12.5% 69275 ± 1% sched_debug.cpu#43.nr_load_updates
1001 ± 0% -12.8% 873 ± 2% proc-vmstat.nr_isolated_file
122398 ± 5% -8.0% 112589 ± 1% sched_debug.cpu#0.nr_load_updates
838 ± 4% -26.7% 614 ± 26% sched_debug.cpu#32.curr->pid
52.08 ± 1% +16.7% 60.80 ± 0% turbostat.%c1
78330 ± 1% -12.2% 68759 ± 2% sched_debug.cpu#45.nr_load_updates
78994 ± 1% -12.4% 69228 ± 1% sched_debug.cpu#44.nr_load_updates
77859 ± 1% -10.3% 69825 ± 3% sched_debug.cpu#47.nr_load_updates
9588 ± 3% +17.7% 11289 ± 0% uptime.idle
77946 ± 1% -11.6% 68910 ± 1% sched_debug.cpu#42.nr_load_updates
29 ± 1% +22.0% 36 ± 14% sched_debug.cfs_rq[8]:/.load
79172 ± 1% -10.3% 71045 ± 1% sched_debug.cpu#38.nr_load_updates
78525 ± 1% -11.3% 69640 ± 2% sched_debug.cpu#41.nr_load_updates
552379 ± 3% -7.4% 511740 ± 0% meminfo.Committed_AS
78896 ± 1% -10.5% 70631 ± 2% sched_debug.cpu#40.nr_load_updates
659571 ± 6% +18.5% 781765 ± 4% sched_debug.cpu#44.avg_idle
2200 ± 16% -67.7% 710 ± 6% time.system_time
77502 ± 10% -43.3% 43925 ± 5% time.involuntary_context_switches
42.57 ± 5% -27.6% 30.82 ± 0% turbostat.%c0
1933 ± 5% -25.6% 1437 ± 0% time.percent_of_cpu_this_job_got
1325078 ± 3% -21.2% 1044202 ± 0% vmstat.system.in
138 ± 2% -9.3% 125 ± 0% turbostat.Cor_W
171 ± 1% -7.5% 159 ± 0% turbostat.Pkg_W
30073157 ± 3% +4.7% 31478827 ± 0% time.voluntary_context_switches
3758 ± 0% -1.1% 3717 ± 0% time.user_time


ivb43: Ivytown Ivy Bridge-EP
Memory: 64G


unixbench.score

12000 ++------------------------------------------------------------------+
| .* *.. .*.. |
11800 *+*..*.*..*.*..*.*.*.. *. .*.*. + : *.* *.*..*.*.. .*
| + *..* *.. : * |
11600 ++ *. + * |
| * |
11400 ++ O O O O O O O O |
| |
11200 ++ |
| O O |
11000 O+ O O O O O O |
| O O O O O |
10800 ++ |
| O |
10600 ++------------------------------------------------------------------+


time.voluntary_context_switches

3.5e+06 ++----------------------------------------------------------------+
| |
3e+06 ++ O |
O O O O O O O O O O O |
| O O O O O O O O O O O |
2.5e+06 ++ |
| |
2e+06 ++ |
| |
1.5e+06 ++ |
| |
| |
1e+06 ++ |
*.*..*.*.*..*.*..*.*.*.. .*.*..*.*.*..*.*. .*.*..*.*.*..*.*.*..*.*
500000 ++----------------------*-----------------*-----------------------+


vmstat.system.cs

105000 ++-----------------------------------------------------------------+
O O O O O O O O O O O O O O |
| O O O O O O O O O |
100000 ++ |
| |
| |
95000 ++ |
| |
90000 ++ |
| |
| .*. .*. |
85000 *+*..*.*..*.*.*..*.*.. *..*.*..*.*.*..*. .*. *. *.*..*.*..*.*
| *. + * |
| * |
80000 ++-----------------------------------------------------------------+



[*] bisect-good sample
[O] bisect-bad sample

To reproduce:

apt-get install ruby ruby-oj
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Huang, Ying

---
testcase: unixbench
default_monitors:
wait: pre-test
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
cpuidle:
cpufreq:
turbostat:
sched_debug:
interval: 10
pmeter:
default_watchdogs:
watch-oom:
watchdog:
cpufreq_governor:
- performance
commit: b7392d2247cfe6771f95d256374f1a8e6a6f48d6
model: Grantley Haswell
nr_cpu: 16
memory: 16G
hdd_partitions:
swap_partitions:
rootfs_partition:
unixbench:
test:
- execl
testbox: lituya
tbox_group: lituya
kconfig: x86_64-rhel
enqueue_time: 2014-12-18 17:52:11.459479684 +08:00
head_commit: d5d2fd7af896451b86227a0847b54976d87d13f3
base_commit: b7392d2247cfe6771f95d256374f1a8e6a6f48d6
branch: linux-devel/devel-hourly-2014123019
kernel: "/kernel/x86_64-rhel/b7392d2247cfe6771f95d256374f1a8e6a6f48d6/vmlinuz-3.19.0-rc2-gb7392d2"
user: lkp
queue: cyclic
rootfs: debian-x86_64.cgz
result_root: "/result/lituya/unixbench/performance-execl/debian-x86_64.cgz/x86_64-rhel/b7392d2247cfe6771f95d256374f1a8e6a6f48d6/0"
job_file: "/lkp/scheduled/lituya/cyclic_unixbench-performance-execl-x86_64-rhel-BASE-b7392d2247cfe6771f95d256374f1a8e6a6f48d6-0.yaml"
dequeue_time: 2014-12-30 19:49:21.919663379 +08:00
job_state: finished
loadavg: 9.26 3.82 1.42 1/164 5138
start_time: '1419940190'
end_time: '1419940388'
version: "/lkp/lkp/.src-20141230-091833"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
./Run execl
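
For reference, the per-CPU scaling_governor writes above can be expressed as a single loop; this is just a sketch, assuming every CPU present exposes a cpufreq policy under sysfs:

    # set the "performance" governor on all CPUs, same as the per-CPU echoes above
    for g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
        echo performance > "$g"
    done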
_______________________________________________
LKP mailing list
LKP@xxxxxxxxxxxxxxx