[LKP] [sched] eae3e9e8843: +36.8% pigz.throughput

From: Huang Ying
Date: Mon May 11 2015 - 01:51:18 EST


FYI, we noticed the following changes on

git://bee.sh.intel.com/git/ydu19/linux rewrite-v7-on-4.1-rc1
commit eae3e9e8843146e7e1cc77bd943e5f8138b61314 ("sched: Rewrite per entity runnable load average tracking")
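
For context, the load tracking being rewritten here keeps, per scheduling entity, a geometrically decayed average of runnable time: each elapsed ~1ms period multiplies the accumulated history by a constant y chosen so that y^32 = 0.5 (the half-life constant documented in kernel/sched/fair.c). A minimal Python sketch of that decay, as an illustration of the idea only, not code from the patch:

    # PELT-style decayed sum: the newest period enters at full weight,
    # every older period is aged by y per period, with y**32 == 0.5.
    y = 0.5 ** (1 / 32)

    def decayed_load_sum(periods):
        """periods: per-period runnable times, oldest first."""
        total = 0.0
        for r in periods:
            total = total * y + r   # age the history, add the new period
        return total

    # A task runnable for 10 periods and then idle for 32 more retains
    # exactly half its contribution, so stale load fades from balancing.
    busy = decayed_load_sum([1.0] * 10)
    idle = decayed_load_sum([1.0] * 10 + [0.0] * 32)
    print(idle / busy)   # ~0.5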

testcase/path_params/tbox_group: pigz/performance-100%-128K/lkp-nex06
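
Each row below reads: parent mean ± stddev%, relative change, patched mean ± stddev%, metric name; the left column is the parent commit 40fa32019d8574cb, the right column the rewrite. The %change column is plain relative arithmetic; a quick Python check against the first row (means copied from the pigz.throughput line):

    old = 2.683e8   # mean on 40fa32019d8574cb (parent)
    new = 3.67e8    # mean on eae3e9e8843146e7e1cc77bd94 (rewrite)
    print(f"{(new - old) / old:+.1%}")   # -> +36.8%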

40fa32019d8574cb eae3e9e8843146e7e1cc77bd94
---------------- --------------------------
2.683e+08 ± 0% +36.8% 3.67e+08 ± 0% pigz.throughput
18661 ± 0% -21.6% 14625 ± 0% pigz.time.user_time
2046066 ± 0% +176.7% 5661440 ± 0% pigz.time.voluntary_context_switches
174 ± 0% +40.9% 245 ± 0% pigz.time.system_time
904986 ± 1% -43.8% 508883 ± 0% pigz.time.involuntary_context_switches
6268 ± 0% -21.0% 4952 ± 0% pigz.time.percent_of_cpu_this_job_got
3382 ± 8% +118.6% 7392 ± 3% uptime.idle
33576 ± 0% -12.3% 29441 ± 1% meminfo.Shmem
1241653 ± 6% -10.0% 1117099 ± 0% softirqs.RCU
201843 ± 0% +97.2% 397952 ± 0% softirqs.SCHED
9772559 ± 0% -20.6% 7762431 ± 0% softirqs.TIMER
63 ± 0% -17.7% 52 ± 1% vmstat.procs.r
84867 ± 0% -20.0% 67874 ± 0% vmstat.system.in
13057 ± 0% +183.4% 37007 ± 0% vmstat.system.cs
904986 ± 1% -43.8% 508883 ± 0% time.involuntary_context_switches
6268 ± 0% -21.0% 4952 ± 0% time.percent_of_cpu_this_job_got
174 ± 0% +40.9% 245 ± 0% time.system_time
18661 ± 0% -21.6% 14625 ± 0% time.user_time
2046066 ± 0% +176.7% 5661440 ± 0% time.voluntary_context_switches
69.56 ± 0% +7.4% 74.67 ± 0% turbostat.%Busy
1335 ± 0% +24.6% 1663 ± 0% turbostat.Avg_MHz
1920 ± 0% +16.0% 2227 ± 0% turbostat.Bzy_MHz
29.76 ± 0% -44.5% 16.50 ± 1% turbostat.CPU%c1
0.69 ± 5% +1182.5% 8.82 ± 1% turbostat.CPU%c3
0.28 ± 9% +83.0% 0.51 ± 2% turbostat.Pkg%pc3
8379 ± 0% -12.2% 7360 ± 1% proc-vmstat.nr_shmem
26862047 ± 0% +36.0% 36537881 ± 0% proc-vmstat.numa_hit
26861982 ± 0% +36.0% 36537861 ± 0% proc-vmstat.numa_local
9403 ± 0% -12.6% 8221 ± 2% proc-vmstat.pgactivate
2116622 ± 7% +28.9% 2727789 ± 2% proc-vmstat.pgalloc_dma32
24828395 ± 0% +36.6% 33904973 ± 0% proc-vmstat.pgalloc_normal
26931165 ± 0% +36.0% 36619978 ± 0% proc-vmstat.pgfree
6608532 ± 7% +29.1% 8533790 ± 2% numa-numastat.node0.numa_hit
6607488 ± 7% +29.1% 8533243 ± 2% numa-numastat.node0.local_node
6949144 ± 10% +32.9% 9232380 ± 10% numa-numastat.node1.local_node
6950185 ± 10% +32.8% 9232396 ± 10% numa-numastat.node1.numa_hit
6887396 ± 17% +50.7% 10377491 ± 10% numa-numastat.node2.local_node
6889467 ± 17% +50.6% 10378080 ± 10% numa-numastat.node2.numa_hit
6427416 ± 16% +30.8% 8408092 ± 1% numa-numastat.node3.local_node
6429490 ± 16% +30.8% 8408663 ± 1% numa-numastat.node3.numa_hit
23149 ± 4% +100.3% 46357 ± 1% cpuidle.C1-NHM.usage
7161061 ± 6% +326.7% 30557423 ± 1% cpuidle.C1-NHM.time
100163 ± 4% +263.5% 364095 ± 1% cpuidle.C1E-NHM.usage
22529441 ± 3% +390.6% 1.105e+08 ± 1% cpuidle.C1E-NHM.time
3.774e+08 ± 3% +1024.6% 4.244e+09 ± 0% cpuidle.C3-NHM.time
669049 ± 2% +556.3% 4391062 ± 0% cpuidle.C3-NHM.usage
16 ± 15% +506.1% 100 ± 11% cpuidle.POLL.usage
770 ± 40% +544.8% 4966 ± 35% cpuidle.POLL.time
34535 ± 36% -41.8% 20095 ± 8% numa-meminfo.node0.Active(anon)
23133 ± 7% -12.8% 20180 ± 8% numa-meminfo.node0.AnonPages
13351 ± 4% -20.6% 10594 ± 9% numa-meminfo.node0.SReclaimable
28535 ± 5% +15.7% 33003 ± 9% numa-meminfo.node2.Slab
218579 ± 2% +13.3% 247589 ± 1% numa-meminfo.node2.MemUsed
118187 ± 2% +8.3% 127984 ± 9% numa-meminfo.node3.FilePages
226067 ± 4% +10.6% 249934 ± 5% numa-meminfo.node3.MemUsed
2447 ± 9% +18.8% 2908 ± 3% numa-meminfo.node3.KernelStack
5775 ± 7% -12.6% 5047 ± 8% numa-vmstat.node0.nr_anon_pages
3498916 ± 9% +24.1% 4343868 ± 2% numa-vmstat.node0.numa_local
3532759 ± 9% +24.0% 4379945 ± 2% numa-vmstat.node0.numa_hit
8629 ± 37% -41.8% 5025 ± 8% numa-vmstat.node0.nr_active_anon
3337 ± 4% -20.7% 2648 ± 9% numa-vmstat.node0.nr_slab_reclaimable
150 ± 20% -22.2% 116 ± 0% numa-vmstat.node0.nr_unevictable
150 ± 20% -22.2% 116 ± 0% numa-vmstat.node0.nr_mlock
6417 ± 32% +363.7% 29753 ± 41% numa-vmstat.node1.numa_other
3525752 ± 11% +32.4% 4669694 ± 8% numa-vmstat.node1.numa_hit
3519334 ± 11% +31.8% 4639940 ± 8% numa-vmstat.node1.numa_local
3389724 ± 16% +53.8% 5212082 ± 9% numa-vmstat.node2.numa_local
3429257 ± 16% +52.9% 5243860 ± 9% numa-vmstat.node2.numa_hit
3205283 ± 15% +32.0% 4231247 ± 3% numa-vmstat.node3.numa_local
29546 ± 2% +8.3% 31998 ± 9% numa-vmstat.node3.nr_file_pages
3244813 ± 15% +31.1% 4253113 ± 3% numa-vmstat.node3.numa_hit
227 ± 1% +139.2% 544 ± 0% latency_stats.avg.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
452 ± 1% -8.7% 413 ± 1% latency_stats.avg.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
183 ± 4% +129.6% 421 ± 13% latency_stats.avg.call_rwsem_down_read_failed.__do_page_fault.do_page_fault.page_fault
3331 ± 0% -23.0% 2564 ± 3% latency_stats.avg.pipe_wait.wait_for_partner.fifo_open.do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_open.system_call_fastpath
56 ± 28% -34.2% 37 ± 20% latency_stats.avg.stop_two_cpus.migrate_swap.task_numa_migrate.numa_migrate_preferred.task_numa_fault.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
241 ± 8% -21.6% 189 ± 10% latency_stats.avg.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_access.[nfsv4].nfs4_proc_access.[nfsv4].nfs_do_access.nfs_permission.__inode_permission.inode_permission.may_open
207 ± 12% +263.9% 755 ± 26% latency_stats.avg.call_rwsem_down_write_failed.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
927 ± 1% -29.0% 658 ± 1% latency_stats.avg.do_wait.SyS_wait4.system_call_fastpath
175 ± 1% -17.5% 145 ± 0% latency_stats.avg.pipe_wait.pipe_write.__vfs_write.vfs_write.SyS_write.system_call_fastpath
50 ± 11% +181.1% 141 ± 5% latency_stats.avg.call_rwsem_down_read_failed.__do_page_fault.do_page_fault.page_fault.copy_page_to_iter.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
35 ± 11% -100.0% 0 ± 0% latency_stats.avg.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
92 ± 5% +47.3% 136 ± 7% latency_stats.avg.call_rwsem_down_write_failed.SyS_mprotect.system_call_fastpath
152 ± 12% +91.6% 291 ± 6% latency_stats.avg.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
22 ± 19% -42.7% 12 ± 40% latency_stats.avg.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
10 ± 28% -83.7% 1 ± 24% latency_stats.hits.call_rwsem_down_write_failed.unlink_file_vma.free_pgtables.exit_mmap.mmput.flush_old_exec.load_elf_binary.search_binary_handler.do_execveat_common.SyS_execve.return_from_execve
1358060 ± 0% +253.5% 4800372 ± 0% latency_stats.hits.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
15211 ± 1% +52.3% 23161 ± 0% latency_stats.hits.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
195 ± 6% -26.6% 143 ± 5% latency_stats.hits.call_rwsem_down_write_failed.vm_munmap.SyS_munmap.system_call_fastpath
941 ± 2% -41.3% 553 ± 5% latency_stats.hits.call_rwsem_down_read_failed.__do_page_fault.do_page_fault.page_fault.copy_page_to_iter.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
856 ± 21% +233.1% 2851 ± 6% latency_stats.hits.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
25 ± 29% +169.0% 67 ± 15% latency_stats.hits.pipe_write.__vfs_write.vfs_write.SyS_write.system_call_fastpath
9 ± 40% -84.2% 1 ± 33% latency_stats.hits.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault.clear_user.padzero.load_elf_binary
1313 ± 1% -12.4% 1151 ± 3% latency_stats.hits.call_rwsem_down_write_failed.SyS_mprotect.system_call_fastpath
4108 ± 0% +55.7% 6398 ± 0% latency_stats.hits.do_wait.SyS_wait4.system_call_fastpath
12638 ± 0% +27.0% 16053 ± 21% latency_stats.max.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
344 ± 10% -21.3% 271 ± 21% latency_stats.max.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_access.[nfsv4].nfs4_proc_access.[nfsv4].nfs_do_access.nfs_permission.__inode_permission.inode_permission.may_open
2295 ± 25% +331.3% 9898 ± 23% latency_stats.max.call_rwsem_down_read_failed.__do_page_fault.do_page_fault.page_fault.copy_page_to_iter.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
51 ± 18% -100.0% 0 ± 0% latency_stats.max.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
5538 ± 45% +120.8% 12228 ± 18% latency_stats.max.call_rwsem_down_read_failed.__do_page_fault.do_page_fault.page_fault
4086 ± 0% -12.3% 3582 ± 8% latency_stats.max.pipe_wait.wait_for_partner.fifo_open.do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_open.system_call_fastpath
3810831 ± 1% +10.7% 4218213 ± 1% latency_stats.sum.do_wait.SyS_wait4.system_call_fastpath
168212 ± 16% +274.5% 630000 ± 26% latency_stats.sum.call_rwsem_down_write_failed.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
6891099 ± 2% +39.0% 9579806 ± 0% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
1207 ± 8% -21.7% 946 ± 10% latency_stats.sum.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_access.[nfsv4].nfs4_proc_access.[nfsv4].nfs_do_access.nfs_permission.__inode_permission.inode_permission.may_open
122162 ± 6% +28.5% 156995 ± 5% latency_stats.sum.call_rwsem_down_write_failed.SyS_mprotect.system_call_fastpath
33322 ± 0% -23.0% 25650 ± 3% latency_stats.sum.pipe_wait.wait_for_partner.fifo_open.do_dentry_open.vfs_open.do_last.path_openat.do_filp_open.do_sys_open.SyS_open.system_call_fastpath
128589 ± 19% +544.3% 828462 ± 3% latency_stats.sum.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
47841 ± 12% +63.3% 78146 ± 3% latency_stats.sum.call_rwsem_down_read_failed.__do_page_fault.do_page_fault.page_fault.copy_page_to_iter.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
20227 ± 15% +56.2% 31588 ± 11% latency_stats.sum.wait_on_page_bit.filemap_fdatawait_range.filemap_fdatawait.filemap_write_and_wait.nfs_wb_all.nfs_getattr.vfs_getattr_nosec.vfs_getattr.vfs_fstat.SYSC_newfstat.SyS_newfstat.system_call_fastpath
228 ± 34% -93.8% 14 ± 46% latency_stats.sum.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault.clear_user.padzero.load_elf_binary
1.137e+08 ± 1% -9.5% 1.029e+08 ± 0% latency_stats.sum.pipe_wait.pipe_write.__vfs_write.vfs_write.SyS_write.system_call_fastpath
3.1e+08 ± 1% +744.4% 2.617e+09 ± 0% latency_stats.sum.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
131 ± 48% -100.0% 0 ± 0% latency_stats.sum.call_rwsem_down_write_failed.vma_link.mmap_region.do_mmap_pgoff.vm_mmap_pgoff.SyS_mmap_pgoff.SyS_mmap.system_call_fastpath
1581442 ± 5% +105.4% 3248689 ± 14% latency_stats.sum.call_rwsem_down_read_failed.__do_page_fault.do_page_fault.page_fault
0.41 ± 25% +196.3% 1.22 ± 2% perf-profile.cpu-cycles.__schedule.schedule.schedule_preempt_disabled.cpu_startup_entry.start_secondary
22.52 ± 5% -61.9% 8.57 ± 5% perf-profile.cpu-cycles.task_tick_fair.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer
0.54 ± 29% +356.0% 2.48 ± 6% perf-profile.cpu-cycles.__mutex_lock_slowpath.mutex_lock.pipe_wait.pipe_write.__vfs_write
0.54 ± 29% +356.0% 2.48 ± 6% perf-profile.cpu-cycles.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_wait.pipe_write
0.54 ± 13% +633.2% 3.98 ± 7% perf-profile.cpu-cycles.pick_next_task_fair.__schedule.schedule.futex_wait_queue_me.futex_wait
0.52 ± 28% +370.0% 2.47 ± 5% perf-profile.cpu-cycles.mutex_spin_on_owner.isra.4.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_wait
0.40 ± 11% +785.6% 3.54 ± 6% perf-profile.cpu-cycles.load_balance.pick_next_task_fair.__schedule.schedule.futex_wait_queue_me
3.99 ± 5% +62.1% 6.46 ± 2% perf-profile.cpu-cycles.enqueue_entity.enqueue_task_fair.enqueue_task.activate_task.ttwu_do_activate
1.67 ± 26% -47.8% 0.87 ± 20% perf-profile.cpu-cycles.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
0.30 ± 16% +270.2% 1.12 ± 8% perf-profile.cpu-cycles.tick_nohz_stop_sched_tick.__tick_nohz_idle_enter.tick_nohz_idle_enter.cpu_startup_entry.start_secondary
7.13 ± 11% -29.1% 5.05 ± 3% perf-profile.cpu-cycles.update_cfs_shares.task_tick_fair.scheduler_tick.update_process_times.tick_sched_handle
1.25 ± 19% +264.3% 4.54 ± 6% perf-profile.cpu-cycles.wake_futex.futex_wake.do_futex.sys_futex.system_call_fastpath
2.16 ± 28% +317.7% 9.01 ± 4% perf-profile.cpu-cycles.cpuidle_enter.cpu_startup_entry.start_secondary
1.94 ± 1% -32.0% 1.32 ± 2% perf-profile.cpu-cycles.__do_softirq.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
0.57 ± 26% +334.8% 2.50 ± 6% perf-profile.cpu-cycles.mutex_lock.pipe_wait.pipe_write.__vfs_write.vfs_write
0.36 ± 14% +279.9% 1.37 ± 7% perf-profile.cpu-cycles.__tick_nohz_idle_enter.tick_nohz_idle_enter.cpu_startup_entry.start_secondary
1.08 ± 9% -52.2% 0.52 ± 14% perf-profile.cpu-cycles.enqueue_task.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function
1.07 ± 9% -51.6% 0.52 ± 15% perf-profile.cpu-cycles.activate_task.ttwu_do_activate.try_to_wake_up.default_wake_function.autoremove_wake_function
32.23 ± 3% -53.5% 15.00 ± 3% perf-profile.cpu-cycles.update_process_times.tick_sched_handle.tick_sched_timer.__run_hrtimer.hrtimer_interrupt
32.57 ± 2% -53.2% 15.24 ± 3% perf-profile.cpu-cycles.tick_sched_handle.isra.18.tick_sched_timer.__run_hrtimer.hrtimer_interrupt.local_apic_timer_interrupt
1.06 ± 9% -36.8% 0.67 ± 10% perf-profile.cpu-cycles.account_process_tick.update_process_times.tick_sched_handle.tick_sched_timer.__run_hrtimer
33.62 ± 2% -52.8% 15.88 ± 3% perf-profile.cpu-cycles.tick_sched_timer.__run_hrtimer.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt
1.27 ± 7% -16.3% 1.06 ± 12% perf-profile.cpu-cycles.futex_requeue.do_futex.sys_futex.system_call_fastpath
28.27 ± 3% -55.3% 12.64 ± 4% perf-profile.cpu-cycles.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer.__run_hrtimer
35.73 ± 1% -52.2% 17.07 ± 3% perf-profile.cpu-cycles.__run_hrtimer.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
0.41 ± 12% +124.5% 0.92 ± 15% perf-profile.cpu-cycles.dequeue_entity.dequeue_task_fair.dequeue_task.deactivate_task.__schedule
39.41 ± 1% -52.4% 18.74 ± 2% perf-profile.cpu-cycles.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
12.73 ± 6% +11.6% 14.21 ± 2% perf-profile.cpu-cycles.vfs_write.sys_write.system_call_fastpath
39.95 ± 1% -52.4% 19.02 ± 3% perf-profile.cpu-cycles.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
45.98 ± 1% -51.2% 22.46 ± 2% perf-profile.cpu-cycles.apic_timer_interrupt
0.30 ± 21% +235.0% 1.01 ± 3% perf-profile.cpu-cycles.irq_exit.scheduler_ipi.smp_reschedule_interrupt.reschedule_interrupt.cpuidle_enter
1.81 ± 17% +104.3% 3.70 ± 3% perf-profile.cpu-cycles.enqueue_task_fair.enqueue_task.activate_task.ttwu_do_activate.sched_ttwu_pending
43.89 ± 1% -51.3% 21.38 ± 2% perf-profile.cpu-cycles.smp_apic_timer_interrupt.apic_timer_interrupt
1.66 ± 6% +322.1% 6.99 ± 5% perf-profile.cpu-cycles.futex_wait.do_futex.sys_futex.system_call_fastpath
2.99 ± 5% +48.1% 4.42 ± 2% perf-profile.cpu-cycles.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair
3.02 ± 4% +47.4% 4.45 ± 2% perf-profile.cpu-cycles.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.enqueue_task
0.72 ± 20% +210.0% 2.24 ± 7% perf-profile.cpu-cycles.ttwu_do_activate.constprop.88.try_to_wake_up.wake_up_state.wake_futex.futex_wake
3.29 ± 6% +50.0% 4.94 ± 3% perf-profile.cpu-cycles.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.enqueue_task.activate_task
1.11 ± 9% -47.1% 0.59 ± 11% perf-profile.cpu-cycles.clockevents_program_event.tick_program_event.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt
12.63 ± 6% +11.8% 14.12 ± 2% perf-profile.cpu-cycles.__vfs_write.vfs_write.sys_write.system_call_fastpath
1.81 ± 32% +354.8% 8.22 ± 4% perf-profile.cpu-cycles.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
1.12 ± 6% +171.3% 3.05 ± 5% perf-profile.cpu-cycles.pipe_wait.pipe_write.__vfs_write.vfs_write.sys_write
1.16 ± 7% -50.6% 0.57 ± 13% perf-profile.cpu-cycles.ttwu_do_activate.constprop.88.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common
12.57 ± 6% +12.0% 14.08 ± 2% perf-profile.cpu-cycles.pipe_write.__vfs_write.vfs_write.sys_write.system_call_fastpath
0.40 ± 17% +157.0% 1.01 ± 11% perf-profile.cpu-cycles.deactivate_task.__schedule.schedule.futex_wait_queue_me.futex_wait
1.83 ± 21% -46.3% 0.98 ± 11% perf-profile.cpu-cycles.ktime_get_update_offsets_now.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
12.75 ± 6% +11.6% 14.24 ± 2% perf-profile.cpu-cycles.sys_write.system_call_fastpath
20.55 ± 2% +11.7% 22.95 ± 2% perf-profile.cpu-cycles.sys_read.system_call_fastpath
1.27 ± 12% +97.8% 2.50 ± 4% perf-profile.cpu-cycles.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_state.wake_futex
1.61 ± 6% +43.6% 2.31 ± 6% perf-profile.cpu-cycles.__kernel_text_address.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency
20.50 ± 2% +11.8% 22.92 ± 2% perf-profile.cpu-cycles.vfs_read.sys_read.system_call_fastpath
1.27 ± 12% +97.8% 2.51 ± 4% perf-profile.cpu-cycles.enqueue_task.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_state
0.27 ± 24% +229.0% 0.88 ± 14% perf-profile.cpu-cycles.cpuidle_select.cpu_startup_entry.start_secondary
1.19 ± 10% -47.4% 0.62 ± 9% perf-profile.cpu-cycles.tick_program_event.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
0.39 ± 16% +151.6% 0.99 ± 12% perf-profile.cpu-cycles.dequeue_task.deactivate_task.__schedule.schedule.futex_wait_queue_me
0.55 ± 7% +96.8% 1.07 ± 13% perf-profile.cpu-cycles.dequeue_task_fair.dequeue_task.deactivate_task.__schedule.schedule
20.22 ± 2% +12.0% 22.64 ± 2% perf-profile.cpu-cycles.pipe_read.__vfs_read.vfs_read.sys_read.system_call_fastpath
1.78 ± 14% -47.1% 0.94 ± 9% perf-profile.cpu-cycles.perf_event_task_tick.scheduler_tick.update_process_times.tick_sched_handle.tick_sched_timer
1.33 ± 8% -43.8% 0.75 ± 15% perf-profile.cpu-cycles.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.pipe_read.__vfs_read
1.39 ± 7% -43.4% 0.79 ± 15% perf-profile.cpu-cycles.__wake_up_sync_key.pipe_read.__vfs_read.vfs_read.sys_read
0.33 ± 48% +309.1% 1.35 ± 7% perf-profile.cpu-cycles.tick_broadcast_oneshot_control.intel_idle.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry
1.32 ± 8% -43.7% 0.74 ± 15% perf-profile.cpu-cycles.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.pipe_read
2.37 ± 7% +25.1% 2.97 ± 1% perf-profile.cpu-cycles.enqueue_task_fair.enqueue_task.activate_task.ttwu_do_activate.try_to_wake_up
0.44 ± 25% +207.9% 1.36 ± 3% perf-profile.cpu-cycles.schedule.schedule_preempt_disabled.cpu_startup_entry.start_secondary
1.35 ± 8% -43.3% 0.77 ± 15% perf-profile.cpu-cycles.__wake_up_common.__wake_up_sync_key.pipe_read.__vfs_read.vfs_read
1.06 ± 6% -34.0% 0.70 ± 15% perf-profile.cpu-cycles.try_to_wake_up.wake_up_state.wake_futex.futex_requeue.do_futex
1.08 ± 7% -33.2% 0.72 ± 15% perf-profile.cpu-cycles.wake_up_state.wake_futex.futex_requeue.do_futex.sys_futex
0.30 ± 20% +222.7% 0.96 ± 2% perf-profile.cpu-cycles.__do_softirq.irq_exit.scheduler_ipi.smp_reschedule_interrupt.reschedule_interrupt
0.34 ± 22% +747.4% 2.90 ± 7% perf-profile.cpu-cycles.update_sd_lb_stats.find_busiest_group.load_balance.pick_next_task_fair.__schedule
4.81 ± 6% +185.2% 13.73 ± 5% perf-profile.cpu-cycles.sys_futex.system_call_fastpath
0.87 ± 10% -31.7% 0.59 ± 10% perf-profile.cpu-cycles.account_user_time.account_process_tick.update_process_times.tick_sched_handle.tick_sched_timer
2.75 ± 5% +41.7% 3.91 ± 2% perf-profile.cpu-cycles.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity
4.75 ± 6% +186.9% 13.62 ± 5% perf-profile.cpu-cycles.do_futex.sys_futex.system_call_fastpath
1.38 ± 25% +187.1% 3.95 ± 3% perf-profile.cpu-cycles.sched_ttwu_pending.cpu_startup_entry.start_secondary
1.46 ± 9% +44.7% 2.11 ± 4% perf-profile.cpu-cycles.anon_pipe_buf_release.pipe_read.__vfs_read.vfs_read.sys_read
0.85 ± 11% +51.8% 1.29 ± 5% perf-profile.cpu-cycles.free_hot_cold_page.put_page.anon_pipe_buf_release.pipe_read.__vfs_read
0.38 ± 16% +184.1% 1.07 ± 3% perf-profile.cpu-cycles.scheduler_ipi.smp_reschedule_interrupt.reschedule_interrupt.cpuidle_enter.cpu_startup_entry
20.31 ± 2% +12.2% 22.77 ± 2% perf-profile.cpu-cycles.__vfs_read.vfs_read.sys_read.system_call_fastpath
5.19 ± 23% +266.7% 19.04 ± 2% perf-profile.cpu-cycles.cpu_startup_entry.start_secondary
1.11 ± 11% +51.1% 1.67 ± 5% perf-profile.cpu-cycles.put_page.anon_pipe_buf_release.pipe_read.__vfs_read.vfs_read
5.23 ± 23% +268.5% 19.27 ± 2% perf-profile.cpu-cycles.start_secondary
0.45 ± 23% +209.5% 1.39 ± 3% perf-profile.cpu-cycles.schedule_preempt_disabled.cpu_startup_entry.start_secondary
1.20 ± 18% +252.6% 4.22 ± 7% perf-profile.cpu-cycles.try_to_wake_up.wake_up_state.wake_futex.futex_wake.do_futex
1.29 ± 25% +185.3% 3.69 ± 3% perf-profile.cpu-cycles.enqueue_task.activate_task.ttwu_do_activate.sched_ttwu_pending.cpu_startup_entry
1.76 ± 11% +252.5% 6.20 ± 3% perf-profile.cpu-cycles.mutex_spin_on_owner.isra.4.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_read
1.33 ± 8% +341.8% 5.89 ± 6% perf-profile.cpu-cycles.schedule.futex_wait_queue_me.futex_wait.do_futex.sys_futex
1.47 ± 9% -32.4% 1.00 ± 6% perf-profile.cpu-cycles.rcu_check_callbacks.update_process_times.tick_sched_handle.tick_sched_timer.__run_hrtimer
1.13 ± 5% -30.9% 0.78 ± 15% perf-profile.cpu-cycles.wake_futex.futex_requeue.do_futex.sys_futex.system_call_fastpath
1.35 ± 7% -42.8% 0.78 ± 13% perf-profile.cpu-cycles.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key
5.13 ± 8% -100.0% 0.00 ± 0% perf-profile.cpu-cycles.update_cfs_rq_blocked_load.task_tick_fair.scheduler_tick.update_process_times.tick_sched_handle
2.60 ± 4% -34.7% 1.70 ± 5% perf-profile.cpu-cycles.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
1.21 ± 18% +255.4% 4.28 ± 7% perf-profile.cpu-cycles.wake_up_state.wake_futex.futex_wake.do_futex.sys_futex
0.32 ± 29% +332.3% 1.40 ± 10% perf-profile.cpu-cycles.tick_nohz_idle_exit.cpu_startup_entry.start_secondary
1.41 ± 6% +336.3% 6.16 ± 6% perf-profile.cpu-cycles.futex_wait_queue_me.futex_wait.do_futex.sys_futex.system_call_fastpath
13.78 ± 3% -13.5% 11.92 ± 3% perf-profile.cpu-cycles.copy_user_generic_string.copy_page_to_iter.pipe_read.__vfs_read.vfs_read
1.29 ± 9% +348.0% 5.79 ± 6% perf-profile.cpu-cycles.__schedule.schedule.futex_wait_queue_me.futex_wait.do_futex
38.58 ± 3% +32.7% 51.21 ± 0% perf-profile.cpu-cycles.system_call_fastpath
14.53 ± 4% -13.9% 12.51 ± 3% perf-profile.cpu-cycles.copy_page_to_iter.pipe_read.__vfs_read.vfs_read.sys_read
1.65 ± 29% +367.9% 7.70 ± 4% perf-profile.cpu-cycles.intel_idle.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
1.57 ± 16% +240.8% 5.35 ± 6% perf-profile.cpu-cycles.futex_wake.do_futex.sys_futex.system_call_fastpath
1.82 ± 11% +246.0% 6.31 ± 3% perf-profile.cpu-cycles.mutex_lock.pipe_read.__vfs_read.vfs_read.sys_read
1.32 ± 24% +185.4% 3.78 ± 3% perf-profile.cpu-cycles.ttwu_do_activate.constprop.88.sched_ttwu_pending.cpu_startup_entry.start_secondary
1.28 ± 25% +183.0% 3.63 ± 2% perf-profile.cpu-cycles.activate_task.ttwu_do_activate.sched_ttwu_pending.cpu_startup_entry.start_secondary
1.80 ± 10% +250.3% 6.30 ± 3% perf-profile.cpu-cycles.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.pipe_read.__vfs_read
1.80 ± 10% +248.9% 6.30 ± 3% perf-profile.cpu-cycles.__mutex_lock_slowpath.mutex_lock.pipe_read.__vfs_read.vfs_read
0.39 ± 16% +278.2% 1.48 ± 7% perf-profile.cpu-cycles.tick_nohz_idle_enter.cpu_startup_entry.start_secondary
0.36 ± 20% +791.6% 3.19 ± 6% perf-profile.cpu-cycles.find_busiest_group.load_balance.pick_next_task_fair.__schedule.schedule
7.79 ± 6% -10.4% 6.98 ± 2% perf-profile.cpu-cycles.copy_page_from_iter.pipe_write.__vfs_write.vfs_write.sys_write
14 ± 3% -27.6% 10 ± 4% sched_debug.cfs_rq[0]:/.runnable_load_avg
936 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[0]:/.utilization_load_avg
14 ± 5% -26.8% 10 ± 4% sched_debug.cfs_rq[0]:/.load
4850 ± 9% -84.1% 769 ± 15% sched_debug.cfs_rq[0]:/.tg_load_avg
149666 ± 0% -17.3% 123710 ± 1% sched_debug.cfs_rq[0]:/.exec_clock
59 ± 35% -100.0% 0 ± 0% sched_debug.cfs_rq[10]:/.blocked_load_avg
14 ± 7% -34.5% 9 ± 5% sched_debug.cfs_rq[10]:/.load
951 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[10]:/.utilization_load_avg
74 ± 27% -100.0% 0 ± 0% sched_debug.cfs_rq[10]:/.tg_load_contrib
8 ± 49% +100.0% 16 ± 23% sched_debug.cfs_rq[10]:/.nr_spread_over
4601 ± 11% -83.4% 764 ± 13% sched_debug.cfs_rq[10]:/.tg_load_avg
143527 ± 0% -19.5% 115574 ± 1% sched_debug.cfs_rq[10]:/.exec_clock
14 ± 5% -26.3% 10 ± 4% sched_debug.cfs_rq[10]:/.runnable_load_avg
121 ± 46% -100.0% 0 ± 0% sched_debug.cfs_rq[11]:/.tg_load_contrib
144746 ± 1% -20.4% 115216 ± 0% sched_debug.cfs_rq[11]:/.exec_clock
4597 ± 11% -83.5% 758 ± 12% sched_debug.cfs_rq[11]:/.tg_load_avg
965 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[11]:/.utilization_load_avg
16 ± 11% -35.9% 10 ± 12% sched_debug.cfs_rq[11]:/.load
15 ± 12% -33.3% 10 ± 4% sched_debug.cfs_rq[11]:/.runnable_load_avg
14 ± 3% -24.6% 10 ± 4% sched_debug.cfs_rq[12]:/.runnable_load_avg
142635 ± 0% -18.8% 115855 ± 1% sched_debug.cfs_rq[12]:/.exec_clock
958 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[12]:/.utilization_load_avg
14 ± 3% -32.8% 9 ± 11% sched_debug.cfs_rq[12]:/.load
4589 ± 12% -83.6% 752 ± 11% sched_debug.cfs_rq[12]:/.tg_load_avg
70 ± 47% -100.0% 0 ± 0% sched_debug.cfs_rq[12]:/.tg_load_contrib
984 ± 6% -100.0% 0 ± 0% sched_debug.cfs_rq[13]:/.utilization_load_avg
4551 ± 12% -83.5% 752 ± 11% sched_debug.cfs_rq[13]:/.tg_load_avg
15 ± 10% -30.0% 10 ± 8% sched_debug.cfs_rq[13]:/.runnable_load_avg
143643 ± 0% -19.4% 115845 ± 1% sched_debug.cfs_rq[13]:/.exec_clock
14 ± 10% -35.6% 9 ± 15% sched_debug.cfs_rq[13]:/.load
86 ± 39% -100.0% 0 ± 0% sched_debug.cfs_rq[14]:/.tg_load_contrib
4544 ± 11% -83.4% 753 ± 11% sched_debug.cfs_rq[14]:/.tg_load_avg
969 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[14]:/.utilization_load_avg
142757 ± 0% -18.9% 115755 ± 1% sched_debug.cfs_rq[14]:/.exec_clock
70 ± 47% -100.0% 0 ± 0% sched_debug.cfs_rq[14]:/.blocked_load_avg
14 ± 10% -30.5% 10 ± 4% sched_debug.cfs_rq[14]:/.runnable_load_avg
15 ± 10% -35.5% 10 ± 14% sched_debug.cfs_rq[14]:/.load
15 ± 8% -34.4% 10 ± 10% sched_debug.cfs_rq[15]:/.runnable_load_avg
4529 ± 11% -83.4% 753 ± 11% sched_debug.cfs_rq[15]:/.tg_load_avg
142995 ± 0% -19.0% 115881 ± 1% sched_debug.cfs_rq[15]:/.exec_clock
961 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[15]:/.utilization_load_avg
4515 ± 11% -83.3% 753 ± 11% sched_debug.cfs_rq[16]:/.tg_load_avg
16 ± 9% -35.9% 10 ± 4% sched_debug.cfs_rq[16]:/.runnable_load_avg
15 ± 9% -39.7% 9 ± 15% sched_debug.cfs_rq[16]:/.load
975 ± 4% -100.0% 0 ± 0% sched_debug.cfs_rq[16]:/.utilization_load_avg
142733 ± 0% -18.6% 116189 ± 1% sched_debug.cfs_rq[16]:/.exec_clock
14 ± 7% -33.3% 9 ± 9% sched_debug.cfs_rq[17]:/.load
93 ± 40% -100.0% 0 ± 0% sched_debug.cfs_rq[17]:/.tg_load_contrib
142654 ± 0% -18.4% 116363 ± 1% sched_debug.cfs_rq[17]:/.exec_clock
4483 ± 11% -83.2% 754 ± 11% sched_debug.cfs_rq[17]:/.tg_load_avg
14 ± 2% -30.5% 10 ± 4% sched_debug.cfs_rq[17]:/.runnable_load_avg
946 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[17]:/.utilization_load_avg
78 ± 47% -100.0% 0 ± 0% sched_debug.cfs_rq[17]:/.blocked_load_avg
14 ± 5% -23.2% 10 ± 7% sched_debug.cfs_rq[18]:/.runnable_load_avg
4484 ± 10% -83.2% 754 ± 11% sched_debug.cfs_rq[18]:/.tg_load_avg
142729 ± 0% -19.0% 115549 ± 1% sched_debug.cfs_rq[18]:/.exec_clock
54 ± 48% -100.0% 0 ± 0% sched_debug.cfs_rq[18]:/.blocked_load_avg
69 ± 38% -100.0% 0 ± 0% sched_debug.cfs_rq[18]:/.tg_load_contrib
928 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[18]:/.utilization_load_avg
14 ± 9% -35.1% 9 ± 15% sched_debug.cfs_rq[18]:/.load
15 ± 10% -33.9% 10 ± 4% sched_debug.cfs_rq[19]:/.runnable_load_avg
142749 ± 0% -19.2% 115271 ± 1% sched_debug.cfs_rq[19]:/.exec_clock
960 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[19]:/.utilization_load_avg
83 ± 37% -100.0% 0 ± 0% sched_debug.cfs_rq[19]:/.tg_load_contrib
4479 ± 11% -83.1% 754 ± 11% sched_debug.cfs_rq[19]:/.tg_load_avg
14 ± 13% -32.2% 10 ± 10% sched_debug.cfs_rq[19]:/.load
68 ± 44% -100.0% 0 ± 0% sched_debug.cfs_rq[19]:/.blocked_load_avg
14 ± 5% -24.6% 10 ± 4% sched_debug.cfs_rq[1]:/.runnable_load_avg
14 ± 5% -24.6% 10 ± 13% sched_debug.cfs_rq[1]:/.load
4879 ± 10% -84.2% 769 ± 15% sched_debug.cfs_rq[1]:/.tg_load_avg
93 ± 38% -100.0% 0 ± 0% sched_debug.cfs_rq[1]:/.blocked_load_avg
107 ± 33% -100.0% 0 ± 0% sched_debug.cfs_rq[1]:/.tg_load_contrib
142952 ± 0% -18.9% 115989 ± 1% sched_debug.cfs_rq[1]:/.exec_clock
955 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[1]:/.utilization_load_avg
91 ± 49% -100.0% 0 ± 0% sched_debug.cfs_rq[20]:/.tg_load_contrib
142991 ± 0% -19.3% 115464 ± 1% sched_debug.cfs_rq[20]:/.exec_clock
13 ± 3% -20.0% 11 ± 6% sched_debug.cfs_rq[20]:/.runnable_load_avg
14 ± 5% -35.7% 9 ± 7% sched_debug.cfs_rq[20]:/.load
962 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[20]:/.utilization_load_avg
4474 ± 11% -83.1% 755 ± 11% sched_debug.cfs_rq[20]:/.tg_load_avg
142721 ± 0% -19.1% 115419 ± 1% sched_debug.cfs_rq[21]:/.exec_clock
14 ± 7% -28.6% 10 ± 12% sched_debug.cfs_rq[21]:/.load
84 ± 23% -100.0% 0 ± 0% sched_debug.cfs_rq[21]:/.tg_load_contrib
14 ± 7% -24.6% 10 ± 4% sched_debug.cfs_rq[21]:/.runnable_load_avg
4435 ± 11% -83.0% 752 ± 10% sched_debug.cfs_rq[21]:/.tg_load_avg
69 ± 27% -100.0% 0 ± 0% sched_debug.cfs_rq[21]:/.blocked_load_avg
945 ± 7% -100.0% 0 ± 0% sched_debug.cfs_rq[21]:/.utilization_load_avg
4424 ± 11% -83.0% 752 ± 10% sched_debug.cfs_rq[22]:/.tg_load_avg
142700 ± 0% -18.9% 115735 ± 0% sched_debug.cfs_rq[22]:/.exec_clock
14 ± 7% -33.3% 9 ± 5% sched_debug.cfs_rq[22]:/.load
14 ± 5% -24.6% 10 ± 4% sched_debug.cfs_rq[22]:/.runnable_load_avg
42 ± 35% -100.0% 0 ± 0% sched_debug.cfs_rq[22]:/.tg_load_contrib
970 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[22]:/.utilization_load_avg
13 ± 6% -18.5% 11 ± 11% sched_debug.cfs_rq[23]:/.runnable_load_avg
924 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[23]:/.utilization_load_avg
6 ± 30% +268.0% 23 ± 29% sched_debug.cfs_rq[23]:/.nr_spread_over
14 ± 5% -39.3% 8 ± 13% sched_debug.cfs_rq[23]:/.load
142864 ± 0% -18.7% 116159 ± 1% sched_debug.cfs_rq[23]:/.exec_clock
4410 ± 11% -82.9% 753 ± 10% sched_debug.cfs_rq[23]:/.tg_load_avg
14 ± 3% -24.1% 11 ± 0% sched_debug.cfs_rq[24]:/.runnable_load_avg
142964 ± 0% -18.9% 116008 ± 1% sched_debug.cfs_rq[24]:/.exec_clock
4386 ± 11% -82.8% 753 ± 10% sched_debug.cfs_rq[24]:/.tg_load_avg
118 ± 49% -100.0% 0 ± 0% sched_debug.cfs_rq[24]:/.blocked_load_avg
968 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[24]:/.utilization_load_avg
134 ± 43% -100.0% 0 ± 0% sched_debug.cfs_rq[24]:/.tg_load_contrib
6 ± 46% +188.9% 19 ± 46% sched_debug.cfs_rq[24]:/.nr_spread_over
14 ± 3% -29.3% 10 ± 4% sched_debug.cfs_rq[25]:/.runnable_load_avg
142996 ± 0% -17.9% 117411 ± 2% sched_debug.cfs_rq[25]:/.exec_clock
4351 ± 11% -82.7% 752 ± 10% sched_debug.cfs_rq[25]:/.tg_load_avg
968 ± 4% -100.0% 0 ± 0% sched_debug.cfs_rq[25]:/.utilization_load_avg
62 ± 41% -100.0% 0 ± 0% sched_debug.cfs_rq[25]:/.tg_load_contrib
14 ± 5% -37.3% 9 ± 14% sched_debug.cfs_rq[25]:/.load
4319 ± 10% -82.6% 752 ± 10% sched_debug.cfs_rq[26]:/.tg_load_avg
14 ± 10% -31.0% 10 ± 10% sched_debug.cfs_rq[26]:/.load
944 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[26]:/.utilization_load_avg
68 ± 45% -100.0% 0 ± 0% sched_debug.cfs_rq[26]:/.tg_load_contrib
142832 ± 0% -18.1% 117001 ± 2% sched_debug.cfs_rq[26]:/.exec_clock
14 ± 5% -29.3% 10 ± 4% sched_debug.cfs_rq[26]:/.runnable_load_avg
142765 ± 0% -18.6% 116191 ± 1% sched_debug.cfs_rq[27]:/.exec_clock
4280 ± 11% -82.4% 751 ± 10% sched_debug.cfs_rq[27]:/.tg_load_avg
14 ± 5% -30.5% 10 ± 4% sched_debug.cfs_rq[27]:/.runnable_load_avg
950 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[27]:/.utilization_load_avg
14 ± 7% -28.1% 10 ± 4% sched_debug.cfs_rq[28]:/.runnable_load_avg
143159 ± 0% -18.6% 116593 ± 1% sched_debug.cfs_rq[28]:/.exec_clock
52 ± 46% -100.0% 0 ± 0% sched_debug.cfs_rq[28]:/.blocked_load_avg
947 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[28]:/.utilization_load_avg
4275 ± 11% -82.4% 750 ± 10% sched_debug.cfs_rq[28]:/.tg_load_avg
15 ± 12% -38.3% 9 ± 14% sched_debug.cfs_rq[28]:/.load
67 ± 36% -100.0% 0 ± 0% sched_debug.cfs_rq[28]:/.tg_load_contrib
14 ± 5% -27.1% 10 ± 4% sched_debug.cfs_rq[29]:/.runnable_load_avg
97 ± 6% -100.0% 0 ± 0% sched_debug.cfs_rq[29]:/.blocked_load_avg
112 ± 6% -100.0% 0 ± 0% sched_debug.cfs_rq[29]:/.tg_load_contrib
956 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[29]:/.utilization_load_avg
4205 ± 9% -82.1% 750 ± 10% sched_debug.cfs_rq[29]:/.tg_load_avg
13 ± 6% -23.6% 10 ± 19% sched_debug.cfs_rq[29]:/.load
142927 ± 0% -18.7% 116233 ± 1% sched_debug.cfs_rq[29]:/.exec_clock
4855 ± 10% -84.1% 770 ± 15% sched_debug.cfs_rq[2]:/.tg_load_avg
78 ± 34% -100.0% 0 ± 0% sched_debug.cfs_rq[2]:/.tg_load_contrib
64 ± 42% -100.0% 0 ± 0% sched_debug.cfs_rq[2]:/.blocked_load_avg
948 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[2]:/.utilization_load_avg
142729 ± 0% -18.8% 115944 ± 1% sched_debug.cfs_rq[2]:/.exec_clock
4173 ± 7% -82.0% 750 ± 10% sched_debug.cfs_rq[30]:/.tg_load_avg
949 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[30]:/.utilization_load_avg
14 ± 5% -42.1% 8 ± 13% sched_debug.cfs_rq[30]:/.load
142883 ± 0% -18.7% 116230 ± 1% sched_debug.cfs_rq[30]:/.exec_clock
14 ± 5% -25.0% 10 ± 4% sched_debug.cfs_rq[30]:/.runnable_load_avg
5 ± 18% +182.6% 16 ± 44% sched_debug.cfs_rq[31]:/.nr_spread_over
14 ± 5% -33.9% 9 ± 8% sched_debug.cfs_rq[31]:/.load
68 ± 29% -100.0% 0 ± 0% sched_debug.cfs_rq[31]:/.tg_load_contrib
945 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[31]:/.utilization_load_avg
53 ± 36% -100.0% 0 ± 0% sched_debug.cfs_rq[31]:/.blocked_load_avg
14 ± 5% -29.8% 10 ± 7% sched_debug.cfs_rq[31]:/.runnable_load_avg
142780 ± 0% -18.6% 116266 ± 1% sched_debug.cfs_rq[31]:/.exec_clock
4148 ± 7% -81.9% 750 ± 10% sched_debug.cfs_rq[31]:/.tg_load_avg
4141 ± 7% -81.9% 750 ± 10% sched_debug.cfs_rq[32]:/.tg_load_avg
13 ± 6% -27.3% 10 ± 0% sched_debug.cfs_rq[32]:/.runnable_load_avg
9604126 ± 0% -13.2% 8340155 ± 1% sched_debug.cfs_rq[32]:/.min_vruntime
963 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[32]:/.utilization_load_avg
14 ± 7% -39.3% 8 ± 17% sched_debug.cfs_rq[32]:/.load
142960 ± 0% -23.1% 109925 ± 1% sched_debug.cfs_rq[32]:/.exec_clock
968 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[33]:/.utilization_load_avg
14 ± 3% -26.3% 10 ± 4% sched_debug.cfs_rq[33]:/.runnable_load_avg
13 ± 6% -38.2% 8 ± 13% sched_debug.cfs_rq[33]:/.load
143024 ± 0% -23.2% 109824 ± 1% sched_debug.cfs_rq[33]:/.exec_clock
4113 ± 7% -81.8% 750 ± 10% sched_debug.cfs_rq[33]:/.tg_load_avg
9591369 ± 0% -13.5% 8301137 ± 1% sched_debug.cfs_rq[33]:/.min_vruntime
142926 ± 0% -22.7% 110486 ± 1% sched_debug.cfs_rq[34]:/.exec_clock
9586520 ± 0% -12.5% 8385635 ± 1% sched_debug.cfs_rq[34]:/.min_vruntime
13 ± 3% -24.5% 10 ± 7% sched_debug.cfs_rq[34]:/.runnable_load_avg
4089 ± 7% -81.7% 750 ± 10% sched_debug.cfs_rq[34]:/.tg_load_avg
47 ± 44% -100.0% 0 ± 0% sched_debug.cfs_rq[34]:/.tg_load_contrib
942 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[34]:/.utilization_load_avg
13 ± 3% -30.2% 9 ± 8% sched_debug.cfs_rq[34]:/.load
143196 ± 0% -23.3% 109775 ± 1% sched_debug.cfs_rq[35]:/.exec_clock
9593365 ± 0% -12.8% 8362919 ± 2% sched_debug.cfs_rq[35]:/.min_vruntime
14 ± 0% -26.8% 10 ± 4% sched_debug.cfs_rq[35]:/.runnable_load_avg
4092 ± 7% -81.7% 749 ± 10% sched_debug.cfs_rq[35]:/.tg_load_avg
944 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[35]:/.utilization_load_avg
4083 ± 7% -81.6% 749 ± 10% sched_debug.cfs_rq[36]:/.tg_load_avg
142862 ± 0% -22.9% 110159 ± 0% sched_debug.cfs_rq[36]:/.exec_clock
934 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[36]:/.utilization_load_avg
14 ± 11% -35.1% 9 ± 4% sched_debug.cfs_rq[36]:/.load
9584087 ± 0% -12.9% 8351358 ± 0% sched_debug.cfs_rq[36]:/.min_vruntime
13 ± 6% -30.9% 9 ± 5% sched_debug.cfs_rq[36]:/.runnable_load_avg
963 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[37]:/.utilization_load_avg
4096 ± 8% -81.7% 748 ± 10% sched_debug.cfs_rq[37]:/.tg_load_avg
9581773 ± 0% -13.1% 8330912 ± 1% sched_debug.cfs_rq[37]:/.min_vruntime
142787 ± 0% -23.0% 109956 ± 1% sched_debug.cfs_rq[37]:/.exec_clock
14 ± 5% -28.6% 10 ± 7% sched_debug.cfs_rq[37]:/.runnable_load_avg
13 ± 3% -32.7% 9 ± 8% sched_debug.cfs_rq[37]:/.load
14 ± 3% -34.5% 9 ± 5% sched_debug.cfs_rq[38]:/.runnable_load_avg
959 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[38]:/.utilization_load_avg
9602528 ± 0% -13.3% 8321364 ± 1% sched_debug.cfs_rq[38]:/.min_vruntime
59 ± 22% -100.0% 0 ± 0% sched_debug.cfs_rq[38]:/.tg_load_contrib
14 ± 3% -41.4% 8 ± 13% sched_debug.cfs_rq[38]:/.load
142952 ± 0% -23.2% 109718 ± 1% sched_debug.cfs_rq[38]:/.exec_clock
44 ± 30% -100.0% 0 ± 0% sched_debug.cfs_rq[38]:/.blocked_load_avg
4096 ± 10% -81.7% 748 ± 10% sched_debug.cfs_rq[38]:/.tg_load_avg
47 ± 41% -100.0% 0 ± 0% sched_debug.cfs_rq[39]:/.blocked_load_avg
963 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[39]:/.utilization_load_avg
14 ± 0% -33.9% 9 ± 8% sched_debug.cfs_rq[39]:/.load
14 ± 3% -28.1% 10 ± 4% sched_debug.cfs_rq[39]:/.runnable_load_avg
62 ± 31% -100.0% 0 ± 0% sched_debug.cfs_rq[39]:/.tg_load_contrib
142892 ± 0% -22.9% 110233 ± 1% sched_debug.cfs_rq[39]:/.exec_clock
4089 ± 10% -81.7% 748 ± 9% sched_debug.cfs_rq[39]:/.tg_load_avg
9563215 ± 0% -12.6% 8356120 ± 1% sched_debug.cfs_rq[39]:/.min_vruntime
142636 ± 0% -18.5% 116193 ± 1% sched_debug.cfs_rq[3]:/.exec_clock
4779 ± 12% -83.9% 770 ± 15% sched_debug.cfs_rq[3]:/.tg_load_avg
14 ± 5% -28.6% 10 ± 7% sched_debug.cfs_rq[3]:/.load
36 ± 43% -100.0% 0 ± 0% sched_debug.cfs_rq[3]:/.tg_load_contrib
13 ± 3% -21.8% 10 ± 4% sched_debug.cfs_rq[3]:/.runnable_load_avg
938 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[3]:/.utilization_load_avg
14 ± 5% -42.1% 8 ± 15% sched_debug.cfs_rq[40]:/.load
14 ± 7% -31.0% 10 ± 0% sched_debug.cfs_rq[40]:/.runnable_load_avg
9581440 ± 0% -12.3% 8398863 ± 1% sched_debug.cfs_rq[40]:/.min_vruntime
4068 ± 10% -81.6% 747 ± 9% sched_debug.cfs_rq[40]:/.tg_load_avg
979 ± 4% -100.0% 0 ± 0% sched_debug.cfs_rq[40]:/.utilization_load_avg
142879 ± 0% -22.5% 110697 ± 1% sched_debug.cfs_rq[40]:/.exec_clock
940 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[41]:/.utilization_load_avg
142941 ± 0% -23.3% 109699 ± 1% sched_debug.cfs_rq[41]:/.exec_clock
30 ± 44% -100.0% 0 ± 0% sched_debug.cfs_rq[41]:/.tg_load_contrib
4072 ± 10% -81.7% 747 ± 9% sched_debug.cfs_rq[41]:/.tg_load_avg
13 ± 6% -38.2% 8 ± 17% sched_debug.cfs_rq[41]:/.load
9583957 ± 0% -13.1% 8332859 ± 1% sched_debug.cfs_rq[41]:/.min_vruntime
13 ± 3% -27.3% 10 ± 7% sched_debug.cfs_rq[41]:/.runnable_load_avg
15 ± 6% -46.7% 8 ± 23% sched_debug.cfs_rq[42]:/.load
984 ± 6% -100.0% 0 ± 0% sched_debug.cfs_rq[42]:/.utilization_load_avg
142998 ± 0% -22.7% 110471 ± 1% sched_debug.cfs_rq[42]:/.exec_clock
4043 ± 11% -81.5% 746 ± 9% sched_debug.cfs_rq[42]:/.tg_load_avg
15 ± 5% -34.4% 10 ± 0% sched_debug.cfs_rq[42]:/.runnable_load_avg
9592620 ± 0% -12.7% 8378446 ± 1% sched_debug.cfs_rq[42]:/.min_vruntime
4050 ± 12% -81.6% 746 ± 9% sched_debug.cfs_rq[43]:/.tg_load_avg
9586033 ± 0% -13.0% 8335645 ± 1% sched_debug.cfs_rq[43]:/.min_vruntime
14 ± 5% -31.0% 10 ± 0% sched_debug.cfs_rq[43]:/.runnable_load_avg
14 ± 5% -47.5% 7 ± 5% sched_debug.cfs_rq[43]:/.load
964 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[43]:/.utilization_load_avg
143013 ± 0% -22.9% 110192 ± 1% sched_debug.cfs_rq[43]:/.exec_clock
961 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[44]:/.utilization_load_avg
14 ± 5% -26.3% 10 ± 8% sched_debug.cfs_rq[44]:/.runnable_load_avg
14 ± 5% -40.4% 8 ± 21% sched_debug.cfs_rq[44]:/.load
142914 ± 0% -22.9% 110172 ± 1% sched_debug.cfs_rq[44]:/.exec_clock
9595791 ± 0% -12.9% 8354769 ± 1% sched_debug.cfs_rq[44]:/.min_vruntime
4051 ± 11% -81.6% 746 ± 9% sched_debug.cfs_rq[44]:/.tg_load_avg
9589932 ± 0% -13.0% 8344377 ± 1% sched_debug.cfs_rq[45]:/.min_vruntime
14 ± 5% -30.4% 9 ± 4% sched_debug.cfs_rq[45]:/.runnable_load_avg
143041 ± 0% -22.2% 111261 ± 3% sched_debug.cfs_rq[45]:/.exec_clock
46 ± 34% -100.0% 0 ± 0% sched_debug.cfs_rq[45]:/.tg_load_contrib
4056 ± 11% -81.6% 746 ± 9% sched_debug.cfs_rq[45]:/.tg_load_avg
14 ± 7% -41.4% 8 ± 10% sched_debug.cfs_rq[45]:/.load
942 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[45]:/.utilization_load_avg
4056 ± 11% -81.6% 746 ± 9% sched_debug.cfs_rq[46]:/.tg_load_avg
9595201 ± 0% -13.2% 8331965 ± 1% sched_debug.cfs_rq[46]:/.min_vruntime
14 ± 3% -34.5% 9 ± 5% sched_debug.cfs_rq[46]:/.runnable_load_avg
62 ± 17% -100.0% 0 ± 0% sched_debug.cfs_rq[46]:/.blocked_load_avg
959 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[46]:/.utilization_load_avg
77 ± 13% -100.0% 0 ± 0% sched_debug.cfs_rq[46]:/.tg_load_contrib
13 ± 3% -41.8% 8 ± 15% sched_debug.cfs_rq[46]:/.load
142914 ± 0% -23.0% 110055 ± 1% sched_debug.cfs_rq[46]:/.exec_clock
9579237 ± 0% -13.1% 8322157 ± 1% sched_debug.cfs_rq[47]:/.min_vruntime
4094 ± 12% -81.8% 747 ± 9% sched_debug.cfs_rq[47]:/.tg_load_avg
142989 ± 0% -23.0% 110124 ± 1% sched_debug.cfs_rq[47]:/.exec_clock
945 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[47]:/.utilization_load_avg
13 ± 6% -38.9% 8 ± 10% sched_debug.cfs_rq[47]:/.load
14 ± 8% -30.4% 9 ± 4% sched_debug.cfs_rq[47]:/.runnable_load_avg
9583731 ± 0% -13.1% 8327758 ± 1% sched_debug.cfs_rq[48]:/.min_vruntime
142905 ± 0% -22.4% 110964 ± 1% sched_debug.cfs_rq[48]:/.exec_clock
13 ± 3% -24.1% 10 ± 4% sched_debug.cfs_rq[48]:/.runnable_load_avg
917 ± 4% -100.0% 0 ± 0% sched_debug.cfs_rq[48]:/.utilization_load_avg
13 ± 6% -30.2% 9 ± 20% sched_debug.cfs_rq[48]:/.load
51 ± 45% -100.0% 0 ± 0% sched_debug.cfs_rq[48]:/.tg_load_contrib
4063 ± 12% -81.6% 747 ± 9% sched_debug.cfs_rq[48]:/.tg_load_avg
13 ± 3% -29.1% 9 ± 8% sched_debug.cfs_rq[49]:/.runnable_load_avg
33 ± 42% -100.0% 0 ± 0% sched_debug.cfs_rq[49]:/.blocked_load_avg
4052 ± 12% -81.6% 746 ± 9% sched_debug.cfs_rq[49]:/.tg_load_avg
9603241 ± 0% -12.9% 8365083 ± 1% sched_debug.cfs_rq[49]:/.min_vruntime
13 ± 3% -35.2% 8 ± 14% sched_debug.cfs_rq[49]:/.load
47 ± 29% -100.0% 0 ± 0% sched_debug.cfs_rq[49]:/.tg_load_contrib
142992 ± 0% -23.1% 110022 ± 1% sched_debug.cfs_rq[49]:/.exec_clock
922 ± 6% -100.0% 0 ± 0% sched_debug.cfs_rq[49]:/.utilization_load_avg
142939 ± 0% -18.3% 116733 ± 1% sched_debug.cfs_rq[4]:/.exec_clock
949 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[4]:/.utilization_load_avg
13 ± 3% -23.6% 10 ± 8% sched_debug.cfs_rq[4]:/.runnable_load_avg
60 ± 35% -100.0% 0 ± 0% sched_debug.cfs_rq[4]:/.blocked_load_avg
4703 ± 12% -83.6% 770 ± 15% sched_debug.cfs_rq[4]:/.tg_load_avg
13 ± 3% -25.5% 10 ± 8% sched_debug.cfs_rq[4]:/.load
74 ± 28% -100.0% 0 ± 0% sched_debug.cfs_rq[4]:/.tg_load_contrib
4043 ± 12% -81.5% 746 ± 9% sched_debug.cfs_rq[50]:/.tg_load_avg
14 ± 11% -40.4% 8 ± 17% sched_debug.cfs_rq[50]:/.load
52 ± 10% -100.0% 0 ± 0% sched_debug.cfs_rq[50]:/.tg_load_contrib
142825 ± 0% -23.5% 109276 ± 1% sched_debug.cfs_rq[50]:/.exec_clock
929 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[50]:/.utilization_load_avg
37 ± 15% -100.0% 0 ± 0% sched_debug.cfs_rq[50]:/.blocked_load_avg
9575580 ± 0% -13.5% 8282955 ± 1% sched_debug.cfs_rq[50]:/.min_vruntime
14 ± 10% -34.5% 9 ± 5% sched_debug.cfs_rq[50]:/.runnable_load_avg
13 ± 3% -40.7% 8 ± 17% sched_debug.cfs_rq[51]:/.load
13 ± 3% -27.8% 9 ± 8% sched_debug.cfs_rq[51]:/.runnable_load_avg
9592803 ± 0% -12.9% 8356128 ± 1% sched_debug.cfs_rq[51]:/.min_vruntime
142861 ± 0% -22.9% 110133 ± 1% sched_debug.cfs_rq[51]:/.exec_clock
4055 ± 12% -81.6% 745 ± 9% sched_debug.cfs_rq[51]:/.tg_load_avg
923 ± 4% -100.0% 0 ± 0% sched_debug.cfs_rq[51]:/.utilization_load_avg
14 ± 5% -30.4% 9 ± 8% sched_debug.cfs_rq[52]:/.runnable_load_avg
4047 ± 12% -81.6% 745 ± 9% sched_debug.cfs_rq[52]:/.tg_load_avg
9583802 ± 0% -13.0% 8339035 ± 0% sched_debug.cfs_rq[52]:/.min_vruntime
945 ± 6% -100.0% 0 ± 0% sched_debug.cfs_rq[52]:/.utilization_load_avg
13 ± 6% -38.9% 8 ± 26% sched_debug.cfs_rq[52]:/.load
142934 ± 0% -23.0% 109993 ± 0% sched_debug.cfs_rq[52]:/.exec_clock
9600421 ± 0% -12.6% 8390692 ± 0% sched_debug.cfs_rq[53]:/.min_vruntime
14 ± 2% -33.9% 9 ± 4% sched_debug.cfs_rq[53]:/.runnable_load_avg
4041 ± 13% -81.6% 745 ± 9% sched_debug.cfs_rq[53]:/.tg_load_avg
38 ± 48% -100.0% 0 ± 0% sched_debug.cfs_rq[53]:/.tg_load_contrib
142968 ± 0% -22.7% 110490 ± 0% sched_debug.cfs_rq[53]:/.exec_clock
14 ± 3% -36.2% 9 ± 14% sched_debug.cfs_rq[53]:/.load
1002 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[53]:/.utilization_load_avg
948 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[54]:/.utilization_load_avg
13 ± 6% -30.9% 9 ± 5% sched_debug.cfs_rq[54]:/.runnable_load_avg
4023 ± 13% -81.5% 745 ± 9% sched_debug.cfs_rq[54]:/.tg_load_avg
83 ± 43% -100.0% 0 ± 0% sched_debug.cfs_rq[54]:/.tg_load_contrib
13 ± 6% -38.2% 8 ± 5% sched_debug.cfs_rq[54]:/.load
142953 ± 0% -23.0% 110018 ± 1% sched_debug.cfs_rq[54]:/.exec_clock
9597890 ± 0% -12.7% 8376139 ± 1% sched_debug.cfs_rq[54]:/.min_vruntime
13 ± 6% -34.5% 9 ± 7% sched_debug.cfs_rq[55]:/.load
142898 ± 0% -23.3% 109542 ± 0% sched_debug.cfs_rq[55]:/.exec_clock
13 ± 3% -27.8% 9 ± 4% sched_debug.cfs_rq[55]:/.runnable_load_avg
22 ± 41% -100.0% 0 ± 0% sched_debug.cfs_rq[55]:/.tg_load_contrib
9571172 ± 0% -13.2% 8310607 ± 0% sched_debug.cfs_rq[55]:/.min_vruntime
944 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[55]:/.utilization_load_avg
4015 ± 13% -81.5% 744 ± 9% sched_debug.cfs_rq[55]:/.tg_load_avg
14 ± 8% -39.3% 8 ± 10% sched_debug.cfs_rq[56]:/.load
143089 ± 0% -22.9% 110304 ± 1% sched_debug.cfs_rq[56]:/.exec_clock
9598931 ± 0% -13.1% 8345381 ± 1% sched_debug.cfs_rq[56]:/.min_vruntime
960 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[56]:/.utilization_load_avg
33 ± 42% -100.0% 0 ± 0% sched_debug.cfs_rq[56]:/.tg_load_contrib
14 ± 7% -30.4% 9 ± 4% sched_debug.cfs_rq[56]:/.runnable_load_avg
4026 ± 14% -81.5% 744 ± 9% sched_debug.cfs_rq[56]:/.tg_load_avg
9588434 ± 0% -13.1% 8336741 ± 1% sched_debug.cfs_rq[57]:/.min_vruntime
43 ± 39% -100.0% 0 ± 0% sched_debug.cfs_rq[57]:/.tg_load_contrib
936 ± 2% -100.0% 0 ± 0% sched_debug.cfs_rq[57]:/.utilization_load_avg
142957 ± 0% -23.2% 109850 ± 0% sched_debug.cfs_rq[57]:/.exec_clock
4019 ± 14% -81.5% 742 ± 9% sched_debug.cfs_rq[57]:/.tg_load_avg
13 ± 3% -30.9% 9 ± 5% sched_debug.cfs_rq[57]:/.runnable_load_avg
960 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[58]:/.utilization_load_avg
14 ± 3% -31.0% 10 ± 0% sched_debug.cfs_rq[58]:/.runnable_load_avg
9594638 ± 0% -13.0% 8350105 ± 1% sched_debug.cfs_rq[58]:/.min_vruntime
57 ± 39% -100.0% 0 ± 0% sched_debug.cfs_rq[58]:/.tg_load_contrib
142941 ± 0% -22.8% 110420 ± 1% sched_debug.cfs_rq[58]:/.exec_clock
4022 ± 13% -81.5% 743 ± 9% sched_debug.cfs_rq[58]:/.tg_load_avg
14 ± 5% -41.4% 8 ± 13% sched_debug.cfs_rq[58]:/.load
14 ± 0% -28.6% 10 ± 7% sched_debug.cfs_rq[59]:/.runnable_load_avg
14 ± 0% -35.7% 9 ± 15% sched_debug.cfs_rq[59]:/.load
968 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[59]:/.utilization_load_avg
53 ± 43% -100.0% 0 ± 0% sched_debug.cfs_rq[59]:/.tg_load_contrib
4016 ± 14% -81.5% 742 ± 9% sched_debug.cfs_rq[59]:/.tg_load_avg
143046 ± 0% -22.9% 110228 ± 1% sched_debug.cfs_rq[59]:/.exec_clock
9594718 ± 0% -12.9% 8358121 ± 1% sched_debug.cfs_rq[59]:/.min_vruntime
63 ± 47% -100.0% 0 ± 0% sched_debug.cfs_rq[5]:/.tg_load_contrib
920 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[5]:/.utilization_load_avg
142540 ± 0% -18.9% 115610 ± 1% sched_debug.cfs_rq[5]:/.exec_clock
4696 ± 12% -83.6% 770 ± 15% sched_debug.cfs_rq[5]:/.tg_load_avg
14 ± 5% -26.8% 10 ± 4% sched_debug.cfs_rq[5]:/.runnable_load_avg
143056 ± 0% -22.8% 110429 ± 1% sched_debug.cfs_rq[60]:/.exec_clock
14 ± 5% -28.1% 10 ± 4% sched_debug.cfs_rq[60]:/.runnable_load_avg
9597950 ± 0% -12.8% 8366607 ± 0% sched_debug.cfs_rq[60]:/.min_vruntime
940 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[60]:/.utilization_load_avg
14 ± 5% -43.9% 8 ± 8% sched_debug.cfs_rq[60]:/.load
3982 ± 15% -81.4% 742 ± 9% sched_debug.cfs_rq[60]:/.tg_load_avg
14 ± 3% -26.3% 10 ± 4% sched_debug.cfs_rq[61]:/.runnable_load_avg
951 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[61]:/.utilization_load_avg
38 ± 36% -100.0% 0 ± 0% sched_debug.cfs_rq[61]:/.tg_load_contrib
9601058 ± 0% -13.0% 8352950 ± 1% sched_debug.cfs_rq[61]:/.min_vruntime
3969 ± 15% -81.3% 742 ± 9% sched_debug.cfs_rq[61]:/.tg_load_avg
143089 ± 0% -23.1% 110084 ± 1% sched_debug.cfs_rq[61]:/.exec_clock
14 ± 5% -39.3% 8 ± 19% sched_debug.cfs_rq[61]:/.load
9593570 ± 0% -12.6% 8383474 ± 1% sched_debug.cfs_rq[62]:/.min_vruntime
49 ± 26% -100.0% 0 ± 0% sched_debug.cfs_rq[62]:/.tg_load_contrib
920 ± 5% -100.0% 0 ± 0% sched_debug.cfs_rq[62]:/.utilization_load_avg
14 ± 5% -28.6% 10 ± 7% sched_debug.cfs_rq[62]:/.runnable_load_avg
34 ± 37% -100.0% 0 ± 0% sched_debug.cfs_rq[62]:/.blocked_load_avg
143017 ± 0% -22.8% 110436 ± 1% sched_debug.cfs_rq[62]:/.exec_clock
3967 ± 15% -81.3% 742 ± 9% sched_debug.cfs_rq[62]:/.tg_load_avg
14 ± 5% -39.3% 8 ± 10% sched_debug.cfs_rq[62]:/.load
14 ± 0% -37.5% 8 ± 4% sched_debug.cfs_rq[63]:/.load
930 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[63]:/.utilization_load_avg
9570329 ± 0% -13.0% 8330510 ± 1% sched_debug.cfs_rq[63]:/.min_vruntime
3969 ± 15% -81.3% 741 ± 9% sched_debug.cfs_rq[63]:/.tg_load_avg
14 ± 0% -32.1% 9 ± 5% sched_debug.cfs_rq[63]:/.runnable_load_avg
143128 ± 0% -22.4% 111121 ± 1% sched_debug.cfs_rq[63]:/.exec_clock
61 ± 41% -100.0% 0 ± 0% sched_debug.cfs_rq[6]:/.tg_load_contrib
951 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[6]:/.utilization_load_avg
15 ± 6% -35.0% 9 ± 19% sched_debug.cfs_rq[6]:/.load
4669 ± 12% -83.5% 770 ± 15% sched_debug.cfs_rq[6]:/.tg_load_avg
5 ± 41% +154.5% 14 ± 20% sched_debug.cfs_rq[6]:/.nr_spread_over
142485 ± 0% -18.8% 115703 ± 1% sched_debug.cfs_rq[6]:/.exec_clock
14 ± 7% -24.6% 10 ± 4% sched_debug.cfs_rq[6]:/.runnable_load_avg
4626 ± 11% -83.4% 767 ± 14% sched_debug.cfs_rq[7]:/.tg_load_avg
14 ± 3% -26.3% 10 ± 4% sched_debug.cfs_rq[7]:/.runnable_load_avg
974 ± 4% -100.0% 0 ± 0% sched_debug.cfs_rq[7]:/.utilization_load_avg
4 ± 46% +143.8% 9 ± 36% sched_debug.cfs_rq[7]:/.nr_spread_over
14 ± 3% -36.8% 9 ± 19% sched_debug.cfs_rq[7]:/.load
56 ± 45% -100.0% 0 ± 0% sched_debug.cfs_rq[7]:/.tg_load_contrib
142781 ± 0% -18.7% 116026 ± 1% sched_debug.cfs_rq[7]:/.exec_clock
162 ± 26% -100.0% 0 ± 0% sched_debug.cfs_rq[8]:/.blocked_load_avg
147352 ± 1% -21.3% 116014 ± 1% sched_debug.cfs_rq[8]:/.exec_clock
178 ± 23% -100.0% 0 ± 0% sched_debug.cfs_rq[8]:/.tg_load_contrib
4611 ± 11% -83.3% 768 ± 14% sched_debug.cfs_rq[8]:/.tg_load_avg
1007 ± 3% -100.0% 0 ± 0% sched_debug.cfs_rq[8]:/.utilization_load_avg
15 ± 4% -35.0% 9 ± 8% sched_debug.cfs_rq[8]:/.load
15 ± 5% -31.1% 10 ± 4% sched_debug.cfs_rq[8]:/.runnable_load_avg
111 ± 22% -100.0% 0 ± 0% sched_debug.cfs_rq[9]:/.tg_load_contrib
14 ± 3% -26.3% 10 ± 4% sched_debug.cfs_rq[9]:/.runnable_load_avg
14 ± 2% -28.8% 10 ± 8% sched_debug.cfs_rq[9]:/.load
4603 ± 11% -83.4% 764 ± 13% sched_debug.cfs_rq[9]:/.tg_load_avg
142730 ± 0% -18.9% 115809 ± 1% sched_debug.cfs_rq[9]:/.exec_clock
965 ± 1% -100.0% 0 ± 0% sched_debug.cfs_rq[9]:/.utilization_load_avg
14 ± 17% -48.3% 7 ± 14% sched_debug.cfs_rq[9]:/.nr_spread_over
96 ± 25% -100.0% 0 ± 0% sched_debug.cfs_rq[9]:/.blocked_load_avg
459950 ± 8% +26.5% 581670 ± 2% sched_debug.cpu#0.avg_idle
5114 ± 3% -15.0% 4345 ± 6% sched_debug.cpu#0.curr->pid
14 ± 5% -41.4% 8 ± 5% sched_debug.cpu#0.cpu_load[1]
54423 ± 2% +115.2% 117124 ± 2% sched_debug.cpu#0.sched_count
15 ± 5% -39.3% 9 ± 4% sched_debug.cpu#0.cpu_load[4]
14 ± 3% -38.6% 8 ± 4% sched_debug.cpu#0.cpu_load[0]
24243 ± 9% +135.3% 57041 ± 3% sched_debug.cpu#0.ttwu_count
8240 ± 6% +442.7% 44723 ± 3% sched_debug.cpu#0.sched_goidle
40005 ± 12% +155.6% 102241 ± 3% sched_debug.cpu#0.nr_switches
15 ± 6% -43.3% 8 ± 5% sched_debug.cpu#0.cpu_load[2]
15 ± 5% -44.4% 8 ± 4% sched_debug.cpu#0.cpu_load[3]
5278 ± 1% -15.1% 4479 ± 4% sched_debug.cpu#1.curr->pid
152407 ± 0% -9.3% 138247 ± 1% sched_debug.cpu#1.nr_load_updates
14 ± 5% -25.0% 10 ± 10% sched_debug.cpu#1.load
9011 ± 5% +397.4% 44820 ± 1% sched_debug.cpu#1.sched_goidle
15 ± 12% -38.3% 9 ± 4% sched_debug.cpu#1.cpu_load[0]
14 ± 3% -36.2% 9 ± 4% sched_debug.cpu#1.cpu_load[4]
419898 ± 5% +41.5% 593963 ± 3% sched_debug.cpu#1.avg_idle
14 ± 5% -39.0% 9 ± 0% sched_debug.cpu#1.cpu_load[3]
45611 ± 12% +138.9% 108972 ± 5% sched_debug.cpu#1.sched_count
22346 ± 14% +138.4% 53266 ± 0% sched_debug.cpu#1.ttwu_count
38054 ± 13% +166.6% 101445 ± 0% sched_debug.cpu#1.nr_switches
15 ± 10% -41.7% 8 ± 4% sched_debug.cpu#1.cpu_load[1]
15 ± 8% -41.0% 9 ± 0% sched_debug.cpu#1.cpu_load[2]
19623 ± 3% +155.1% 50061 ± 3% sched_debug.cpu#10.ttwu_count
14 ± 5% -34.5% 9 ± 5% sched_debug.cpu#10.cpu_load[4]
14 ± 5% -38.6% 8 ± 9% sched_debug.cpu#10.cpu_load[0]
14 ± 7% -34.5% 9 ± 5% sched_debug.cpu#10.load
5286 ± 1% -19.2% 4271 ± 2% sched_debug.cpu#10.curr->pid
33706 ± 8% +181.6% 94912 ± 3% sched_debug.cpu#10.nr_switches
14 ± 5% -37.9% 9 ± 7% sched_debug.cpu#10.cpu_load[3]
14 ± 5% -38.6% 8 ± 9% sched_debug.cpu#10.cpu_load[1]
151771 ± 0% -10.5% 135760 ± 1% sched_debug.cpu#10.nr_load_updates
39656 ± 9% +153.7% 100597 ± 5% sched_debug.cpu#10.sched_count
425666 ± 9% +28.1% 545278 ± 1% sched_debug.cpu#10.avg_idle
8362 ± 8% -27.7% 6044 ± 12% sched_debug.cpu#10.ttwu_local
7486 ± 17% +458.9% 41844 ± 2% sched_debug.cpu#10.sched_goidle
14 ± 5% -41.4% 8 ± 5% sched_debug.cpu#10.cpu_load[2]
15 ± 13% -33.9% 10 ± 10% sched_debug.cpu#11.load
15 ± 8% -46.0% 8 ± 5% sched_debug.cpu#11.cpu_load[0]
35740 ± 6% +168.3% 95897 ± 1% sched_debug.cpu#11.nr_switches
42550 ± 8% +133.0% 99139 ± 1% sched_debug.cpu#11.sched_count
15 ± 4% -40.0% 9 ± 7% sched_debug.cpu#11.cpu_load[3]
15 ± 0% -41.7% 8 ± 4% sched_debug.cpu#11.cpu_load[2]
9422 ± 9% -28.0% 6784 ± 11% sched_debug.cpu#11.ttwu_local
14 ± 5% -32.8% 9 ± 4% sched_debug.cpu#11.cpu_load[4]
152028 ± 0% -11.0% 135292 ± 1% sched_debug.cpu#11.nr_load_updates
5302 ± 1% -29.1% 3757 ± 14% sched_debug.cpu#11.curr->pid
420019 ± 6% +29.4% 543549 ± 9% sched_debug.cpu#11.avg_idle
15 ± 2% -42.6% 8 ± 9% sched_debug.cpu#11.cpu_load[1]
7298 ± 13% +471.9% 41743 ± 1% sched_debug.cpu#11.sched_goidle
20612 ± 8% +147.3% 50976 ± 2% sched_debug.cpu#11.ttwu_count
426702 ± 12% +39.0% 593273 ± 9% sched_debug.cpu#12.avg_idle
37593 ± 6% +163.0% 98855 ± 1% sched_debug.cpu#12.sched_count
34305 ± 7% +171.9% 93282 ± 2% sched_debug.cpu#12.nr_switches
7460 ± 12% +448.1% 40887 ± 2% sched_debug.cpu#12.sched_goidle
1 ± 0% -100.0% 0 ± 0% sched_debug.cpu#12.nr_running
14 ± 2% -35.6% 9 ± 5% sched_debug.cpu#12.cpu_load[4]
19137 ± 6% +163.6% 50437 ± 2% sched_debug.cpu#12.ttwu_count
151130 ± 0% -10.2% 135743 ± 1% sched_debug.cpu#12.nr_load_updates
14 ± 2% -37.3% 9 ± 4% sched_debug.cpu#12.cpu_load[3]
15 ± 4% -40.0% 9 ± 7% sched_debug.cpu#12.cpu_load[2]
15 ± 8% -44.4% 8 ± 9% sched_debug.cpu#12.cpu_load[0]
5203 ± 2% -17.9% 4273 ± 10% sched_debug.cpu#12.curr->pid
8695 ± 14% -27.6% 6297 ± 10% sched_debug.cpu#12.ttwu_local
15 ± 5% -41.0% 9 ± 7% sched_debug.cpu#12.cpu_load[1]
6727 ± 8% +516.7% 41486 ± 1% sched_debug.cpu#13.sched_goidle
18277 ± 15% +171.8% 49677 ± 1% sched_debug.cpu#13.ttwu_count
14 ± 3% -36.2% 9 ± 4% sched_debug.cpu#13.cpu_load[3]
40712 ± 21% +142.4% 98700 ± 4% sched_debug.cpu#13.sched_count
15 ± 9% -34.4% 10 ± 14% sched_debug.cpu#13.load
150856 ± 0% -10.0% 135840 ± 1% sched_debug.cpu#13.nr_load_updates
32486 ± 15% +188.1% 93585 ± 2% sched_debug.cpu#13.nr_switches
418274 ± 5% +38.7% 580336 ± 4% sched_debug.cpu#13.avg_idle
5263 ± 2% -29.2% 3727 ± 17% sched_debug.cpu#13.curr->pid
14 ± 5% -40.4% 8 ± 5% sched_debug.cpu#13.cpu_load[0]
14 ± 3% -34.5% 9 ± 5% sched_debug.cpu#13.cpu_load[4]
14 ± 5% -40.4% 8 ± 5% sched_debug.cpu#13.cpu_load[1]
14 ± 5% -36.8% 9 ± 0% sched_debug.cpu#13.cpu_load[2]
14 ± 5% -38.6% 8 ± 9% sched_debug.cpu#14.cpu_load[0]
36644 ± 6% +175.0% 100770 ± 2% sched_debug.cpu#14.sched_count
15 ± 19% -38.1% 9 ± 11% sched_debug.cpu#14.load
14 ± 3% -37.9% 9 ± 7% sched_debug.cpu#14.cpu_load[1]
150753 ± 0% -10.0% 135653 ± 1% sched_debug.cpu#14.nr_load_updates
32557 ± 8% +186.3% 93223 ± 0% sched_debug.cpu#14.nr_switches
6530 ± 6% +527.6% 40983 ± 0% sched_debug.cpu#14.sched_goidle
430654 ± 11% +35.1% 581977 ± 7% sched_debug.cpu#14.avg_idle
14 ± 3% -37.9% 9 ± 7% sched_debug.cpu#14.cpu_load[2]
7967 ± 12% -22.1% 6204 ± 6% sched_debug.cpu#14.ttwu_local
5162 ± 3% -25.8% 3831 ± 10% sched_debug.cpu#14.curr->pid
18131 ± 6% +175.2% 49892 ± 0% sched_debug.cpu#14.ttwu_count
14 ± 3% -36.2% 9 ± 4% sched_debug.cpu#14.cpu_load[3]
14 ± 3% -34.5% 9 ± 5% sched_debug.cpu#14.cpu_load[4]
32858 ± 9% +181.0% 92338 ± 2% sched_debug.cpu#15.nr_switches
14 ± 5% -35.6% 9 ± 5% sched_debug.cpu#15.cpu_load[3]
14 ± 7% -37.9% 9 ± 11% sched_debug.cpu#15.cpu_load[0]
8518 ± 19% -27.0% 6214 ± 5% sched_debug.cpu#15.ttwu_local
15 ± 4% -36.7% 9 ± 5% sched_debug.cpu#15.cpu_load[4]
14 ± 5% -37.3% 9 ± 8% sched_debug.cpu#15.cpu_load[1]
439603 ± 4% +33.6% 587095 ± 7% sched_debug.cpu#15.avg_idle
37374 ± 14% +172.7% 101927 ± 2% sched_debug.cpu#15.sched_count
150779 ± 0% -10.1% 135609 ± 1% sched_debug.cpu#15.nr_load_updates
18461 ± 10% +168.9% 49635 ± 3% sched_debug.cpu#15.ttwu_count
15 ± 13% -38.7% 9 ± 11% sched_debug.cpu#15.load
5087 ± 4% -21.3% 4005 ± 5% sched_debug.cpu#15.curr->pid
6623 ± 6% +511.6% 40513 ± 2% sched_debug.cpu#15.sched_goidle
14 ± 5% -37.3% 9 ± 8% sched_debug.cpu#15.cpu_load[2]
15 ± 4% -45.0% 8 ± 10% sched_debug.cpu#16.cpu_load[0]
151350 ± 0% -10.3% 135805 ± 1% sched_debug.cpu#16.nr_load_updates
7472 ± 8% +468.9% 42510 ± 2% sched_debug.cpu#16.sched_goidle
15 ± 4% -43.3% 8 ± 5% sched_debug.cpu#16.cpu_load[2]
15 ± 4% -43.3% 8 ± 5% sched_debug.cpu#16.cpu_load[3]
42078 ± 16% +172.3% 114559 ± 13% sched_debug.cpu#16.sched_count
18903 ± 5% +169.1% 50867 ± 1% sched_debug.cpu#16.ttwu_count
15 ± 4% -40.0% 9 ± 7% sched_debug.cpu#16.cpu_load[4]
33692 ± 4% +188.6% 97225 ± 1% sched_debug.cpu#16.nr_switches
5230 ± 3% -21.7% 4098 ± 12% sched_debug.cpu#16.curr->pid
14 ± 7% -44.1% 8 ± 10% sched_debug.cpu#16.cpu_load[1]
1 ± 0% -100.0% 0 ± 0% sched_debug.cpu#16.nr_running
388250 ± 4% +48.8% 577899 ± 7% sched_debug.cpu#16.avg_idle
8751 ± 11% -23.2% 6720 ± 9% sched_debug.cpu#16.ttwu_local
15 ± 4% -45.0% 8 ± 5% sched_debug.cpu#17.cpu_load[1]
14 ± 5% -39.0% 9 ± 0% sched_debug.cpu#17.cpu_load[3]
15 ± 4% -41.7% 8 ± 4% sched_debug.cpu#17.cpu_load[2]
31973 ± 4% +212.8% 100010 ± 2% sched_debug.cpu#17.nr_switches
5079 ± 4% -25.6% 3781 ± 9% sched_debug.cpu#17.curr->pid
14 ± 5% -37.3% 9 ± 4% sched_debug.cpu#17.cpu_load[4]
18052 ± 4% +185.4% 51514 ± 3% sched_debug.cpu#17.ttwu_count
14 ± 2% -44.1% 8 ± 5% sched_debug.cpu#17.cpu_load[0]
7214 ± 9% +504.5% 43608 ± 2% sched_debug.cpu#17.sched_goidle
39953 ± 17% +181.2% 112331 ± 6% sched_debug.cpu#17.sched_count
14 ± 7% -33.3% 9 ± 9% sched_debug.cpu#17.load
150798 ± 0% -10.0% 135723 ± 1% sched_debug.cpu#17.nr_load_updates
460177 ± 5% +24.5% 572863 ± 6% sched_debug.cpu#17.avg_idle
7557 ± 15% +463.6% 42592 ± 1% sched_debug.cpu#18.sched_goidle
151189 ± 0% -10.4% 135505 ± 1% sched_debug.cpu#18.nr_load_updates
14 ± 3% -42.1% 8 ± 5% sched_debug.cpu#18.cpu_load[0]
20909 ± 9% +146.8% 51603 ± 2% sched_debug.cpu#18.ttwu_count
41296 ± 14% +159.1% 107013 ± 4% sched_debug.cpu#18.sched_count
14 ± 3% -40.4% 8 ± 5% sched_debug.cpu#18.cpu_load[1]
14 ± 3% -38.6% 8 ± 4% sched_debug.cpu#18.cpu_load[2]
10219 ± 14% -33.2% 6825 ± 10% sched_debug.cpu#18.ttwu_local
14 ± 2% -37.3% 9 ± 4% sched_debug.cpu#18.cpu_load[4]
14 ± 2% -39.0% 9 ± 0% sched_debug.cpu#18.cpu_load[3]
5088 ± 3% -22.0% 3967 ± 3% sched_debug.cpu#18.curr->pid
425778 ± 11% +32.5% 564360 ± 4% sched_debug.cpu#18.avg_idle
37128 ± 10% +163.4% 97799 ± 1% sched_debug.cpu#18.nr_switches
19065 ± 13% +166.6% 50829 ± 3% sched_debug.cpu#19.ttwu_count
150656 ± 0% -10.2% 135240 ± 1% sched_debug.cpu#19.nr_load_updates
14 ± 14% -32.8% 9 ± 8% sched_debug.cpu#19.load
5122 ± 1% -25.9% 3793 ± 3% sched_debug.cpu#19.curr->pid
6661 ± 9% +538.9% 42558 ± 2% sched_debug.cpu#19.sched_goidle
14 ± 3% -37.9% 9 ± 7% sched_debug.cpu#19.cpu_load[3]
15 ± 8% -46.7% 8 ± 8% sched_debug.cpu#19.cpu_load[1]
48405 ± 38% +139.6% 115975 ± 9% sched_debug.cpu#19.sched_count
15 ± 9% -45.2% 8 ± 5% sched_debug.cpu#19.cpu_load[0]
414211 ± 18% +47.8% 612403 ± 7% sched_debug.cpu#19.avg_idle
33174 ± 16% +194.1% 97573 ± 3% sched_debug.cpu#19.nr_switches
14 Â 3% -37.9% 9 Â 7% sched_debug.cpu#19.cpu_load[4]
14 Â 5% -44.1% 8 Â 13% sched_debug.cpu#19.cpu_load[2]
446481 Â 6% +24.5% 555977 Â 3% sched_debug.cpu#2.avg_idle
49149 Â 23% +115.8% 106068 Â 7% sched_debug.cpu#2.sched_count
10586 Â 10% -25.4% 7902 Â 12% sched_debug.cpu#2.ttwu_local
14 Â 2% -35.6% 9 Â 21% sched_debug.cpu#2.cpu_load[1]
151563 Â 0% -9.0% 137974 Â 1% sched_debug.cpu#2.nr_load_updates
5165 Â 1% -24.7% 3887 Â 10% sched_debug.cpu#2.curr->pid
7552 Â 3% +469.0% 42974 Â 4% sched_debug.cpu#2.sched_goidle
15 Â 0% -36.7% 9 Â 21% sched_debug.cpu#2.cpu_load[0]
21396 Â 8% +145.3% 52491 Â 3% sched_debug.cpu#2.ttwu_count
14 Â 3% -33.3% 9 Â 21% sched_debug.cpu#2.cpu_load[2]
14 Â 3% -33.3% 9 Â 21% sched_debug.cpu#2.cpu_load[3]
38324 Â 9% +156.4% 98254 Â 3% sched_debug.cpu#2.nr_switches
151276 Â 0% -10.5% 135392 Â 1% sched_debug.cpu#20.nr_load_updates
18059 Â 8% +186.4% 51716 Â 1% sched_debug.cpu#20.ttwu_count
14 Â 5% -30.4% 9 Â 8% sched_debug.cpu#20.load
7481 Â 8% +466.1% 42357 Â 2% sched_debug.cpu#20.sched_goidle
444177 Â 20% +31.4% 583581 Â 4% sched_debug.cpu#20.avg_idle
34935 Â 17% +231.3% 115752 Â 6% sched_debug.cpu#20.sched_count
13 Â 3% -34.5% 9 Â 7% sched_debug.cpu#20.cpu_load[1]
31626 Â 7% +212.4% 98796 Â 2% sched_debug.cpu#20.nr_switches
14 Â 3% -34.5% 9 Â 5% sched_debug.cpu#20.cpu_load[3]
14 Â 0% -35.7% 9 Â 7% sched_debug.cpu#20.cpu_load[0]
14 Â 3% -32.8% 9 Â 4% sched_debug.cpu#20.cpu_load[4]
5213 Â 3% -27.4% 3782 Â 7% sched_debug.cpu#20.curr->pid
14 Â 5% -33.9% 9 Â 4% sched_debug.cpu#20.cpu_load[2]
14 Â 3% -33.3% 9 Â 5% sched_debug.cpu#21.cpu_load[3]
14 Â 7% -33.3% 9 Â 9% sched_debug.cpu#21.cpu_load[0]
14 Â 7% -33.3% 9 Â 5% sched_debug.cpu#21.cpu_load[1]
7015 Â 7% +490.2% 41403 Â 1% sched_debug.cpu#21.sched_goidle
32048 Â 16% +198.2% 95554 Â 2% sched_debug.cpu#21.nr_switches
5073 Â 5% -25.3% 3790 Â 6% sched_debug.cpu#21.curr->pid
14 Â 3% -31.0% 10 Â 0% sched_debug.cpu#21.cpu_load[4]
14 Â 3% -33.3% 9 Â 5% sched_debug.cpu#21.cpu_load[2]
39334 Â 17% +167.5% 105239 Â 8% sched_debug.cpu#21.sched_count
150872 Â 0% -10.5% 135099 Â 1% sched_debug.cpu#21.nr_load_updates
18112 Â 15% +180.9% 50883 Â 2% sched_debug.cpu#21.ttwu_count
439087 Â 7% +48.2% 650939 Â 5% sched_debug.cpu#21.avg_idle
5166 Â 2% -26.3% 3805 Â 8% sched_debug.cpu#22.curr->pid
14 Â 3% -38.6% 8 Â 9% sched_debug.cpu#22.cpu_load[2]
14 Â 3% -33.3% 9 Â 5% sched_debug.cpu#22.cpu_load[4]
14 Â 3% -35.1% 9 Â 4% sched_debug.cpu#22.cpu_load[3]
14 Â 7% -36.8% 9 Â 0% sched_debug.cpu#22.load
7428 Â 10% +458.5% 41491 Â 2% sched_debug.cpu#22.sched_goidle
14 Â 5% -35.7% 9 Â 7% sched_debug.cpu#22.cpu_load[0]
17236 Â 10% +197.8% 51335 Â 2% sched_debug.cpu#22.ttwu_count
40157 Â 29% +179.4% 112186 Â 7% sched_debug.cpu#22.sched_count
425732 Â 7% +42.6% 607224 Â 9% sched_debug.cpu#22.avg_idle
14 Â 5% -35.7% 9 Â 7% sched_debug.cpu#22.cpu_load[1]
31510 Â 9% +204.0% 95784 Â 2% sched_debug.cpu#22.nr_switches
151172 Â 0% -10.6% 135200 Â 0% sched_debug.cpu#22.nr_load_updates
6321 Â 6% +544.7% 40754 Â 2% sched_debug.cpu#23.sched_goidle
150554 Â 0% -10.0% 135467 Â 1% sched_debug.cpu#23.nr_load_updates
17495 Â 7% +184.8% 49824 Â 2% sched_debug.cpu#23.ttwu_count
13 Â 3% -36.4% 8 Â 4% sched_debug.cpu#23.cpu_load[2]
421307 Â 8% +34.1% 564941 Â 1% sched_debug.cpu#23.avg_idle
37049 Â 20% +217.2% 117530 Â 18% sched_debug.cpu#23.sched_count
13 Â 3% -38.2% 8 Â 5% sched_debug.cpu#23.cpu_load[1]
14 Â 5% -33.9% 9 Â 4% sched_debug.cpu#23.cpu_load[3]
13 Â 3% -34.5% 9 Â 0% sched_debug.cpu#23.cpu_load[0]
30682 Â 8% +208.4% 94634 Â 3% sched_debug.cpu#23.nr_switches
14 Â 5% -32.1% 9 Â 5% sched_debug.cpu#23.cpu_load[4]
4883 Â 6% -23.7% 3727 Â 11% sched_debug.cpu#23.curr->pid
14 Â 5% -39.3% 8 Â 13% sched_debug.cpu#23.load
5238 Â 3% -29.0% 3720 Â 15% sched_debug.cpu#24.curr->pid
6584 Â 3% +532.7% 41658 Â 3% sched_debug.cpu#24.sched_goidle
428673 Â 10% +40.8% 603741 Â 7% sched_debug.cpu#24.avg_idle
14 Â 3% -36.2% 9 Â 4% sched_debug.cpu#24.cpu_load[0]
39369 Â 8% +197.9% 117265 Â 12% sched_debug.cpu#24.sched_count
150374 Â 0% -10.1% 135251 Â 1% sched_debug.cpu#24.nr_load_updates
9006 Â 8% -32.2% 6104 Â 5% sched_debug.cpu#24.ttwu_local
14 Â 3% -31.0% 10 Â 0% sched_debug.cpu#24.cpu_load[4]
34540 Â 5% +173.1% 94315 Â 3% sched_debug.cpu#24.nr_switches
14 Â 3% -34.5% 9 Â 5% sched_debug.cpu#24.cpu_load[1]
19271 Â 2% +164.7% 51003 Â 2% sched_debug.cpu#24.ttwu_count
14 Â 3% -31.0% 10 Â 0% sched_debug.cpu#24.cpu_load[3]
14 Â 3% -34.5% 9 Â 5% sched_debug.cpu#24.cpu_load[2]
18407 Â 7% +177.0% 50983 Â 3% sched_debug.cpu#25.ttwu_count
5311 Â 0% -25.3% 3969 Â 9% sched_debug.cpu#25.curr->pid
15 Â 4% -41.7% 8 Â 14% sched_debug.cpu#25.cpu_load[1]
483320 Â 10% +17.9% 569719 Â 5% sched_debug.cpu#25.avg_idle
6536 Â 3% +542.2% 41973 Â 2% sched_debug.cpu#25.sched_goidle
32159 Â 7% +198.9% 96122 Â 3% sched_debug.cpu#25.nr_switches
14 Â 5% -37.3% 9 Â 14% sched_debug.cpu#25.load
14 Â 3% -39.7% 8 Â 14% sched_debug.cpu#25.cpu_load[0]
15 Â 4% -40.0% 9 Â 11% sched_debug.cpu#25.cpu_load[2]
40528 Â 7% +189.2% 117196 Â 5% sched_debug.cpu#25.sched_count
14 Â 2% -37.3% 9 Â 8% sched_debug.cpu#25.cpu_load[4]
15 Â 4% -40.0% 9 Â 11% sched_debug.cpu#25.cpu_load[3]
6505 Â 6% +545.5% 41988 Â 2% sched_debug.cpu#26.sched_goidle
32597 Â 8% +198.0% 97140 Â 5% sched_debug.cpu#26.nr_switches
5159 Â 1% -30.0% 3610 Â 11% sched_debug.cpu#26.curr->pid
18398 Â 9% +178.4% 51212 Â 5% sched_debug.cpu#26.ttwu_count
14 Â 5% -41.4% 8 Â 10% sched_debug.cpu#26.cpu_load[1]
14 Â 3% -40.4% 8 Â 10% sched_debug.cpu#26.cpu_load[2]
14 Â 5% -39.7% 8 Â 9% sched_debug.cpu#26.cpu_load[0]
14 Â 3% -36.8% 9 Â 7% sched_debug.cpu#26.cpu_load[3]
39155 Â 14% +180.9% 110006 Â 6% sched_debug.cpu#26.sched_count
14 Â 3% -36.2% 9 Â 11% sched_debug.cpu#26.cpu_load[4]
14 Â 10% -31.0% 10 Â 7% sched_debug.cpu#26.load
418972 Â 13% +39.4% 583946 Â 9% sched_debug.cpu#26.avg_idle
14 Â 3% -37.9% 9 Â 15% sched_debug.cpu#27.load
14 Â 3% -37.9% 9 Â 7% sched_debug.cpu#27.cpu_load[3]
34672 Â 10% +164.2% 91614 Â 2% sched_debug.cpu#27.nr_switches
14 Â 2% -39.0% 9 Â 7% sched_debug.cpu#27.cpu_load[0]
14 Â 3% -37.9% 9 Â 7% sched_debug.cpu#27.cpu_load[1]
9155 Â 15% -38.8% 5606 Â 8% sched_debug.cpu#27.ttwu_local
43896 Â 23% +158.1% 113312 Â 17% sched_debug.cpu#27.sched_count
14 Â 3% -36.8% 9 Â 7% sched_debug.cpu#27.cpu_load[2]
5086 Â 2% -21.0% 4019 Â 13% sched_debug.cpu#27.curr->pid
14 Â 2% -37.3% 9 Â 4% sched_debug.cpu#27.cpu_load[4]
150291 Â 0% -10.0% 135294 Â 1% sched_debug.cpu#27.nr_load_updates
6795 Â 6% +499.4% 40732 Â 1% sched_debug.cpu#27.sched_goidle
412373 Â 18% +42.2% 586421 Â 3% sched_debug.cpu#27.avg_idle
19738 Â 9% +151.6% 49671 Â 3% sched_debug.cpu#27.ttwu_count
6379 Â 6% +531.9% 40316 Â 1% sched_debug.cpu#28.sched_goidle
14 Â 7% -43.1% 8 Â 13% sched_debug.cpu#28.cpu_load[0]
30304 Â 11% +201.2% 91288 Â 3% sched_debug.cpu#28.nr_switches
14 Â 3% -42.1% 8 Â 5% sched_debug.cpu#28.cpu_load[2]
14 Â 5% -37.3% 9 Â 4% sched_debug.cpu#28.cpu_load[4]
7447 Â 19% -21.5% 5848 Â 16% sched_debug.cpu#28.ttwu_local
14 Â 7% -43.1% 8 Â 13% sched_debug.cpu#28.cpu_load[1]
150488 Â 0% -10.0% 135387 Â 1% sched_debug.cpu#28.nr_load_updates
15 Â 12% -40.0% 9 Â 17% sched_debug.cpu#28.load
47096 Â 34% +144.1% 114961 Â 13% sched_debug.cpu#28.sched_count
14 Â 3% -37.9% 9 Â 7% sched_debug.cpu#28.cpu_load[3]
17301 Â 13% +184.6% 49235 Â 3% sched_debug.cpu#28.ttwu_count
429678 Â 7% +31.0% 562755 Â 5% sched_debug.cpu#28.avg_idle
14 Â 3% -36.2% 9 Â 4% sched_debug.cpu#29.cpu_load[2]
150289 Â 0% -10.0% 135209 Â 1% sched_debug.cpu#29.nr_load_updates
5143 Â 2% -20.8% 4074 Â 18% sched_debug.cpu#29.curr->pid
6371 Â 4% +535.5% 40493 Â 3% sched_debug.cpu#29.sched_goidle
14 Â 5% -37.3% 9 Â 4% sched_debug.cpu#29.cpu_load[1]
15 Â 4% -38.3% 9 Â 4% sched_debug.cpu#29.cpu_load[3]
17989 Â 12% +175.7% 49598 Â 2% sched_debug.cpu#29.ttwu_count
14 Â 5% -39.0% 9 Â 0% sched_debug.cpu#29.cpu_load[0]
14 Â 5% -30.4% 9 Â 16% sched_debug.cpu#29.load
31759 Â 14% +188.3% 91569 Â 3% sched_debug.cpu#29.nr_switches
471255 Â 8% +30.0% 612851 Â 9% sched_debug.cpu#29.avg_idle
15 Â 2% -37.7% 9 Â 5% sched_debug.cpu#29.cpu_load[4]
37619 Â 19% +228.8% 123692 Â 12% sched_debug.cpu#29.sched_count
18968 Â 14% +172.7% 51724 Â 3% sched_debug.cpu#3.ttwu_count
39894 Â 18% +153.6% 101178 Â 8% sched_debug.cpu#3.sched_count
33554 Â 16% +186.1% 95988 Â 3% sched_debug.cpu#3.nr_switches
14 Â 3% -35.1% 9 Â 8% sched_debug.cpu#3.cpu_load[2]
14 Â 5% -33.9% 9 Â 8% sched_debug.cpu#3.cpu_load[1]
418310 Â 5% +37.9% 576750 Â 5% sched_debug.cpu#3.avg_idle
14 Â 0% -33.9% 9 Â 8% sched_debug.cpu#3.cpu_load[3]
5142 Â 2% -21.6% 4034 Â 6% sched_debug.cpu#3.curr->pid
7131 Â 5% +495.7% 42485 Â 3% sched_debug.cpu#3.sched_goidle
14 Â 0% -30.4% 9 Â 4% sched_debug.cpu#3.cpu_load[4]
14 Â 7% -35.1% 9 Â 15% sched_debug.cpu#3.cpu_load[0]
14 Â 5% -30.4% 9 Â 11% sched_debug.cpu#3.load
41476 Â 32% +142.2% 100464 Â 4% sched_debug.cpu#30.sched_count
14 Â 2% -37.3% 9 Â 4% sched_debug.cpu#30.cpu_load[4]
31708 Â 14% +185.0% 90376 Â 1% sched_debug.cpu#30.nr_switches
17674 Â 14% +179.3% 49362 Â 1% sched_debug.cpu#30.ttwu_count
150228 Â 0% -10.0% 135265 Â 1% sched_debug.cpu#30.nr_load_updates
14 Â 5% -43.9% 8 Â 0% sched_debug.cpu#30.cpu_load[1]
14 Â 5% -42.1% 8 Â 13% sched_debug.cpu#30.load
14 Â 5% -43.1% 8 Â 5% sched_debug.cpu#30.cpu_load[2]
6329 Â 7% +529.5% 39848 Â 1% sched_debug.cpu#30.sched_goidle
425532 Â 14% +43.5% 610674 Â 8% sched_debug.cpu#30.avg_idle
14 Â 5% -41.4% 8 Â 5% sched_debug.cpu#30.cpu_load[3]
5108 Â 4% -28.4% 3655 Â 15% sched_debug.cpu#30.curr->pid
14 Â 5% -43.9% 8 Â 0% sched_debug.cpu#30.cpu_load[0]
18864 Â 9% +163.9% 49790 Â 1% sched_debug.cpu#31.ttwu_count
433193 Â 3% +38.0% 598002 Â 6% sched_debug.cpu#31.avg_idle
14 Â 3% -38.6% 8 Â 9% sched_debug.cpu#31.cpu_load[4]
14 Â 5% -41.1% 8 Â 15% sched_debug.cpu#31.cpu_load[1]
5129 Â 2% -26.7% 3761 Â 12% sched_debug.cpu#31.curr->pid
33770 Â 12% +172.8% 92110 Â 1% sched_debug.cpu#31.nr_switches
150123 Â 0% -9.7% 135537 Â 1% sched_debug.cpu#31.nr_load_updates
14 Â 5% -41.1% 8 Â 10% sched_debug.cpu#31.cpu_load[0]
6565 Â 5% +508.8% 39972 Â 2% sched_debug.cpu#31.sched_goidle
14 Â 3% -40.4% 8 Â 13% sched_debug.cpu#31.cpu_load[3]
9070 Â 17% -25.3% 6771 Â 13% sched_debug.cpu#31.ttwu_local
14 Â 5% -41.1% 8 Â 15% sched_debug.cpu#31.cpu_load[2]
6227 Â 7% +428.4% 32902 Â 1% sched_debug.cpu#32.sched_goidle
14 Â 5% -41.1% 8 Â 17% sched_debug.cpu#32.cpu_load[1]
13 Â 3% -40.0% 8 Â 10% sched_debug.cpu#32.cpu_load[2]
15098 Â 11% +156.0% 38651 Â 2% sched_debug.cpu#32.ttwu_count
5247 Â 1% -32.3% 3550 Â 14% sched_debug.cpu#32.curr->pid
14 Â 7% -37.5% 8 Â 18% sched_debug.cpu#32.load
14 Â 0% -39.3% 8 Â 10% sched_debug.cpu#32.cpu_load[3]
150384 Â 0% -13.9% 129496 Â 1% sched_debug.cpu#32.nr_load_updates
26391 Â 11% +190.0% 76546 Â 1% sched_debug.cpu#32.nr_switches
375426 Â 9% +79.6% 674153 Â 3% sched_debug.cpu#32.avg_idle
14 Â 0% -35.7% 9 Â 7% sched_debug.cpu#32.cpu_load[4]
28730 Â 15% +174.6% 78888 Â 1% sched_debug.cpu#32.sched_count
13 Â 6% -41.8% 8 Â 17% sched_debug.cpu#32.cpu_load[0]
5267 Â 1% -35.6% 3392 Â 18% sched_debug.cpu#33.curr->pid
14 Â 3% -43.9% 8 Â 15% sched_debug.cpu#33.cpu_load[0]
15812 Â 10% +144.2% 38619 Â 3% sched_debug.cpu#33.ttwu_count
34631 Â 26% +127.0% 78608 Â 4% sched_debug.cpu#33.sched_count
14 Â 3% -36.2% 9 Â 8% sched_debug.cpu#33.cpu_load[4]
150320 Â 0% -14.1% 129155 Â 1% sched_debug.cpu#33.nr_load_updates
14 Â 5% -39.3% 8 Â 13% sched_debug.cpu#33.load
14 Â 2% -39.0% 9 Â 7% sched_debug.cpu#33.cpu_load[3]
14 Â 3% -41.4% 8 Â 13% sched_debug.cpu#33.cpu_load[2]
14 Â 3% -43.9% 8 Â 15% sched_debug.cpu#33.cpu_load[1]
27394 Â 10% +177.4% 75990 Â 3% sched_debug.cpu#33.nr_switches
377751 Â 6% +73.9% 656823 Â 4% sched_debug.cpu#33.avg_idle
6209 Â 3% +429.9% 32905 Â 2% sched_debug.cpu#33.sched_goidle
14 Â 0% -41.1% 8 Â 10% sched_debug.cpu#34.cpu_load[2]
13 Â 3% -39.6% 8 Â 8% sched_debug.cpu#34.cpu_load[0]
14 Â 0% -37.5% 8 Â 9% sched_debug.cpu#34.cpu_load[3]
30105 Â 18% +158.7% 77892 Â 3% sched_debug.cpu#34.nr_switches
150139 Â 0% -13.4% 130058 Â 1% sched_debug.cpu#34.nr_load_updates
36505 Â 26% +122.7% 81302 Â 2% sched_debug.cpu#34.sched_count
5149 Â 3% -29.8% 3612 Â 15% sched_debug.cpu#34.curr->pid
17483 Â 14% +124.1% 39182 Â 2% sched_debug.cpu#34.ttwu_count
427142 Â 13% +60.5% 685572 Â 2% sched_debug.cpu#34.avg_idle
6139 Â 9% +437.9% 33026 Â 3% sched_debug.cpu#34.sched_goidle
14 Â 0% -37.5% 8 Â 9% sched_debug.cpu#34.cpu_load[4]
13 Â 6% -33.3% 9 Â 7% sched_debug.cpu#34.load
13 Â 3% -40.0% 8 Â 10% sched_debug.cpu#34.cpu_load[1]
38213 Â 26% +125.8% 86267 Â 11% sched_debug.cpu#35.sched_count
5162 Â 2% -30.7% 3576 Â 21% sched_debug.cpu#35.curr->pid
150061 Â 0% -13.8% 129327 Â 1% sched_debug.cpu#35.nr_load_updates
14 Â 5% -42.9% 8 Â 12% sched_debug.cpu#35.cpu_load[1]
16579 Â 23% +130.0% 38131 Â 3% sched_debug.cpu#35.ttwu_count
14 Â 5% -37.5% 8 Â 9% sched_debug.cpu#35.cpu_load[0]
439468 Â 4% +53.6% 675096 Â 1% sched_debug.cpu#35.avg_idle
14 Â 0% -41.1% 8 Â 10% sched_debug.cpu#35.cpu_load[2]
14 Â 0% -39.3% 8 Â 5% sched_debug.cpu#35.cpu_load[3]
5768 Â 7% +474.6% 33143 Â 2% sched_debug.cpu#35.sched_goidle
29372 Â 24% +161.3% 76738 Â 2% sched_debug.cpu#35.nr_switches
14 Â 0% -37.5% 8 Â 4% sched_debug.cpu#35.cpu_load[4]
149993 Â 0% -13.6% 129644 Â 0% sched_debug.cpu#36.nr_load_updates
5201 Â 1% -26.1% 3843 Â 8% sched_debug.cpu#36.curr->pid
28801 Â 9% +172.8% 78566 Â 3% sched_debug.cpu#36.nr_switches
13 Â 6% -41.8% 8 Â 8% sched_debug.cpu#36.cpu_load[3]
13 Â 6% -41.8% 8 Â 8% sched_debug.cpu#36.cpu_load[2]
381368 Â 8% +70.1% 648745 Â 2% sched_debug.cpu#36.avg_idle
5943 Â 2% +456.7% 33086 Â 2% sched_debug.cpu#36.sched_goidle
14 Â 8% -33.9% 9 Â 11% sched_debug.cpu#36.load
14 Â 7% -38.6% 8 Â 9% sched_debug.cpu#36.cpu_load[4]
32433 Â 20% +155.2% 82772 Â 3% sched_debug.cpu#36.sched_count
13 Â 6% -41.8% 8 Â 8% sched_debug.cpu#36.cpu_load[0]
15835 Â 8% +148.5% 39344 Â 4% sched_debug.cpu#36.ttwu_count
13 Â 6% -41.8% 8 Â 8% sched_debug.cpu#36.cpu_load[1]
39427 Â 23% +103.9% 80377 Â 5% sched_debug.cpu#37.sched_count
28952 Â 1% +164.5% 76574 Â 3% sched_debug.cpu#37.nr_switches
5289 Â 1% -30.6% 3670 Â 2% sched_debug.cpu#37.curr->pid
13 Â 3% -37.0% 8 Â 10% sched_debug.cpu#37.cpu_load[3]
13 Â 3% -32.7% 9 Â 11% sched_debug.cpu#37.load
13 Â 6% -38.2% 8 Â 10% sched_debug.cpu#37.cpu_load[2]
416579 Â 11% +61.7% 673750 Â 1% sched_debug.cpu#37.avg_idle
13 Â 6% -38.2% 8 Â 10% sched_debug.cpu#37.cpu_load[1]
14 Â 0% -33.9% 9 Â 4% sched_debug.cpu#37.cpu_load[4]
15659 Â 3% +149.5% 39063 Â 4% sched_debug.cpu#37.ttwu_count
149915 Â 0% -13.6% 129537 Â 1% sched_debug.cpu#37.nr_load_updates
6300 Â 7% +419.9% 32753 Â 2% sched_debug.cpu#37.sched_goidle
13 Â 6% -38.2% 8 Â 10% sched_debug.cpu#37.cpu_load[0]
5944 Â 2% +454.9% 32984 Â 1% sched_debug.cpu#38.sched_goidle
5213 Â 2% -32.9% 3499 Â 16% sched_debug.cpu#38.curr->pid
14 Â 3% -43.1% 8 Â 5% sched_debug.cpu#38.cpu_load[3]
14 Â 3% -40.4% 8 Â 13% sched_debug.cpu#38.load
14 Â 3% -43.9% 8 Â 8% sched_debug.cpu#38.cpu_load[1]
33925 Â 40% +138.0% 80747 Â 2% sched_debug.cpu#38.sched_count
14 Â 3% -44.8% 8 Â 8% sched_debug.cpu#38.cpu_load[2]
150000 Â 0% -13.9% 129180 Â 1% sched_debug.cpu#38.nr_load_updates
437919 Â 6% +64.8% 721794 Â 5% sched_debug.cpu#38.avg_idle
14 Â 5% -42.1% 8 Â 10% sched_debug.cpu#38.cpu_load[0]
14 Â 5% -42.4% 8 Â 5% sched_debug.cpu#38.cpu_load[4]
25748 Â 6% +202.9% 77995 Â 1% sched_debug.cpu#38.nr_switches
14197 Â 8% +172.5% 38691 Â 1% sched_debug.cpu#38.ttwu_count
29596 Â 13% +159.9% 76923 Â 1% sched_debug.cpu#39.nr_switches
14 Â 0% -37.5% 8 Â 4% sched_debug.cpu#39.cpu_load[1]
14 Â 0% -37.5% 8 Â 4% sched_debug.cpu#39.cpu_load[2]
14 Â 3% -35.1% 9 Â 4% sched_debug.cpu#39.cpu_load[3]
149902 Â 0% -13.6% 129582 Â 1% sched_debug.cpu#39.nr_load_updates
31260 Â 11% +166.4% 83279 Â 3% sched_debug.cpu#39.sched_count
5882 Â 8% +449.3% 32310 Â 1% sched_debug.cpu#39.sched_goidle
5291 Â 1% -30.0% 3705 Â 8% sched_debug.cpu#39.curr->pid
16822 Â 10% +137.4% 39930 Â 3% sched_debug.cpu#39.ttwu_count
14 Â 0% -42.9% 8 Â 8% sched_debug.cpu#39.cpu_load[0]
14 Â 0% -35.7% 9 Â 7% sched_debug.cpu#39.load
420204 Â 7% +54.7% 649999 Â 2% sched_debug.cpu#39.avg_idle
14 Â 3% -35.1% 9 Â 4% sched_debug.cpu#39.cpu_load[4]
15 Â 10% -41.0% 9 Â 13% sched_debug.cpu#4.cpu_load[1]
436412 Â 12% +41.1% 615909 Â 5% sched_debug.cpu#4.avg_idle
15 Â 7% -40.3% 9 Â 11% sched_debug.cpu#4.cpu_load[3]
8027 Â 21% +443.1% 43600 Â 2% sched_debug.cpu#4.sched_goidle
14 Â 3% -35.1% 9 Â 11% sched_debug.cpu#4.cpu_load[0]
5160 Â 2% -20.9% 4079 Â 7% sched_debug.cpu#4.curr->pid
22071 Â 4% +138.0% 52524 Â 3% sched_debug.cpu#4.ttwu_count
47912 Â 8% +113.3% 102208 Â 1% sched_debug.cpu#4.sched_count
39644 Â 4% +147.9% 98279 Â 3% sched_debug.cpu#4.nr_switches
11544 Â 2% -27.3% 8397 Â 22% sched_debug.cpu#4.ttwu_local
15 Â 10% -41.0% 9 Â 13% sched_debug.cpu#4.cpu_load[2]
13 Â 3% -25.5% 10 Â 8% sched_debug.cpu#4.load
15 Â 7% -38.7% 9 Â 9% sched_debug.cpu#4.cpu_load[4]
403692 Â 12% +61.8% 653091 Â 5% sched_debug.cpu#40.avg_idle
14 Â 3% -36.8% 9 Â 7% sched_debug.cpu#40.cpu_load[4]
149865 Â 0% -13.1% 130283 Â 1% sched_debug.cpu#40.nr_load_updates
5812 Â 5% +474.9% 33412 Â 3% sched_debug.cpu#40.sched_goidle
15812 Â 10% +144.8% 38704 Â 4% sched_debug.cpu#40.ttwu_count
14 Â 3% -42.1% 8 Â 10% sched_debug.cpu#40.cpu_load[3]
14 Â 3% -42.1% 8 Â 10% sched_debug.cpu#40.cpu_load[2]
14 Â 5% -38.6% 8 Â 12% sched_debug.cpu#40.load
28338 Â 11% +173.6% 77530 Â 3% sched_debug.cpu#40.nr_switches
32984 Â 15% +143.5% 80306 Â 3% sched_debug.cpu#40.sched_count
14 Â 7% -43.1% 8 Â 10% sched_debug.cpu#40.cpu_load[0]
5223 Â 1% -28.0% 3762 Â 10% sched_debug.cpu#40.curr->pid
14 Â 7% -42.1% 8 Â 10% sched_debug.cpu#40.cpu_load[1]
14 Â 0% -37.5% 8 Â 9% sched_debug.cpu#41.cpu_load[4]
13 Â 6% -36.4% 8 Â 14% sched_debug.cpu#41.load
5946 Â 6% +464.4% 33561 Â 2% sched_debug.cpu#41.sched_goidle
5100 Â 3% -27.4% 3701 Â 12% sched_debug.cpu#41.curr->pid
32262 Â 16% +148.7% 80251 Â 3% sched_debug.cpu#41.sched_count
29197 Â 8% +167.5% 78115 Â 2% sched_debug.cpu#41.nr_switches
149918 Â 0% -13.7% 129319 Â 1% sched_debug.cpu#41.nr_load_updates
13 Â 3% -41.8% 8 Â 8% sched_debug.cpu#41.cpu_load[0]
13 Â 3% -41.8% 8 Â 8% sched_debug.cpu#41.cpu_load[1]
13 Â 3% -40.0% 8 Â 5% sched_debug.cpu#41.cpu_load[2]
16140 Â 8% +139.4% 38649 Â 4% sched_debug.cpu#41.ttwu_count
443969 Â 6% +53.8% 682912 Â 3% sched_debug.cpu#41.avg_idle
13 Â 3% -40.0% 8 Â 5% sched_debug.cpu#41.cpu_load[3]
15 Â 2% -45.9% 8 Â 10% sched_debug.cpu#42.cpu_load[0]
14 Â 3% -41.4% 8 Â 5% sched_debug.cpu#42.cpu_load[4]
15 Â 4% -48.3% 7 Â 24% sched_debug.cpu#42.load
5183 Â 3% -37.9% 3216 Â 21% sched_debug.cpu#42.curr->pid
15330 Â 5% +163.4% 40384 Â 2% sched_debug.cpu#42.ttwu_count
14 Â 5% -43.9% 8 Â 8% sched_debug.cpu#42.cpu_load[3]
5651 Â 3% +498.4% 33819 Â 1% sched_debug.cpu#42.sched_goidle
14 Â 3% -43.1% 8 Â 5% sched_debug.cpu#42.cpu_load[1]
31858 Â 13% +165.8% 84681 Â 2% sched_debug.cpu#42.sched_count
427660 Â 8% +65.9% 709402 Â 1% sched_debug.cpu#42.avg_idle
149880 Â 0% -13.3% 129968 Â 1% sched_debug.cpu#42.nr_load_updates
14 Â 3% -43.1% 8 Â 5% sched_debug.cpu#42.cpu_load[2]
26967 Â 4% +199.1% 80670 Â 2% sched_debug.cpu#42.nr_switches
149899 Â 0% -13.6% 129473 Â 1% sched_debug.cpu#43.nr_load_updates
14 Â 5% -47.5% 7 Â 5% sched_debug.cpu#43.load
14 Â 8% -47.5% 7 Â 10% sched_debug.cpu#43.cpu_load[1]
15334 Â 13% +153.6% 38892 Â 4% sched_debug.cpu#43.ttwu_count
28502 Â 15% +172.9% 77773 Â 1% sched_debug.cpu#43.nr_switches
6038 Â 9% +450.9% 33264 Â 1% sched_debug.cpu#43.sched_goidle
5310 Â 0% -36.4% 3380 Â 18% sched_debug.cpu#43.curr->pid
406693 Â 5% +67.1% 679394 Â 7% sched_debug.cpu#43.avg_idle
14 Â 8% -45.8% 8 Â 8% sched_debug.cpu#43.cpu_load[0]
14 Â 5% -39.7% 8 Â 9% sched_debug.cpu#43.cpu_load[4]
30032 Â 13% +163.2% 79035 Â 1% sched_debug.cpu#43.sched_count
14 Â 5% -41.4% 8 Â 5% sched_debug.cpu#43.cpu_load[3]
14 Â 5% -46.6% 7 Â 10% sched_debug.cpu#43.cpu_load[2]
16043 Â 12% +142.6% 38928 Â 2% sched_debug.cpu#44.ttwu_count
5236 Â 3% -31.7% 3576 Â 17% sched_debug.cpu#44.curr->pid
14 Â 3% -36.8% 9 Â 13% sched_debug.cpu#44.cpu_load[4]
30020 Â 11% +155.1% 76592 Â 2% sched_debug.cpu#44.nr_switches
6216 Â 2% +425.2% 32650 Â 1% sched_debug.cpu#44.sched_goidle
14 Â 5% -37.5% 8 Â 21% sched_debug.cpu#44.load
393059 Â 9% +77.3% 696766 Â 4% sched_debug.cpu#44.avg_idle
32223 Â 15% +151.2% 80928 Â 1% sched_debug.cpu#44.sched_count
14 Â 5% -41.4% 8 Â 17% sched_debug.cpu#44.cpu_load[0]
14 Â 5% -39.3% 8 Â 17% sched_debug.cpu#44.cpu_load[2]
14 Â 5% -40.4% 8 Â 17% sched_debug.cpu#44.cpu_load[1]
149852 Â 0% -13.6% 129432 Â 1% sched_debug.cpu#44.nr_load_updates
14 Â 3% -38.6% 8 Â 14% sched_debug.cpu#44.cpu_load[3]
14 Â 3% -40.4% 8 Â 5% sched_debug.cpu#45.cpu_load[4]
27127 Â 9% +189.0% 78406 Â 2% sched_debug.cpu#45.nr_switches
30870 Â 8% +180.3% 86533 Â 5% sched_debug.cpu#45.sched_count
15222 Â 8% +159.4% 39486 Â 4% sched_debug.cpu#45.ttwu_count
399822 Â 7% +64.7% 658696 Â 8% sched_debug.cpu#45.avg_idle
14 Â 3% -43.9% 8 Â 8% sched_debug.cpu#45.cpu_load[3]
149876 Â 0% -13.1% 130170 Â 2% sched_debug.cpu#45.nr_load_updates
5727 Â 4% +481.5% 33305 Â 0% sched_debug.cpu#45.sched_goidle
13 Â 6% -45.5% 7 Â 6% sched_debug.cpu#45.cpu_load[0]
14 Â 9% -38.6% 8 Â 9% sched_debug.cpu#45.load
5161 Â 1% -33.0% 3460 Â 12% sched_debug.cpu#45.curr->pid
13 Â 6% -43.6% 7 Â 5% sched_debug.cpu#45.cpu_load[1]
13 Â 6% -41.8% 8 Â 8% sched_debug.cpu#45.cpu_load[2]
5143 Â 4% -34.9% 3347 Â 19% sched_debug.cpu#46.curr->pid
5725 Â 4% +463.9% 32286 Â 1% sched_debug.cpu#46.sched_goidle
14 Â 3% -46.6% 7 Â 10% sched_debug.cpu#46.cpu_load[2]
14 Â 3% -39.7% 8 Â 9% sched_debug.cpu#46.cpu_load[4]
14 Â 3% -47.4% 7 Â 6% sched_debug.cpu#46.cpu_load[0]
14 Â 3% -44.8% 8 Â 12% sched_debug.cpu#46.cpu_load[3]
30713 Â 17% +165.0% 81393 Â 11% sched_debug.cpu#46.sched_count
27647 Â 10% +173.2% 75542 Â 2% sched_debug.cpu#46.nr_switches
14 Â 3% -47.4% 7 Â 6% sched_debug.cpu#46.cpu_load[1]
14823 Â 5% +158.6% 38328 Â 2% sched_debug.cpu#46.ttwu_count
402688 Â 9% +78.1% 717376 Â 2% sched_debug.cpu#46.avg_idle
149705 Â 0% -13.8% 129111 Â 1% sched_debug.cpu#46.nr_load_updates
27298 Â 6% +175.3% 75141 Â 3% sched_debug.cpu#47.nr_switches
13 Â 6% -38.9% 8 Â 10% sched_debug.cpu#47.load
14 Â 13% -45.8% 8 Â 0% sched_debug.cpu#47.cpu_load[2]
14 Â 7% -38.6% 8 Â 4% sched_debug.cpu#47.cpu_load[4]
5296 Â 1% -29.2% 3752 Â 7% sched_debug.cpu#47.curr->pid
5721 Â 6% +458.4% 31951 Â 1% sched_debug.cpu#47.sched_goidle
15555 Â 5% +150.9% 39035 Â 3% sched_debug.cpu#47.ttwu_count
15 Â 14% -49.2% 7 Â 5% sched_debug.cpu#47.cpu_load[1]
15 Â 19% -50.8% 7 Â 6% sched_debug.cpu#47.cpu_load[0]
427514 Â 3% +62.5% 694869 Â 5% sched_debug.cpu#47.avg_idle
30901 Â 9% +150.4% 77385 Â 4% sched_debug.cpu#47.sched_count
149712 Â 0% -13.8% 129072 Â 1% sched_debug.cpu#47.nr_load_updates
14 Â 10% -43.1% 8 Â 5% sched_debug.cpu#47.cpu_load[3]
5825 Â 9% +484.6% 34052 Â 2% sched_debug.cpu#48.sched_goidle
445509 Â 10% +55.6% 693060 Â 2% sched_debug.cpu#48.avg_idle
5144 Â 2% -27.6% 3724 Â 15% sched_debug.cpu#48.curr->pid
149663 Â 0% -13.2% 129942 Â 1% sched_debug.cpu#48.nr_load_updates
31091 Â 19% +162.8% 81718 Â 1% sched_debug.cpu#48.nr_switches
13 Â 3% -34.0% 8 Â 9% sched_debug.cpu#48.cpu_load[0]
37659 Â 9% +128.2% 85950 Â 2% sched_debug.cpu#48.sched_count
13 Â 6% -30.2% 9 Â 20% sched_debug.cpu#48.load
14 Â 5% -37.5% 8 Â 9% sched_debug.cpu#48.cpu_load[1]
14 Â 7% -38.6% 8 Â 9% sched_debug.cpu#48.cpu_load[2]
14 Â 5% -37.9% 9 Â 7% sched_debug.cpu#48.cpu_load[3]
14 Â 3% -35.1% 9 Â 4% sched_debug.cpu#48.cpu_load[4]
17375 Â 17% +135.1% 40852 Â 3% sched_debug.cpu#48.ttwu_count
14781 Â 16% +169.2% 39795 Â 2% sched_debug.cpu#49.ttwu_count
5723 Â 5% +499.1% 34288 Â 2% sched_debug.cpu#49.sched_goidle
13 Â 3% -33.3% 9 Â 13% sched_debug.cpu#49.load
14 Â 0% -39.3% 8 Â 5% sched_debug.cpu#49.cpu_load[4]
13 Â 3% -43.6% 7 Â 5% sched_debug.cpu#49.cpu_load[3]
29140 Â 19% +193.2% 85438 Â 5% sched_debug.cpu#49.sched_count
149680 Â 0% -13.5% 129415 Â 1% sched_debug.cpu#49.nr_load_updates
26451 Â 18% +205.1% 80693 Â 3% sched_debug.cpu#49.nr_switches
5140 Â 2% -26.7% 3767 Â 9% sched_debug.cpu#49.curr->pid
442201 Â 9% +59.5% 705228 Â 5% sched_debug.cpu#49.avg_idle
13 Â 3% -48.1% 7 Â 0% sched_debug.cpu#49.cpu_load[0]
13 Â 3% -46.3% 7 Â 5% sched_debug.cpu#49.cpu_load[2]
13 Â 3% -48.1% 7 Â 0% sched_debug.cpu#49.cpu_load[1]
393207 Â 8% +44.0% 566272 Â 12% sched_debug.cpu#5.avg_idle
14 Â 3% -40.4% 8 Â 5% sched_debug.cpu#5.cpu_load[2]
5202 Â 3% -27.5% 3773 Â 23% sched_debug.cpu#5.curr->pid
7245 Â 9% +487.3% 42556 Â 3% sched_debug.cpu#5.sched_goidle
14 Â 3% -43.1% 8 Â 13% sched_debug.cpu#5.cpu_load[0]
50662 Â 48% +101.7% 102190 Â 4% sched_debug.cpu#5.sched_count
8840 Â 4% -14.7% 7541 Â 11% sched_debug.cpu#5.ttwu_local
14 Â 8% -25.0% 10 Â 10% sched_debug.cpu#5.load
34132 Â 2% +181.9% 96203 Â 3% sched_debug.cpu#5.nr_switches
14 Â 3% -40.4% 8 Â 5% sched_debug.cpu#5.cpu_load[3]
19453 Â 1% +163.4% 51235 Â 2% sched_debug.cpu#5.ttwu_count
14 Â 3% -35.1% 9 Â 4% sched_debug.cpu#5.cpu_load[4]
14 Â 3% -42.1% 8 Â 13% sched_debug.cpu#5.cpu_load[1]
17176 Â 11% +126.2% 38854 Â 3% sched_debug.cpu#50.ttwu_count
6062 Â 6% +454.0% 33580 Â 2% sched_debug.cpu#50.sched_goidle
427537 Â 8% +70.6% 729408 Â 8% sched_debug.cpu#50.avg_idle
8338 Â 16% -26.7% 6110 Â 4% sched_debug.cpu#50.ttwu_local
14 Â 10% -46.6% 7 Â 14% sched_debug.cpu#50.cpu_load[0]
31212 Â 11% +150.5% 78189 Â 2% sched_debug.cpu#50.nr_switches
14 Â 13% -45.8% 8 Â 8% sched_debug.cpu#50.cpu_load[1]
15 Â 11% -46.7% 8 Â 8% sched_debug.cpu#50.cpu_load[2]
14 Â 8% -45.8% 8 Â 8% sched_debug.cpu#50.cpu_load[3]
34287 Â 13% +135.9% 80898 Â 2% sched_debug.cpu#50.sched_count
149585 Â 0% -14.2% 128408 Â 1% sched_debug.cpu#50.nr_load_updates
5077 Â 2% -31.0% 3505 Â 20% sched_debug.cpu#50.curr->pid
14 Â 5% -41.4% 8 Â 10% sched_debug.cpu#50.cpu_load[4]
13 Â 3% -46.3% 7 Â 11% sched_debug.cpu#51.cpu_load[0]
149563 Â 0% -13.7% 129131 Â 1% sched_debug.cpu#51.nr_load_updates
13 Â 6% -37.7% 8 Â 13% sched_debug.cpu#51.load
5961 Â 3% +456.4% 33169 Â 3% sched_debug.cpu#51.sched_goidle
14 Â 8% -50.8% 7 Â 11% sched_debug.cpu#51.cpu_load[1]
14805 Â 14% +169.2% 39856 Â 8% sched_debug.cpu#51.ttwu_count
446768 Â 9% +46.4% 654210 Â 3% sched_debug.cpu#51.avg_idle
27463 Â 13% +189.6% 79531 Â 7% sched_debug.cpu#51.nr_switches
5134 Â 2% -35.3% 3321 Â 18% sched_debug.cpu#51.curr->pid
14 Â 3% -42.1% 8 Â 10% sched_debug.cpu#51.cpu_load[4]
31453 Â 12% +165.8% 83619 Â 4% sched_debug.cpu#51.sched_count
14 Â 5% -44.8% 8 Â 8% sched_debug.cpu#51.cpu_load[3]
14 Â 8% -50.8% 7 Â 11% sched_debug.cpu#51.cpu_load[2]
13 Â 3% -41.8% 8 Â 15% sched_debug.cpu#52.cpu_load[1]
5140 Â 3% -28.5% 3677 Â 26% sched_debug.cpu#52.curr->pid
6165 Â 6% +427.5% 32524 Â 1% sched_debug.cpu#52.sched_goidle
17019 Â 18% +126.4% 38539 Â 4% sched_debug.cpu#52.ttwu_count
13 Â 3% -44.4% 7 Â 20% sched_debug.cpu#52.cpu_load[0]
35027 Â 21% +134.8% 82258 Â 8% sched_debug.cpu#52.sched_count
13 Â 3% -38.2% 8 Â 13% sched_debug.cpu#52.cpu_load[3]
13 Â 6% -38.9% 8 Â 26% sched_debug.cpu#52.load
149605 Â 0% -13.8% 128950 Â 0% sched_debug.cpu#52.nr_load_updates
31250 Â 16% +143.2% 76016 Â 3% sched_debug.cpu#52.nr_switches
13 Â 3% -41.8% 8 Â 15% sched_debug.cpu#52.cpu_load[2]
407007 Â 18% +73.1% 704364 Â 7% sched_debug.cpu#52.avg_idle
14 Â 0% -35.7% 9 Â 11% sched_debug.cpu#52.cpu_load[4]
14 Â 0% -39.3% 8 Â 5% sched_debug.cpu#53.cpu_load[2]
14336 Â 16% +174.3% 39327 Â 2% sched_debug.cpu#53.ttwu_count
149573 Â 0% -13.4% 129557 Â 0% sched_debug.cpu#53.nr_load_updates
14 Â 0% -39.3% 8 Â 5% sched_debug.cpu#53.cpu_load[3]
14 Â 0% -37.5% 8 Â 4% sched_debug.cpu#53.cpu_load[4]
26045 Â 17% +201.9% 78623 Â 4% sched_debug.cpu#53.nr_switches
5785 Â 9% +479.3% 33519 Â 3% sched_debug.cpu#53.sched_goidle
394131 Â 9% +71.8% 677092 Â 4% sched_debug.cpu#53.avg_idle
14 Â 3% -36.2% 9 Â 14% sched_debug.cpu#53.load
1 Â 0% -100.0% 0 Â 0% sched_debug.cpu#53.nr_running
5301 Â 1% -32.4% 3583 Â 22% sched_debug.cpu#53.curr->pid
14 Â 0% -39.3% 8 Â 5% sched_debug.cpu#53.cpu_load[1]
13 Â 3% -40.0% 8 Â 5% sched_debug.cpu#53.cpu_load[0]
27428 Â 19% +215.5% 86525 Â 11% sched_debug.cpu#53.sched_count
13 Â 6% -43.6% 7 Â 5% sched_debug.cpu#54.cpu_load[1]
5229 Â 0% -28.1% 3762 Â 4% sched_debug.cpu#54.curr->pid
149541 Â 0% -13.7% 129115 Â 1% sched_debug.cpu#54.nr_load_updates
5754 Â 5% +481.1% 33442 Â 2% sched_debug.cpu#54.sched_goidle
14 Â 5% -44.6% 7 Â 5% sched_debug.cpu#54.cpu_load[0]
14 Â 14% -41.4% 8 Â 5% sched_debug.cpu#54.load
14 Â 0% -44.6% 7 Â 5% sched_debug.cpu#54.cpu_load[3]
15157 Â 14% +154.4% 38560 Â 2% sched_debug.cpu#54.ttwu_count
14 Â 0% -41.1% 8 Â 5% sched_debug.cpu#54.cpu_load[4]
27067 Â 15% +186.7% 77604 Â 2% sched_debug.cpu#54.nr_switches
381381 Â 9% +87.1% 713657 Â 4% sched_debug.cpu#54.avg_idle
14 Â 0% -44.6% 7 Â 5% sched_debug.cpu#54.cpu_load[2]
33009 Â 30% +218.6% 105158 Â 35% sched_debug.cpu#54.sched_count
13 Â 6% -44.4% 7 Â 11% sched_debug.cpu#55.cpu_load[1]
5335 Â 6% +518.1% 32977 Â 4% sched_debug.cpu#55.sched_goidle
13 Â 8% -33.3% 9 Â 7% sched_debug.cpu#55.load
24372 Â 15% +215.9% 76995 Â 5% sched_debug.cpu#55.nr_switches
5093 Â 4% -21.1% 4016 Â 9% sched_debug.cpu#55.curr->pid
149428 Â 0% -14.0% 128500 Â 0% sched_debug.cpu#55.nr_load_updates
13723 Â 13% +181.8% 38667 Â 2% sched_debug.cpu#55.ttwu_count
13 Â 6% -46.3% 7 Â 17% sched_debug.cpu#55.cpu_load[0]
14 Â 5% -46.4% 7 Â 11% sched_debug.cpu#55.cpu_load[2]
14 Â 5% -42.9% 8 Â 8% sched_debug.cpu#55.cpu_load[3]
28348 Â 21% +190.3% 82293 Â 2% sched_debug.cpu#55.sched_count
431592 Â 5% +50.3% 648536 Â 8% sched_debug.cpu#55.avg_idle
14 Â 5% -37.5% 8 Â 9% sched_debug.cpu#55.cpu_load[4]
29536 Â 13% +232.5% 98216 Â 30% sched_debug.cpu#56.sched_count
149577 Â 0% -13.5% 129346 Â 1% sched_debug.cpu#56.nr_load_updates
14 Â 5% -39.3% 8 Â 10% sched_debug.cpu#56.cpu_load[4]
15441 Â 12% +158.8% 39963 Â 4% sched_debug.cpu#56.ttwu_count
14 Â 5% -41.1% 8 Â 10% sched_debug.cpu#56.cpu_load[3]
14 Â 5% -42.9% 8 Â 8% sched_debug.cpu#56.cpu_load[2]
14 Â 5% -42.9% 8 Â 8% sched_debug.cpu#56.cpu_load[0]
5178 Â 1% -27.9% 3734 Â 8% sched_debug.cpu#56.curr->pid
14 Â 5% -42.9% 8 Â 8% sched_debug.cpu#56.cpu_load[1]
5731 Â 3% +480.4% 33266 Â 3% sched_debug.cpu#56.sched_goidle
382742 Â 10% +80.1% 689145 Â 6% sched_debug.cpu#56.avg_idle
14 Â 8% -37.5% 8 Â 9% sched_debug.cpu#56.load
27782 Â 14% +183.1% 78659 Â 4% sched_debug.cpu#56.nr_switches
13 Â 3% -34.5% 9 Â 0% sched_debug.cpu#57.cpu_load[4]
13 Â 6% -43.6% 7 Â 14% sched_debug.cpu#57.load
14 Â 5% -42.9% 8 Â 0% sched_debug.cpu#57.cpu_load[0]
15431 Â 3% +156.5% 39587 Â 2% sched_debug.cpu#57.ttwu_count
149541 Â 0% -13.9% 128797 Â 1% sched_debug.cpu#57.nr_load_updates
6140 Â 4% +436.5% 32946 Â 2% sched_debug.cpu#57.sched_goidle
29782 Â 7% +177.5% 82641 Â 1% sched_debug.cpu#57.sched_count
14 Â 5% -44.6% 7 Â 5% sched_debug.cpu#57.cpu_load[1]
13 Â 3% -41.8% 8 Â 0% sched_debug.cpu#57.cpu_load[2]
27648 Â 5% +181.2% 77741 Â 1% sched_debug.cpu#57.nr_switches
5142 Â 1% -28.5% 3676 Â 13% sched_debug.cpu#57.curr->pid
13 Â 3% -38.2% 8 Â 5% sched_debug.cpu#57.cpu_load[3]
412300 Â 8% +63.2% 672757 Â 6% sched_debug.cpu#57.avg_idle
25198 Â 10% +209.5% 77988 Â 2% sched_debug.cpu#58.nr_switches
14 Â 5% -40.4% 8 Â 10% sched_debug.cpu#58.load
5766 Â 4% +471.7% 32966 Â 2% sched_debug.cpu#58.sched_goidle
5306 Â 1% -29.0% 3766 Â 8% sched_debug.cpu#58.curr->pid
14008 Â 11% +186.0% 40066 Â 4% sched_debug.cpu#58.ttwu_count
14 Â 3% -38.6% 8 Â 4% sched_debug.cpu#58.cpu_load[4]
149466 Â 0% -13.4% 129403 Â 1% sched_debug.cpu#58.nr_load_updates
14 Â 0% -41.1% 8 Â 5% sched_debug.cpu#58.cpu_load[1]
416949 Â 9% +60.8% 670547 Â 3% sched_debug.cpu#58.avg_idle
14 Â 0% -41.1% 8 Â 5% sched_debug.cpu#58.cpu_load[3]
27843 Â 16% +195.6% 82292 Â 1% sched_debug.cpu#58.sched_count
14 Â 3% -42.1% 8 Â 5% sched_debug.cpu#58.cpu_load[0]
14 Â 0% -41.1% 8 Â 5% sched_debug.cpu#58.cpu_load[2]
13 Â 3% -42.6% 7 Â 10% sched_debug.cpu#59.cpu_load[1]
5322 Â 0% -32.1% 3611 Â 11% sched_debug.cpu#59.curr->pid
385063 Â 11% +81.8% 699943 Â 3% sched_debug.cpu#59.avg_idle
149421 Â 0% -13.5% 129224 Â 1% sched_debug.cpu#59.nr_load_updates
24511 Â 9% +213.0% 76718 Â 2% sched_debug.cpu#59.nr_switches
14102 Â 13% +177.8% 39172 Â 1% sched_debug.cpu#59.ttwu_count
28477 Â 10% +180.7% 79943 Â 4% sched_debug.cpu#59.sched_count
13 Â 3% -42.6% 7 Â 5% sched_debug.cpu#59.cpu_load[0]
14 Â 0% -37.5% 8 Â 9% sched_debug.cpu#59.cpu_load[4]
14 Â 0% -35.7% 9 Â 15% sched_debug.cpu#59.load
5642 Â 6% +480.0% 32728 Â 1% sched_debug.cpu#59.sched_goidle
13 Â 3% -43.6% 7 Â 10% sched_debug.cpu#59.cpu_load[2]
14 Â 0% -41.1% 8 Â 5% sched_debug.cpu#59.cpu_load[3]
4986 Â 3% -27.6% 3609 Â 8% sched_debug.cpu#6.curr->pid
14 Â 5% -39.0% 9 Â 0% sched_debug.cpu#6.cpu_load[3]
9630 Â 11% -28.0% 6929 Â 9% sched_debug.cpu#6.ttwu_local
419211 Â 12% +40.3% 588042 Â 6% sched_debug.cpu#6.avg_idle
14 Â 3% -34.5% 9 Â 5% sched_debug.cpu#6.cpu_load[4]
20287 Â 12% +154.0% 51526 Â 2% sched_debug.cpu#6.ttwu_count
14 Â 8% -39.3% 8 Â 5% sched_debug.cpu#6.cpu_load[0]
46733 Â 31% +114.9% 100445 Â 2% sched_debug.cpu#6.sched_count
150916 Â 0% -9.8% 136187 Â 1% sched_debug.cpu#6.nr_load_updates
36120 Â 9% +162.9% 94974 Â 2% sched_debug.cpu#6.nr_switches
14 Â 8% -41.1% 8 Â 5% sched_debug.cpu#6.cpu_load[1]
7113 Â 9% +480.9% 41316 Â 2% sched_debug.cpu#6.sched_goidle
14 Â 5% -39.7% 8 Â 4% sched_debug.cpu#6.cpu_load[2]
14 Â 10% -49.2% 7 Â 6% sched_debug.cpu#60.load
439937 Â 10% +54.5% 679526 Â 5% sched_debug.cpu#60.avg_idle
14 Â 5% -39.7% 8 Â 9% sched_debug.cpu#60.cpu_load[4]
5179 Â 2% -35.5% 3342 Â 10% sched_debug.cpu#60.curr->pid
28843 Â 16% +167.8% 77229 Â 4% sched_debug.cpu#60.nr_switches
33067 Â 12% +247.8% 115018 Â 28% sched_debug.cpu#60.sched_count
14 Â 7% -45.6% 7 Â 10% sched_debug.cpu#60.cpu_load[2]
15944 Â 16% +148.5% 39619 Â 3% sched_debug.cpu#60.ttwu_count
14 Â 7% -43.9% 8 Â 8% sched_debug.cpu#60.cpu_load[3]
149390 Â 0% -13.5% 129257 Â 1% sched_debug.cpu#60.nr_load_updates
5661 Â 5% +476.4% 32633 Â 2% sched_debug.cpu#60.sched_goidle
14 Â 7% -49.1% 7 Â 5% sched_debug.cpu#60.cpu_load[1]
14 Â 7% -51.7% 7 Â 10% sched_debug.cpu#60.cpu_load[0]
2 Â 47% +172.7% 7 Â 46% sched_debug.cpu#60.nr_uninterruptible
14 Â 3% -37.9% 9 Â 7% sched_debug.cpu#61.cpu_load[4]
149440 Â 0% -13.8% 128815 Â 1% sched_debug.cpu#61.nr_load_updates
29614 Â 21% +172.4% 80671 Â 7% sched_debug.cpu#61.sched_count
14 Â 3% -43.1% 8 Â 10% sched_debug.cpu#61.cpu_load[3]
14 Â 3% -43.9% 8 Â 8% sched_debug.cpu#61.cpu_load[1]
13368 Â 10% +181.0% 37568 Â 2% sched_debug.cpu#61.ttwu_count
14 Â 3% -44.8% 8 Â 8% sched_debug.cpu#61.cpu_load[2]
5539 Â 4% +492.5% 32824 Â 1% sched_debug.cpu#61.sched_goidle
386906 Â 8% +76.3% 682133 Â 7% sched_debug.cpu#61.avg_idle
14 Â 3% -42.1% 8 Â 5% sched_debug.cpu#61.cpu_load[0]
5248 Â 2% -33.1% 3512 Â 8% sched_debug.cpu#61.curr->pid
14 Â 3% -38.6% 8 Â 21% sched_debug.cpu#61.load
23867 Â 10% +214.5% 75068 Â 1% sched_debug.cpu#61.nr_switches
25806 Â 16% +202.7% 78114 Â 0% sched_debug.cpu#62.nr_switches
14 Â 3% -43.9% 8 Â 8% sched_debug.cpu#62.cpu_load[2]
149356 Â 0% -13.5% 129229 Â 1% sched_debug.cpu#62.nr_load_updates
14 Â 7% -45.8% 8 Â 8% sched_debug.cpu#62.cpu_load[0]
13869 Â 13% +183.4% 39304 Â 0% sched_debug.cpu#62.ttwu_count
5710 Â 6% +477.2% 32961 Â 1% sched_debug.cpu#62.sched_goidle
14 Â 3% -44.8% 8 Â 8% sched_debug.cpu#62.cpu_load[1]
14 Â 5% -39.3% 8 Â 10% sched_debug.cpu#62.load
14 Â 3% -38.6% 8 Â 12% sched_debug.cpu#62.cpu_load[4]
4966 Â 5% -31.5% 3402 Â 7% sched_debug.cpu#62.curr->pid
378717 Â 15% +76.9% 669793 Â 4% sched_debug.cpu#62.avg_idle
14 Â 3% -42.1% 8 Â 13% sched_debug.cpu#62.cpu_load[3]
29176 Â 10% +189.4% 84428 Â 5% sched_debug.cpu#62.sched_count
5768 Â 4% +457.9% 32180 Â 3% sched_debug.cpu#63.sched_goidle
14 Â 3% -42.1% 8 Â 10% sched_debug.cpu#63.cpu_load[4]
14 Â 0% -44.6% 7 Â 10% sched_debug.cpu#63.cpu_load[0]
14 Â 0% -46.4% 7 Â 11% sched_debug.cpu#63.cpu_load[2]
5225 Â 0% -31.4% 3584 Â 8% sched_debug.cpu#63.curr->pid
30028 Â 12% +167.0% 80165 Â 5% sched_debug.cpu#63.sched_count
149417 Â 0% -13.6% 129103 Â 1% sched_debug.cpu#63.nr_load_updates
14 Â 0% -44.6% 7 Â 10% sched_debug.cpu#63.cpu_load[1]
14 Â 3% -43.9% 8 Â 8% sched_debug.cpu#63.cpu_load[3]
28749 Â 9% +160.3% 74821 Â 4% sched_debug.cpu#63.nr_switches
16124 Â 8% +139.4% 38596 Â 4% sched_debug.cpu#63.ttwu_count
381327 Â 11% +76.9% 674475 Â 1% sched_debug.cpu#63.avg_idle
13 Â 3% -36.4% 8 Â 4% sched_debug.cpu#63.load
5228 Â 2% -30.4% 3638 Â 18% sched_debug.cpu#7.curr->pid
32692 Â 14% +177.2% 90611 Â 2% sched_debug.cpu#7.nr_switches
14 Â 3% -39.7% 8 Â 4% sched_debug.cpu#7.cpu_load[3]
432214 Â 9% +31.0% 566127 Â 7% sched_debug.cpu#7.avg_idle
18548 Â 13% +164.5% 49069 Â 3% sched_debug.cpu#7.ttwu_count
14 Â 3% -35.1% 9 Â 8% sched_debug.cpu#7.cpu_load[0]
14 Â 3% -36.2% 9 Â 8% sched_debug.cpu#7.cpu_load[4]
8398 Â 21% -33.4% 5591 Â 4% sched_debug.cpu#7.ttwu_local
14 Â 3% -36.8% 9 Â 7% sched_debug.cpu#7.cpu_load[1]
14 Â 3% -36.8% 9 Â 19% sched_debug.cpu#7.load
151006 Â 0% -9.9% 136117 Â 1% sched_debug.cpu#7.nr_load_updates
14 Â 3% -39.7% 8 Â 4% sched_debug.cpu#7.cpu_load[2]
6755 Â 8% +495.5% 40224 Â 2% sched_debug.cpu#7.sched_goidle
44491 Â 27% +113.6% 95041 Â 1% sched_debug.cpu#7.sched_count
430043 Â 6% +28.0% 550405 Â 5% sched_debug.cpu#8.avg_idle
36918 Â 14% +155.5% 94310 Â 2% sched_debug.cpu#8.nr_switches
40673 Â 12% +150.6% 101925 Â 4% sched_debug.cpu#8.sched_count
15 Â 2% -37.7% 9 Â 5% sched_debug.cpu#8.cpu_load[3]
15 Â 5% -41.0% 9 Â 0% sched_debug.cpu#8.cpu_load[1]
9474 Â 15% -36.4% 6024 Â 8% sched_debug.cpu#8.ttwu_local
21205 Â 11% +135.4% 49914 Â 2% sched_debug.cpu#8.ttwu_count
1 Â 0% -100.0% 0 Â 0% sched_debug.cpu#8.nr_running
15 Â 3% -40.3% 9 Â 4% sched_debug.cpu#8.cpu_load[2]
5366 Â 0% -29.7% 3773 Â 8% sched_debug.cpu#8.curr->pid
15 Â 5% -39.3% 9 Â 4% sched_debug.cpu#8.cpu_load[0]
153610 Â 1% -11.4% 136085 Â 1% sched_debug.cpu#8.nr_load_updates
15 Â 4% -35.0% 9 Â 4% sched_debug.cpu#8.load
15 Â 2% -36.1% 9 Â 4% sched_debug.cpu#8.cpu_load[4]
6904 Â 9% +503.5% 41667 Â 1% sched_debug.cpu#8.sched_goidle
12279 Â 13% -42.9% 7013 Â 16% sched_debug.cpu#9.ttwu_local
422957 Â 10% +33.0% 562718 Â 10% sched_debug.cpu#9.avg_idle
14 Â 2% -33.9% 9 Â 4% sched_debug.cpu#9.cpu_load[4]
52631 Â 6% +91.3% 100677 Â 3% sched_debug.cpu#9.sched_count
14 Â 2% -27.1% 10 Â 7% sched_debug.cpu#9.load
7657 Â 11% +454.6% 42463 Â 1% sched_debug.cpu#9.sched_goidle
41670 Â 11% +135.0% 97935 Â 3% sched_debug.cpu#9.nr_switches
23117 Â 10% +121.6% 51221 Â 3% sched_debug.cpu#9.ttwu_count
14 Â 2% -40.7% 8 Â 4% sched_debug.cpu#9.cpu_load[2]
14 Â 2% -39.0% 9 Â 7% sched_debug.cpu#9.cpu_load[3]
14 Â 3% -38.6% 8 Â 4% sched_debug.cpu#9.cpu_load[0]
151315 Â 0% -10.2% 135912 Â 1% sched_debug.cpu#9.nr_load_updates
14 Â 3% -39.7% 8 Â 4% sched_debug.cpu#9.cpu_load[1]
5086 Â 5% -13.7% 4389 Â 7% sched_debug.cpu#9.curr->pid
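The sched_debug.cpu#N.* rows above are sampled from the kernel's
scheduler debug interface. A quick way to eyeball the same per-CPU
counters by hand (a sketch; assumes CONFIG_SCHED_DEBUG=y so that
/proc/sched_debug exists; the exact field list varies by kernel version):

# dump the stats block for one CPU, e.g. cpu#10
grep -A 25 '^cpu#10' /proc/sched_debug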

lkp-nex06: Nehalem-EX
Memory: 64G

To reproduce:

apt-get install ruby
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
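
For a quick sanity check outside the LKP harness, the pigz side of the
workload can be approximated directly. A minimal sketch, assuming the
"100%-128K" in the test path means one compression thread per CPU and
pigz's 128 KiB block size (-b takes KiB; the real job generates its own
input, so this only exercises the threading, not the exact data):

# compress 1 GiB of throwaway input with all CPUs and 128K blocks
dd if=/dev/zero bs=1M count=1024 2>/dev/null |
    pigz -p "$(nproc)" -b 128 > /dev/null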


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang

---
testcase: will-it-scale
default-monitors:
wait: pre-test
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 10
default-watchdogs:
watch-oom:
watchdog:
cpufreq_governor: powersave
commit: 41be894106982f71da9469e310cf49d395119751
model: Grantley Haswell
nr_cpu: 16
memory: 16G
hdd_partitions:
swap_partitions:
rootfs_partition:
perf-profile:
freq: 800
will-it-scale:
test: pread3
testbox: lituya
tbox_group: lituya
kconfig: x86_64-rhel
enqueue_time: 2015-05-06 17:07:11.102936852 +08:00
user: lkp
queue: cyclic
compiler: gcc-4.9
head_commit: 41be894106982f71da9469e310cf49d395119751
base_commit: 5ebe6afaf0057ac3eaeb98defd5456894b446d22
branch: linux-devel/devel-hourly-2015050621
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/41be894106982f71da9469e310cf49d395119751/vmlinuz-4.1.0-rc2-wl-12844-g41be894"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/will-it-scale/powersave-pread3/lituya/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/41be894106982f71da9469e310cf49d395119751/0"
LKP_SERVER: inn
job_file: "/lkp/scheduled/lituya/cyclic_will-it-scale-powersave-pread3-x86_64-rhel-CYCLIC_HEAD-41be894106982f71da9469e310cf49d395119751-0-20150506-92831-1urwahj.yaml"
dequeue_time: 2015-05-07 08:48:36.146543177 +08:00
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lituya/cyclic_will-it-scale-powersave-pread3-x86_64-rhel-CYCLIC_HEAD-41be894106982f71da9469e310cf49d395119751-0-20150506-92831-1urwahj.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015050621
- commit=41be894106982f71da9469e310cf49d395119751
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/41be894106982f71da9469e310cf49d395119751/vmlinuz-4.1.0-rc2-wl-12844-g41be894
- RESULT_ROOT=/result/will-it-scale/powersave-pread3/lituya/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/41be894106982f71da9469e310cf49d395119751/0
- LKP_SERVER=inn
- |2-


earlyprintk=ttyS0,115200
debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
console=ttyS0,115200 console=tty0 vga=normal

rw
max_uptime: 1500
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/41be894106982f71da9469e310cf49d395119751/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/will-it-scale.cgz"
job_state: finished
loadavg: 9.93 6.25 2.60 1/213 5701
start_time: '1430959750'
end_time: '1430960054'
version: "/lkp/lkp/.src-20150506-223656"
echo powersave > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
./runtest.py pread3 32 both 1 8 12 16
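
The sixteen per-CPU scaling_governor writes in the log above collapse
into a single loop; an equivalent sketch (assumes every online CPU
exposes a cpufreq policy directory):

for g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
    echo powersave > "$g"
done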