[LKP] [vfs] 75cbe701a42: +40.5% vmstat.system.in
From: Huang Ying
Date: Mon Dec 15 2014 - 03:26:14 EST
FYI, we noticed the following changes on commit 75cbe701a4251fcd8b846d52ae42f88c9a8e5e93 ("vfs: Remove i_dquot field from inode"):
testbox/testcase/testparams: lkp-nex05/will-it-scale/performance-pwrite3
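The pwrite3 case of will-it-scale is, roughly, each test task looping on pwrite() of a small buffer into a file at a fixed offset, so the deltas below mostly reflect the kernel write path (the perf-profile rows further down show sys_pwrite64 -> vfs_write -> fsnotify, shmem_write_begin and the generic_file_write_iter mutex) rather than disk I/O. A minimal sketch of that kind of loop, assuming the usual will-it-scale shape; the file name and buffer size are illustrative, not taken from the actual testcase source:

#define _GNU_SOURCE
/*
 * Illustrative sketch only -- not the actual will-it-scale pwrite
 * testcase.  Each iteration rewrites the same 4 KiB at offset 0, so
 * the hot path is the VFS write path rather than real I/O.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	static char buf[4096];
	int fd = open("/tmp/willitscale.dat", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		exit(1);
	for (;;)
		if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
			exit(1);
}
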
507e1fa697097b48 75cbe701a4251fcd8b846d52ae
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
157162 Â 12% +420.1% 817388 Â 41% sched_debug.cfs_rq[0]:/.min_vruntime
6 Â 7% +533.8% 41 Â 42% sched_debug.cfs_rq[22]:/.tg_load_contrib
1 Â 0% +440.0% 5 Â 43% sched_debug.cpu#26.cpu_load[1]
0 Â 0% +Inf% 5 Â 45% sched_debug.cfs_rq[26]:/.runnable_load_avg
0 Â 0% +Inf% 5 Â 36% sched_debug.cpu#30.cpu_load[0]
8431.40 Â 30% -100.0% 0.00 Â 0% sched_debug.cfs_rq[34]:/.MIN_vruntime
8431.40 Â 30% -100.0% 0.00 Â 0% sched_debug.cfs_rq[34]:/.max_vruntime
0 Â 0% +Inf% 3 Â 19% sched_debug.cpu#57.cpu_load[4]
145948 Â 31% -65.3% 50659 Â 47% sched_debug.cpu#3.sched_count
2 Â 20% +388.0% 12 Â 44% sched_debug.cfs_rq[33]:/.runnable_load_avg
102647 Â 42% -65.2% 35676 Â 24% sched_debug.cpu#11.sched_count
185980 Â 17% -59.1% 76091 Â 43% sched_debug.cpu#10.sched_count
92451 Â 16% -65.8% 31628 Â 13% sched_debug.cpu#10.sched_goidle
185092 Â 16% -65.7% 63435 Â 13% sched_debug.cpu#10.nr_switches
191359 Â 6% +227.3% 626233 Â 42% sched_debug.cfs_rq[34]:/.min_vruntime
87689 Â 15% -64.2% 31375 Â 13% sched_debug.cpu#10.ttwu_count
205303 Â 10% +195.9% 607409 Â 43% sched_debug.cfs_rq[38]:/.min_vruntime
40762 Â 27% -45.8% 22112 Â 37% sched_debug.cpu#25.ttwu_count
41187 Â 23% -45.7% 22347 Â 37% sched_debug.cpu#25.sched_goidle
82643 Â 23% -45.4% 45095 Â 37% sched_debug.cpu#25.nr_switches
2702 Â 9% +155.2% 6896 Â 45% sched_debug.cfs_rq[43]:/.avg->runnable_avg_sum
58 Â 10% +158.3% 149 Â 46% sched_debug.cfs_rq[43]:/.tg_runnable_contrib
83542 Â 22% -43.9% 46893 Â 36% sched_debug.cpu#25.sched_count
51 Â 8% +187.4% 148 Â 42% sched_debug.cfs_rq[49]:/.tg_runnable_contrib
2394 Â 9% +184.2% 6805 Â 42% sched_debug.cfs_rq[49]:/.avg->runnable_avg_sum
292617 Â 23% -41.0% 172597 Â 42% sched_debug.cpu#6.sched_count
145348 Â 23% -41.6% 84899 Â 41% sched_debug.cpu#6.sched_goidle
290925 Â 23% -41.6% 169994 Â 41% sched_debug.cpu#6.nr_switches
34916 Â 10% -45.2% 19148 Â 31% sched_debug.cpu#17.sched_goidle
70132 Â 10% -44.8% 38690 Â 31% sched_debug.cpu#17.nr_switches
3371 Â 20% +542.7% 21665 Â 42% sched_debug.cfs_rq[13]:/.exec_clock
33857 Â 13% -43.0% 19297 Â 29% sched_debug.cpu#17.ttwu_count
464 Â 0% +288.4% 1804 Â 47% sched_debug.cpu#7.curr->pid
93 Â 11% +128.0% 212 Â 34% sched_debug.cfs_rq[42]:/.tg_runnable_contrib
4288 Â 11% +126.8% 9724 Â 33% sched_debug.cfs_rq[42]:/.avg->runnable_avg_sum
70149 Â 10% -43.6% 39548 Â 29% sched_debug.cpu#17.sched_count
310 Â 6% +432.2% 1652 Â 45% sched_debug.cpu#18.curr->pid
2155 Â 7% +329.7% 9262 Â 45% sched_debug.cfs_rq[23]:/.exec_clock
166703 Â 21% -49.1% 84791 Â 30% sched_debug.cpu#2.sched_goidle
333623 Â 21% -49.1% 169785 Â 30% sched_debug.cpu#2.nr_switches
333958 Â 21% -44.4% 185671 Â 35% sched_debug.cpu#2.sched_count
9362 Â 9% +118.8% 20488 Â 37% sched_debug.cfs_rq[42]:/.exec_clock
0 Â 0% +Inf% 2 Â 41% sched_debug.cpu#11.cpu_load[4]
72346 Â 28% -40.5% 43020 Â 23% sched_debug.cpu#19.sched_count
22735 Â 9% -35.8% 14591 Â 29% sched_debug.cpu#29.ttwu_count
88509 Â 10% -42.9% 50561 Â 32% sched_debug.cpu#5.sched_goidle
177345 Â 10% -42.8% 101466 Â 31% sched_debug.cpu#5.nr_switches
0 Â 0% +Inf% 3 Â 36% sched_debug.cpu#11.cpu_load[3]
178372 Â 10% -39.4% 108021 Â 19% sched_debug.cpu#5.sched_count
176 Â 46% +778.8% 1551 Â 42% sched_debug.cpu#14.curr->pid
1 Â 0% +500.0% 6 Â 34% sched_debug.cpu#20.cpu_load[0]
22743 Â 12% -32.4% 15363 Â 29% sched_debug.cpu#29.sched_goidle
68316 Â 13% -27.5% 49497 Â 33% sched_debug.cpu#4.ttwu_count
26811 Â 16% -36.8% 16949 Â 30% sched_debug.cpu#11.ttwu_count
45868 Â 12% -31.6% 31356 Â 29% sched_debug.cpu#29.nr_switches
96945 Â 12% -40.3% 57836 Â 34% sched_debug.cpu#5.ttwu_count
25741 Â 14% -33.3% 17166 Â 26% sched_debug.cpu#11.sched_goidle
51764 Â 13% -33.0% 34703 Â 25% sched_debug.cpu#11.nr_switches
63300 Â 13% -20.0% 50621 Â 18% sched_debug.cpu#26.sched_goidle
126782 Â 13% -20.0% 101443 Â 18% sched_debug.cpu#26.nr_switches
5 Â 40% +116.0% 10 Â 15% sched_debug.cpu#48.cpu_load[4]
0 Â 0% +Inf% 5 Â 36% sched_debug.cfs_rq[20]:/.runnable_load_avg
59777 Â 12% -19.2% 48285 Â 17% sched_debug.cpu#26.ttwu_count
7 Â 6% +52.0% 11 Â 10% sched_debug.cpu#40.cpu_load[4]
1 Â 33% +220.0% 4 Â 24% sched_debug.cpu#57.cpu_load[3]
483 Â 19% +378.2% 2312 Â 36% sched_debug.cpu#0.curr->pid
172 Â 4% +71.5% 295 Â 22% sched_debug.cfs_rq[40]:/.tg_runnable_contrib
55388 Â 24% -24.6% 41749 Â 39% sched_debug.cpu#57.sched_goidle
7887 Â 4% +71.3% 13512 Â 23% sched_debug.cfs_rq[40]:/.avg->runnable_avg_sum
5 Â 40% +120.0% 11 Â 16% sched_debug.cpu#48.cpu_load[3]
46362 Â 11% -25.4% 34597 Â 24% sched_debug.cpu#29.sched_count
43 Â 13% +309.8% 176 Â 35% sched_debug.cfs_rq[9]:/.tg_runnable_contrib
75 Â 14% +158.7% 194 Â 39% sched_debug.cfs_rq[37]:/.tg_runnable_contrib
3468 Â 14% +157.2% 8919 Â 39% sched_debug.cfs_rq[37]:/.avg->runnable_avg_sum
2015 Â 13% +302.0% 8102 Â 35% sched_debug.cfs_rq[9]:/.avg->runnable_avg_sum
32 Â 9% +297.5% 127 Â 42% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
1507 Â 7% +290.1% 5878 Â 42% sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
3998 ± 8% -14.1% 3433 ± 9% cpuidle.C1E-NHM.usage
111142 Â 24% -24.5% 83920 Â 39% sched_debug.cpu#57.nr_switches
111162 Â 24% -24.3% 84203 Â 39% sched_debug.cpu#57.sched_count
63 Â 19% +187.6% 181 Â 36% sched_debug.cfs_rq[35]:/.tg_runnable_contrib
71 Â 4% +145.6% 174 Â 26% sched_debug.cfs_rq[61]:/.tg_runnable_contrib
2927 Â 18% +185.8% 8365 Â 36% sched_debug.cfs_rq[35]:/.avg->runnable_avg_sum
3276 Â 3% +144.9% 8022 Â 26% sched_debug.cfs_rq[61]:/.avg->runnable_avg_sum
459 Â 10% -25.0% 344 Â 12% sched_debug.cpu#40.ttwu_local
196 Â 3% -14.0% 169 Â 10% sched_debug.cpu#46.ttwu_local
178384 Â 0% -32.1% 121086 Â 29% sched_debug.cpu#62.ttwu_count
252 Â 24% +336.6% 1100 Â 37% sched_debug.cpu#15.curr->pid
2364 Â 5% +257.1% 8441 Â 38% sched_debug.cfs_rq[18]:/.avg->runnable_avg_sum
6 Â 0% +103.3% 12 Â 24% sched_debug.cpu#48.cpu_load[0]
4 Â 33% +148.9% 11 Â 19% sched_debug.cpu#48.cpu_load[2]
8 Â 12% +45.0% 11 Â 11% sched_debug.cpu#40.cpu_load[3]
5 Â 20% +128.0% 11 Â 21% sched_debug.cpu#48.cpu_load[1]
8886 Â 8% +114.1% 19030 Â 35% sched_debug.cfs_rq[37]:/.exec_clock
51 Â 11% +253.7% 180 Â 36% sched_debug.cfs_rq[22]:/.tg_runnable_contrib
2358 Â 11% +251.6% 8293 Â 36% sched_debug.cfs_rq[22]:/.avg->runnable_avg_sum
331 Â 0% -16.1% 278 Â 4% sched_debug.cpu#49.ttwu_local
26574 Â 8% +47.3% 39151 Â 16% sched_debug.cfs_rq[34]:/.exec_clock
8204 Â 3% +65.0% 13540 Â 24% sched_debug.cfs_rq[36]:/.avg->runnable_avg_sum
179 Â 3% +65.3% 295 Â 24% sched_debug.cfs_rq[36]:/.tg_runnable_contrib
330 Â 17% +325.6% 1404 Â 37% sched_debug.cpu#20.curr->pid
2 Â 0% +180.0% 5 Â 40% sched_debug.cfs_rq[62]:/.runnable_load_avg
597 Â 5% +155.0% 1523 Â 27% sched_debug.cpu#42.curr->pid
555 Â 23% +115.5% 1197 Â 43% sched_debug.cpu#41.curr->pid
8581 Â 0% +56.5% 13426 Â 25% sched_debug.cfs_rq[60]:/.avg->runnable_avg_sum
187 Â 0% +56.2% 292 Â 25% sched_debug.cfs_rq[60]:/.tg_runnable_contrib
2 Â 50% +210.0% 6 Â 27% sched_debug.cpu#57.cpu_load[2]
71489 Â 4% -49.9% 35802 Â 30% sched_debug.cpu#55.sched_goidle
7457 Â 17% +63.9% 12222 Â 19% sched_debug.cfs_rq[38]:/.avg->runnable_avg_sum
163 Â 17% +63.4% 266 Â 19% sched_debug.cfs_rq[38]:/.tg_runnable_contrib
923 Â 9% +105.2% 1894 Â 28% sched_debug.cpu#60.curr->pid
4 Â 0% +185.0% 11 Â 48% sched_debug.cpu#41.cpu_load[0]
43 Â 9% +202.8% 130 Â 43% sched_debug.cfs_rq[31]:/.blocked_load_avg
143344 Â 4% -49.7% 72109 Â 30% sched_debug.cpu#55.nr_switches
154157 Â 16% -41.3% 90499 Â 24% sched_debug.cpu#33.sched_goidle
7157 Â 13% +80.3% 12902 Â 24% sched_debug.cfs_rq[34]:/.avg->runnable_avg_sum
143368 Â 4% -49.6% 72207 Â 30% sched_debug.cpu#55.sched_count
398727 Â 7% -25.0% 299212 Â 33% sched_debug.cpu#56.ttwu_count
331 Â 38% -53.2% 154 Â 38% sched_debug.cfs_rq[45]:/.blocked_load_avg
155 Â 14% +81.0% 281 Â 24% sched_debug.cfs_rq[34]:/.tg_runnable_contrib
17 Â 14% -30.3% 12 Â 34% sched_debug.cfs_rq[59]:/.runnable_load_avg
37 Â 4% +202.4% 113 Â 39% sched_debug.cfs_rq[23]:/.tg_runnable_contrib
173012 Â 19% -42.7% 99216 Â 27% sched_debug.cpu#33.ttwu_count
75987 Â 4% -50.1% 37929 Â 31% sched_debug.cpu#55.ttwu_count
308942 Â 16% -41.3% 181404 Â 24% sched_debug.cpu#33.nr_switches
3 Â 14% +180.0% 9 Â 35% sched_debug.cpu#41.cpu_load[1]
1780 Â 4% +194.9% 5248 Â 39% sched_debug.cfs_rq[23]:/.avg->runnable_avg_sum
3 Â 14% +128.6% 8 Â 28% sched_debug.cpu#41.cpu_load[2]
29 Â 10% +263.4% 105 Â 46% sched_debug.cfs_rq[21]:/.tg_runnable_contrib
308995 Â 16% -41.2% 181562 Â 24% sched_debug.cpu#33.sched_count
74 Â 14% -53.5% 34 Â 45% sched_debug.cfs_rq[12]:/.tg_load_contrib
1388 Â 9% +250.5% 4865 Â 45% sched_debug.cfs_rq[21]:/.avg->runnable_avg_sum
2 Â 20% +180.0% 7 Â 40% sched_debug.cfs_rq[41]:/.runnable_load_avg
191260 Â 12% -43.1% 108749 Â 25% sched_debug.cpu#45.ttwu_count
68784 Â 0% -36.6% 43633 Â 40% sched_debug.cpu#53.ttwu_count
173314 Â 9% -45.9% 93834 Â 26% sched_debug.cpu#45.sched_goidle
40 Â 17% -40.0% 24 Â 21% sched_debug.cfs_rq[32]:/.runnable_load_avg
55810 Â 12% -26.3% 41131 Â 30% sched_debug.cpu#39.sched_goidle
179862 Â 1% -35.8% 115446 Â 38% sched_debug.cpu#50.ttwu_count
23 Â 17% -43.5% 13 Â 40% sched_debug.cpu#59.load
23 Â 17% -43.5% 13 Â 40% sched_debug.cfs_rq[59]:/.load
211 Â 21% -47.3% 111 Â 36% sched_debug.cfs_rq[5]:/.blocked_load_avg
39 Â 18% -37.7% 24 Â 24% sched_debug.cpu#32.cpu_load[1]
347042 Â 9% -45.8% 188111 Â 25% sched_debug.cpu#45.nr_switches
89 Â 10% -52.6% 42 Â 40% sched_debug.cfs_rq[2]:/.tg_load_contrib
111996 Â 12% -26.2% 82661 Â 29% sched_debug.cpu#39.nr_switches
347092 Â 9% -45.8% 188225 Â 25% sched_debug.cpu#45.sched_count
9 Â 26% -43.2% 5 Â 44% sched_debug.cpu#47.cpu_load[1]
337 Â 37% -51.7% 162 Â 37% sched_debug.cfs_rq[45]:/.tg_load_contrib
6701 Â 12% +137.1% 15886 Â 29% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
171660 Â 8% -33.6% 114067 Â 27% sched_debug.cpu#54.ttwu_count
146 Â 12% +135.8% 345 Â 29% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
60240 Â 12% -27.1% 43911 Â 29% sched_debug.cpu#39.ttwu_count
180017 Â 5% -34.2% 118414 Â 26% sched_debug.cpu#54.sched_goidle
6 Â 7% +96.9% 12 Â 18% sched_debug.cpu#34.cpu_load[4]
48 Â 19% +180.0% 135 Â 44% sched_debug.cfs_rq[31]:/.tg_load_contrib
5564 Â 25% +196.0% 16471 Â 49% sched_debug.cfs_rq[1]:/.exec_clock
164454 Â 22% -39.4% 99729 Â 29% sched_debug.cpu#41.sched_goidle
113012 Â 11% -26.8% 82773 Â 29% sched_debug.cpu#39.sched_count
184124 Â 1% -36.6% 116679 Â 38% sched_debug.cpu#50.sched_goidle
360271 Â 5% -34.2% 237049 Â 26% sched_debug.cpu#54.nr_switches
39 Â 18% -34.7% 25 Â 23% sched_debug.cpu#32.cpu_load[0]
360327 Â 5% -34.2% 237201 Â 26% sched_debug.cpu#54.sched_count
339 Â 5% -16.4% 283 Â 13% sched_debug.cpu#51.ttwu_local
329307 Â 22% -39.3% 199906 Â 29% sched_debug.cpu#41.nr_switches
329356 Â 22% -39.3% 200070 Â 29% sched_debug.cpu#41.sched_count
368489 Â 1% -36.6% 233598 Â 37% sched_debug.cpu#50.nr_switches
37 Â 13% -30.3% 25 Â 23% sched_debug.cpu#32.cpu_load[2]
6 Â 16% -26.7% 4 Â 23% sched_debug.cpu#55.cpu_load[3]
368543 Â 1% -36.3% 234920 Â 38% sched_debug.cpu#50.sched_count
60 Â 26% -24.0% 45 Â 41% sched_debug.cfs_rq[38]:/.blocked_load_avg
1 Â 33% +233.3% 5 Â 35% sched_debug.cpu#18.cpu_load[1]
176 Â 25% -29.0% 125 Â 41% sched_debug.cfs_rq[37]:/.blocked_load_avg
9 Â 11% -37.8% 5 Â 24% sched_debug.cpu#55.cpu_load[2]
183394 Â 27% -36.5% 116428 Â 29% sched_debug.cpu#41.ttwu_count
188 Â 26% -28.1% 135 Â 40% sched_debug.cfs_rq[37]:/.tg_load_contrib
30994 Â 21% +103.6% 63115 Â 24% sched_debug.cfs_rq[0]:/.exec_clock
17 Â 35% +103.5% 34 Â 15% sched_debug.cfs_rq[0]:/.load
6 Â 7% +100.0% 13 Â 20% sched_debug.cpu#34.cpu_load[3]
130 Â 17% -40.2% 78 Â 20% sched_debug.cfs_rq[7]:/.tg_load_contrib
12 Â 4% -42.4% 7 Â 28% sched_debug.cpu#55.cpu_load[0]
71751 Â 4% -40.6% 42585 Â 38% sched_debug.cpu#47.ttwu_count
216 Â 20% -44.6% 120 Â 34% sched_debug.cfs_rq[5]:/.tg_load_contrib
436 Â 22% -31.8% 297 Â 15% sched_debug.cpu#33.ttwu_local
14 Â 17% +159.3% 37 Â 39% sched_debug.cfs_rq[28]:/.tg_load_contrib
11 Â 4% -42.6% 6 Â 22% sched_debug.cpu#55.cpu_load[1]
68501 Â 3% -41.2% 40278 Â 38% sched_debug.cpu#47.sched_goidle
137399 Â 3% -41.0% 81095 Â 38% sched_debug.cpu#47.nr_switches
118 Â 15% -40.6% 70 Â 25% sched_debug.cfs_rq[7]:/.blocked_load_avg
7 Â 0% +88.6% 13 Â 18% sched_debug.cpu#34.cpu_load[2]
139101 Â 2% -41.6% 81205 Â 37% sched_debug.cpu#47.sched_count
24 Â 25% -24.2% 18 Â 48% sched_debug.cfs_rq[60]:/.load
1.571e+10 ± 1% -9.5% 1.421e+10 ± 5% cpuidle.C3-NHM.time
66 Â 24% -14.3% 57 Â 26% sched_debug.cfs_rq[38]:/.tg_load_contrib
40806 Â 6% +72.3% 70294 Â 20% sched_debug.cpu#0.nr_load_updates
58 Â 28% +160.2% 152 Â 37% sched_debug.cfs_rq[11]:/.tg_load_contrib
3310987 ± 1% -30.0% 2316926 ± 21% cpuidle.C3-NHM.usage
67 Â 0% +90.5% 128 Â 28% sched_debug.cfs_rq[9]:/.blocked_load_avg
80 Â 27% +109.2% 168 Â 22% sched_debug.cfs_rq[47]:/.blocked_load_avg
186538 Â 0% -37.4% 116738 Â 33% sched_debug.cpu#46.ttwu_count
8 Â 0% +65.0% 13 Â 18% sched_debug.cpu#34.cpu_load[1]
194298 Â 1% -38.6% 119356 Â 33% sched_debug.cpu#46.sched_goidle
71 Â 4% +89.1% 135 Â 25% sched_debug.cfs_rq[9]:/.tg_load_contrib
188 Â 15% +97.7% 372 Â 33% sched_debug.cfs_rq[59]:/.blocked_load_avg
388833 Â 1% -38.6% 238921 Â 33% sched_debug.cpu#46.nr_switches
358921 ± 1% -13.8% 309431 ± 6% softirqs.SCHED
2.34 ± 2% -37.8% 1.46 ± 41% perf-profile.cpu-cycles.__srcu_read_lock.fsnotify.vfs_write.sys_pwrite64.system_call_fastpath
53 Â 3% -34.0% 35 Â 13% sched_debug.cfs_rq[32]:/.load
50 Â 8% -31.1% 34 Â 13% sched_debug.cpu#32.load
206 Â 12% +86.8% 385 Â 31% sched_debug.cfs_rq[59]:/.tg_load_contrib
388897 Â 1% -38.5% 239075 Â 33% sched_debug.cpu#46.sched_count
34 Â 23% +163.5% 89 Â 47% sched_debug.cfs_rq[0]:/.tg_load_contrib
62.72 ± 2% -15.4% 53.04 ± 10% turbostat.%c3
82 Â 10% -29.0% 58 Â 15% sched_debug.cfs_rq[32]:/.tg_load_contrib
780606 Â 7% -7.6% 721141 Â 5% sched_debug.cpu#43.avg_idle
111634 Â 4% -12.2% 97988 Â 10% sched_debug.cpu#32.nr_load_updates
0.93 ± 8% -20.4% 0.74 ± 27% perf-profile.cpu-cycles.shmem_write_begin.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.new_sync_write
774084 Â 8% -8.3% 710179 Â 4% sched_debug.cpu#39.avg_idle
200 ± 9% -16.7% 166 ± 8% numa-vmstat.node3.nr_unevictable
200 ± 9% -16.7% 166 ± 8% numa-vmstat.node3.nr_mlock
672 ± 12% -12.5% 588 ± 18% slabinfo.xfs_buf.active_objs
672 ± 12% -12.5% 588 ± 18% slabinfo.xfs_buf.num_objs
711853 Â 1% -12.6% 622436 Â 7% sched_debug.cpu#37.avg_idle
42208 Â 2% +26.6% 53451 Â 9% sched_debug.cpu#14.nr_load_updates
43074 Â 5% +31.3% 56552 Â 8% sched_debug.cpu#13.nr_load_updates
774013 Â 6% -10.1% 696106 Â 9% sched_debug.cpu#20.avg_idle
742537 Â 4% -12.4% 650469 Â 9% sched_debug.cpu#8.avg_idle
657723 Â 5% -10.2% 590877 Â 8% sched_debug.cpu#6.avg_idle
779627 Â 1% -15.0% 662459 Â 14% sched_debug.cpu#24.avg_idle
43298 Â 2% +25.2% 54214 Â 12% sched_debug.cpu#18.nr_load_updates
40657 Â 1% +11.9% 45515 Â 8% sched_debug.cpu#15.nr_load_updates
652300 Â 5% -8.3% 598417 Â 2% sched_debug.cpu#58.avg_idle
870884 Â 4% -12.9% 758163 Â 5% sched_debug.cpu#31.avg_idle
44351 Â 2% +31.7% 58432 Â 19% sched_debug.cpu#24.nr_load_updates
763225 Â 4% -15.6% 644390 Â 11% sched_debug.cpu#12.avg_idle
776029 Â 0% -13.1% 674169 Â 8% sched_debug.cpu#18.avg_idle
838004 Â 2% -15.9% 704595 Â 6% sched_debug.cpu#30.avg_idle
193 Â 3% -14.3% 165 Â 11% sched_debug.cpu#58.ttwu_local
54047 ± 1% -8.8% 49303 ± 5% numa-meminfo.node0.Slab
1.24 ± 2% -19.2% 1.00 ± 9% perf-profile.cpu-cycles.osq_unlock.mutex_optimistic_spin.__mutex_lock_slowpath.mutex_lock.generic_file_write_iter
40061 Â 0% +12.7% 45154 Â 5% sched_debug.cpu#23.nr_load_updates
795000 Â 0% -14.5% 679866 Â 9% sched_debug.cpu#13.avg_idle
752969 Â 1% -11.2% 668306 Â 5% sched_debug.cpu#14.avg_idle
148074 ± 4% -9.5% 133968 ± 5% numa-numastat.node0.local_node
298993 ± 1% -9.6% 270299 ± 7% softirqs.RCU
835755 ± 0% -11.4% 740167 ± 8% sched_debug.cpu#23.avg_idle
156339 ± 4% -9.0% 142236 ± 5% numa-numastat.node0.numa_hit
1785 ± 5% -8.0% 1642 ± 3% slabinfo.sock_inode_cache.active_objs
1785 ± 5% -8.0% 1642 ± 3% slabinfo.sock_inode_cache.num_objs
238242 ± 3% -9.7% 215107 ± 2% numa-vmstat.node0.numa_local
826398 ± 5% -7.2% 766511 ± 9% sched_debug.cpu#19.avg_idle
245728 ± 3% -9.4% 222564 ± 2% numa-vmstat.node0.numa_hit
1022 ± 2% -8.7% 932 ± 5% slabinfo.RAW.num_objs
1022 ± 2% -8.7% 932 ± 5% slabinfo.RAW.active_objs
904008 ± 2% -16.3% 756394 ± 18% sched_debug.cpu#29.avg_idle
850180 ± 0% -10.3% 762690 ± 5% sched_debug.cpu#25.avg_idle
15949 ± 1% -7.4% 14765 ± 3% slabinfo.anon_vma.active_objs
15949 ± 1% -7.4% 14765 ± 3% slabinfo.anon_vma.num_objs
38 Â 24% +46.5% 56 Â 13% sched_debug.cfs_rq[48]:/.blocked_load_avg
1179 ± 3% -5.1% 1118 ± 5% numa-vmstat.node0.nr_alloc_batch
14112 ± 3% +40.5% 19822 ± 11% vmstat.system.in
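
In each row above, the left value (with its ±stddev) is from the parent commit 507e1fa697097b48 and the right value from 75cbe701a42, with the relative change in between. For the headline interrupt-rate row this works out to (19822 - 14112) / 14112 ≈ +40.5%, the figure in the subject line.
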
testbox/testcase/testparams: lkp-nex06/vm-scalability/performance-300s-small-allocs
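The small-allocs case of vm-scalability is, roughly, tasks repeatedly creating, touching and tearing down many small anonymous mappings for the 300s run. A hedged sketch of that shape; region size and flags here are assumptions, not the actual testcase source:

#define _GNU_SOURCE
/*
 * Illustrative sketch only -- not the actual vm-scalability
 * case-small-allocs.  Each task maps, touches and unmaps small
 * anonymous regions in a loop, stressing mmap/munmap and the
 * page-fault path.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	const size_t sz = 40 * 1024;	/* size is an assumption */

	for (;;) {
		char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			exit(1);
		memset(p, 1, sz);	/* fault the pages in */
		munmap(p, sz);
	}
}
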
507e1fa697097b48 75cbe701a4251fcd8b846d52ae
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
1.35 ± 17% -56.0% 0.59 ± 10% vm-scalability.stddev
8850363 ± 0% -19.9% 7092322 ± 0% vm-scalability.throughput
136 Â 11% +429.6% 720 Â 5% sched_debug.cfs_rq[41]:/.tg_runnable_contrib
2 Â 35% +575.0% 13 Â 12% sched_debug.cpu#41.cpu_load[4]
6253 Â 11% +429.4% 33102 Â 5% sched_debug.cfs_rq[41]:/.avg->runnable_avg_sum
407488 Â 11% +1302.5% 5714993 Â 1% sched_debug.cfs_rq[40]:/.min_vruntime
20998 Â 4% +487.3% 123327 Â 2% sched_debug.cfs_rq[40]:/.exec_clock
548704 Â 13% +945.3% 5735835 Â 1% sched_debug.cfs_rq[39]:/.min_vruntime
1121563 Â 6% +439.2% 6047753 Â 2% sched_debug.cfs_rq[0]:/.min_vruntime
757728 Â 12% +716.5% 6186567 Â 1% sched_debug.cfs_rq[63]:/.min_vruntime
2 Â 19% +422.2% 11 Â 9% sched_debug.cpu#42.cpu_load[4]
1112970 Â 8% +446.4% 6081015 Â 0% sched_debug.cfs_rq[1]:/.min_vruntime
2 Â 35% +500.0% 12 Â 5% sched_debug.cpu#37.cpu_load[4]
555081 Â 11% +934.1% 5740165 Â 1% sched_debug.cfs_rq[36]:/.min_vruntime
566546 Â 9% +919.3% 5774630 Â 0% sched_debug.cfs_rq[35]:/.min_vruntime
21140 Â 15% +476.2% 121799 Â 1% sched_debug.cfs_rq[43]:/.exec_clock
1113551 Â 7% +440.8% 6022454 Â 1% sched_debug.cfs_rq[4]:/.min_vruntime
409522 Â 17% +1278.1% 5643706 Â 1% sched_debug.cfs_rq[43]:/.min_vruntime
564685 Â 11% +912.5% 5717371 Â 1% sched_debug.cfs_rq[33]:/.min_vruntime
588577 Â 12% +879.1% 5762530 Â 2% sched_debug.cfs_rq[32]:/.min_vruntime
510891 Â 2% -86.1% 70759 Â 1% sched_debug.cpu#31.ttwu_count
467778 Â 4% -87.5% 58637 Â 4% sched_debug.cpu#31.sched_goidle
936797 Â 4% -87.0% 121998 Â 4% sched_debug.cpu#31.nr_switches
519499 Â 2% -86.5% 70274 Â 1% sched_debug.cpu#30.ttwu_count
506902 Â 4% -87.5% 63322 Â 1% sched_debug.cpu#30.sched_goidle
1031191 Â 3% -87.1% 133375 Â 1% sched_debug.cpu#30.sched_count
20787 Â 20% +493.7% 123415 Â 2% sched_debug.cfs_rq[44]:/.exec_clock
409766 Â 21% +1295.5% 5718172 Â 3% sched_debug.cfs_rq[44]:/.min_vruntime
1015080 Â 4% -87.0% 131581 Â 1% sched_debug.cpu#30.nr_switches
127 Â 25% +455.2% 709 Â 2% sched_debug.cfs_rq[44]:/.tg_runnable_contrib
1057283 Â 7% +462.5% 5947320 Â 3% sched_debug.cfs_rq[8]:/.min_vruntime
5880 Â 25% +453.7% 32564 Â 2% sched_debug.cfs_rq[44]:/.avg->runnable_avg_sum
507560 Â 3% -85.8% 71967 Â 1% sched_debug.cpu#28.ttwu_count
529976 Â 2% -88.3% 61987 Â 2% sched_debug.cpu#28.sched_goidle
1081294 Â 1% -87.9% 130386 Â 2% sched_debug.cpu#28.sched_count
1061187 Â 2% -87.8% 129262 Â 2% sched_debug.cpu#28.nr_switches
450376 Â 19% +1167.0% 5706353 Â 2% sched_debug.cfs_rq[45]:/.min_vruntime
1 Â 34% +240.0% 4 Â 30% sched_debug.cfs_rq[45]:/.nr_spread_over
516365 Â 2% -86.1% 71689 Â 1% sched_debug.cpu#27.ttwu_count
467103 Â 4% -87.1% 60129 Â 5% sched_debug.cpu#27.sched_goidle
935433 Â 4% -86.6% 125428 Â 5% sched_debug.cpu#27.nr_switches
1054821 Â 2% -87.2% 134954 Â 6% sched_debug.cpu#26.sched_count
1283469 Â 7% +411.7% 6568077 Â 0% sched_debug.cfs_rq[25]:/.min_vruntime
19517 Â 19% +522.5% 121491 Â 1% sched_debug.cfs_rq[46]:/.exec_clock
387725 Â 23% +1349.9% 5621538 Â 1% sched_debug.cfs_rq[46]:/.min_vruntime
130 Â 16% +463.8% 735 Â 6% sched_debug.cfs_rq[46]:/.tg_runnable_contrib
1296121 Â 7% +412.8% 6646523 Â 0% sched_debug.cfs_rq[24]:/.min_vruntime
521743 Â 4% -85.6% 75227 Â 0% sched_debug.cpu#24.ttwu_count
523738 Â 1% -87.9% 63237 Â 4% sched_debug.cpu#24.sched_goidle
1060028 Â 0% -87.3% 134602 Â 5% sched_debug.cpu#24.sched_count
6007 Â 16% +463.6% 33854 Â 6% sched_debug.cfs_rq[46]:/.avg->runnable_avg_sum
1048862 Â 1% -87.4% 132535 Â 5% sched_debug.cpu#24.nr_switches
415579 Â 1% -84.0% 66558 Â 1% sched_debug.cpu#23.ttwu_count
399591 Â 3% -83.8% 64773 Â 1% sched_debug.cpu#23.sched_goidle
802364 Â 3% -82.0% 144403 Â 6% sched_debug.cpu#23.sched_count
800051 Â 3% -83.2% 134420 Â 0% sched_debug.cpu#23.nr_switches
1088620 Â 9% +445.7% 5940679 Â 1% sched_debug.cfs_rq[22]:/.min_vruntime
1034659 Â 8% +474.7% 5946605 Â 0% sched_debug.cfs_rq[13]:/.min_vruntime
417590 Â 1% -83.8% 67501 Â 2% sched_debug.cpu#22.ttwu_count
430636 Â 4% -83.5% 70888 Â 3% sched_debug.cpu#22.sched_goidle
865193 Â 4% -81.4% 160862 Â 9% sched_debug.cpu#22.sched_count
1052369 Â 8% +466.8% 5965329 Â 1% sched_debug.cfs_rq[14]:/.min_vruntime
862183 Â 4% -83.0% 146429 Â 3% sched_debug.cpu#22.nr_switches
750218 Â 13% +733.4% 6252541 Â 1% sched_debug.cfs_rq[62]:/.min_vruntime
19311 Â 11% +527.4% 121159 Â 0% sched_debug.cfs_rq[47]:/.exec_clock
387976 Â 13% +1346.7% 5612772 Â 1% sched_debug.cfs_rq[47]:/.min_vruntime
409721 Â 3% -83.7% 66715 Â 1% sched_debug.cpu#21.ttwu_count
407184 Â 2% -83.2% 68502 Â 11% sched_debug.cpu#21.sched_goidle
20 Â 40% +379.3% 98 Â 30% sched_debug.cpu#21.nr_uninterruptible
815235 Â 2% -82.5% 142411 Â 11% sched_debug.cpu#21.nr_switches
1080091 Â 9% +455.0% 5994880 Â 1% sched_debug.cfs_rq[20]:/.min_vruntime
415446 Â 1% -83.8% 67443 Â 2% sched_debug.cpu#20.ttwu_count
442408 Â 1% -84.4% 68875 Â 1% sched_debug.cpu#20.sched_goidle
888757 Â 1% -82.5% 155372 Â 7% sched_debug.cpu#20.sched_count
885847 Â 1% -83.9% 142506 Â 1% sched_debug.cpu#20.nr_switches
1091055 Â 9% +453.4% 6038436 Â 1% sched_debug.cfs_rq[19]:/.min_vruntime
417773 Â 1% -83.9% 67398 Â 1% sched_debug.cpu#19.ttwu_count
398066 Â 3% -83.9% 63958 Â 2% sched_debug.cpu#19.sched_goidle
500150 Â 16% +1041.3% 5708010 Â 1% sched_debug.cfs_rq[48]:/.min_vruntime
17 Â 33% +360.6% 81 Â 16% sched_debug.cpu#19.nr_uninterruptible
797029 Â 3% -83.4% 132566 Â 2% sched_debug.cpu#19.nr_switches
133 Â 25% +446.0% 727 Â 3% sched_debug.cfs_rq[48]:/.tg_runnable_contrib
1093025 Â 9% +448.3% 5993418 Â 1% sched_debug.cfs_rq[18]:/.min_vruntime
417854 Â 1% -84.0% 66789 Â 2% sched_debug.cpu#18.ttwu_count
428863 Â 3% -84.3% 67425 Â 3% sched_debug.cpu#18.sched_goidle
860091 Â 2% -79.7% 174594 Â 28% sched_debug.cpu#18.sched_count
858626 Â 3% -83.7% 139729 Â 3% sched_debug.cpu#18.nr_switches
6134 Â 25% +445.2% 33448 Â 3% sched_debug.cfs_rq[48]:/.avg->runnable_avg_sum
1069314 Â 8% +465.0% 6041367 Â 0% sched_debug.cfs_rq[17]:/.min_vruntime
1084018 Â 8% +455.3% 6020039 Â 1% sched_debug.cfs_rq[16]:/.min_vruntime
415085 Â 3% -83.5% 68307 Â 2% sched_debug.cpu#16.ttwu_count
1073942 Â 8% +447.6% 5881393 Â 2% sched_debug.cfs_rq[21]:/.min_vruntime
435899 Â 1% -83.8% 70769 Â 2% sched_debug.cpu#16.sched_goidle
876150 Â 1% -78.0% 193078 Â 38% sched_debug.cpu#16.sched_count
872710 Â 1% -83.2% 146906 Â 2% sched_debug.cpu#16.nr_switches
1042616 Â 10% +470.0% 5943066 Â 0% sched_debug.cfs_rq[15]:/.min_vruntime
392967 Â 3% -81.6% 72239 Â 3% sched_debug.cpu#15.ttwu_count
380826 Â 2% -81.3% 71372 Â 2% sched_debug.cpu#15.sched_goidle
762638 Â 2% -80.4% 149503 Â 2% sched_debug.cpu#15.nr_switches
395697 Â 3% -81.9% 71526 Â 3% sched_debug.cpu#14.ttwu_count
411944 Â 3% -82.5% 72082 Â 1% sched_debug.cpu#14.sched_goidle
834606 Â 2% -80.6% 162017 Â 5% sched_debug.cpu#14.sched_count
13 Â 26% +463.0% 76 Â 34% sched_debug.cpu#23.nr_uninterruptible
824706 Â 3% -81.8% 150178 Â 2% sched_debug.cpu#14.nr_switches
390571 Â 3% -81.8% 71105 Â 2% sched_debug.cpu#13.ttwu_count
384103 Â 2% -81.6% 70579 Â 1% sched_debug.cpu#13.sched_goidle
785013 Â 2% -80.3% 155000 Â 3% sched_debug.cpu#13.sched_count
1090952 Â 9% +444.6% 5941241 Â 1% sched_debug.cfs_rq[23]:/.min_vruntime
512564 Â 14% +1013.4% 5706910 Â 1% sched_debug.cfs_rq[50]:/.min_vruntime
769118 Â 2% -80.8% 147440 Â 2% sched_debug.cpu#13.nr_switches
1 Â 34% +280.0% 4 Â 31% sched_debug.cfs_rq[12]:/.nr_spread_over
1044080 Â 7% +462.3% 5871045 Â 2% sched_debug.cfs_rq[12]:/.min_vruntime
391582 Â 3% -81.7% 71718 Â 3% sched_debug.cpu#12.ttwu_count
421874 Â 2% -81.5% 77886 Â 9% sched_debug.cpu#12.sched_goidle
862416 Â 2% -79.9% 172919 Â 13% sched_debug.cpu#12.sched_count
844625 Â 2% -80.7% 162752 Â 9% sched_debug.cpu#12.nr_switches
1058728 Â 8% +465.3% 5985054 Â 0% sched_debug.cfs_rq[11]:/.min_vruntime
16 Â 33% +662.7% 127 Â 22% sched_debug.cpu#11.nr_uninterruptible
1062298 Â 8% +464.6% 5997504 Â 1% sched_debug.cfs_rq[10]:/.min_vruntime
517857 Â 19% +1001.2% 5702462 Â 1% sched_debug.cfs_rq[51]:/.min_vruntime
1048051 Â 7% +471.3% 5987193 Â 1% sched_debug.cfs_rq[9]:/.min_vruntime
392612 Â 3% -81.3% 73222 Â 2% sched_debug.cpu#9.ttwu_count
384344 Â 1% -81.0% 73003 Â 6% sched_debug.cpu#9.sched_goidle
740882 Â 11% +736.6% 6197903 Â 1% sched_debug.cfs_rq[61]:/.min_vruntime
957109 Â 3% -86.7% 127218 Â 5% sched_debug.cpu#27.sched_count
18 Â 47% +529.7% 116 Â 23% sched_debug.cpu#9.nr_uninterruptible
769524 Â 1% -80.3% 151526 Â 6% sched_debug.cpu#9.nr_switches
399029 Â 3% -82.0% 71669 Â 0% sched_debug.cpu#8.ttwu_count
420425 Â 1% -81.0% 79992 Â 10% sched_debug.cpu#8.sched_goidle
843535 Â 1% -80.3% 166159 Â 10% sched_debug.cpu#8.nr_switches
1117965 Â 8% +435.3% 5984125 Â 0% sched_debug.cfs_rq[7]:/.min_vruntime
511997 Â 9% +1015.9% 5713356 Â 0% sched_debug.cfs_rq[52]:/.min_vruntime
1119452 Â 8% +438.3% 6025524 Â 1% sched_debug.cfs_rq[6]:/.min_vruntime
1107077 Â 7% +438.6% 5963136 Â 1% sched_debug.cfs_rq[5]:/.min_vruntime
2 Â 35% +550.0% 13 Â 5% sched_debug.cpu#53.cpu_load[4]
507924 Â 15% +1034.2% 5760828 Â 1% sched_debug.cfs_rq[53]:/.min_vruntime
462841 Â 3% -85.0% 69313 Â 4% sched_debug.cpu#4.sched_goidle
978113 Â 7% -84.5% 151685 Â 7% sched_debug.cpu#4.sched_count
926708 Â 3% -84.5% 143697 Â 5% sched_debug.cpu#4.nr_switches
1122648 Â 9% +435.9% 6016455 Â 1% sched_debug.cfs_rq[3]:/.min_vruntime
941488 Â 4% -86.9% 123539 Â 3% sched_debug.cpu#31.sched_count
430229 Â 2% -83.3% 71773 Â 1% sched_debug.cpu#3.ttwu_count
415077 Â 4% -83.8% 67085 Â 4% sched_debug.cpu#3.sched_goidle
2 Â 50% +575.0% 13 Â 12% sched_debug.cpu#54.cpu_load[4]
514866 Â 15% +1015.3% 5742085 Â 1% sched_debug.cfs_rq[54]:/.min_vruntime
831241 Â 4% -83.2% 139398 Â 4% sched_debug.cpu#3.nr_switches
1132874 Â 9% +434.5% 6055152 Â 1% sched_debug.cfs_rq[2]:/.min_vruntime
396510 Â 13% +1326.7% 5656886 Â 1% sched_debug.cfs_rq[41]:/.min_vruntime
510648 Â 17% +1017.0% 5703966 Â 0% sched_debug.cfs_rq[55]:/.min_vruntime
432018 Â 2% -82.4% 76046 Â 1% sched_debug.cpu#1.ttwu_count
435032 Â 3% -84.2% 68773 Â 5% sched_debug.cpu#1.sched_goidle
898167 Â 6% -82.8% 154598 Â 9% sched_debug.cpu#1.sched_count
871402 Â 3% -83.6% 143032 Â 5% sched_debug.cpu#1.nr_switches
439884 Â 3% -81.6% 81117 Â 0% sched_debug.cpu#0.ttwu_count
467463 Â 4% -83.4% 77658 Â 11% sched_debug.cpu#0.sched_goidle
968052 Â 5% -79.3% 200488 Â 26% sched_debug.cpu#0.sched_count
939022 Â 4% -82.6% 163789 Â 11% sched_debug.cpu#0.nr_switches
728509 Â 9% +763.8% 6292757 Â 0% sched_debug.cfs_rq[56]:/.min_vruntime
733898 Â 12% +758.7% 6302006 Â 1% sched_debug.cfs_rq[57]:/.min_vruntime
752642 Â 12% +739.9% 6321565 Â 1% sched_debug.cfs_rq[58]:/.min_vruntime
34452061 ± 0% -82.4% 6069487 ± 0% cpuidle.C1-NHM.usage
563860 Â 14% +920.1% 5751802 Â 0% sched_debug.cfs_rq[37]:/.min_vruntime
753969 Â 10% +729.8% 6256638 Â 0% sched_debug.cfs_rq[59]:/.min_vruntime
3 Â 0% +416.7% 15 Â 3% sched_debug.cpu#57.cpu_load[4]
522652 Â 15% +986.5% 5678738 Â 1% sched_debug.cfs_rq[49]:/.min_vruntime
3 Â 23% +375.0% 14 Â 10% sched_debug.cpu#60.cpu_load[4]
735034 Â 12% +746.0% 6218721 Â 1% sched_debug.cfs_rq[60]:/.min_vruntime
419503 Â 25% +1238.4% 5614621 Â 0% sched_debug.cfs_rq[42]:/.min_vruntime
20513 Â 10% +496.7% 122398 Â 0% sched_debug.cfs_rq[41]:/.exec_clock
1278013 Â 7% +402.3% 6419733 Â 0% sched_debug.cfs_rq[29]:/.min_vruntime
1288072 Â 8% +401.4% 6458783 Â 1% sched_debug.cfs_rq[27]:/.min_vruntime
1289452 Â 8% +402.9% 6484674 Â 0% sched_debug.cfs_rq[26]:/.min_vruntime
1273611 Â 8% +405.4% 6436316 Â 0% sched_debug.cfs_rq[28]:/.min_vruntime
883280 Â 6% -80.0% 176592 Â 8% sched_debug.cpu#8.sched_count
143 Â 25% +392.9% 708 Â 2% sched_debug.cfs_rq[50]:/.tg_runnable_contrib
6628 Â 25% +391.8% 32593 Â 2% sched_debug.cfs_rq[50]:/.avg->runnable_avg_sum
1273326 Â 8% +402.6% 6399707 Â 1% sched_debug.cfs_rq[31]:/.min_vruntime
819313 Â 1% -80.6% 159234 Â 14% sched_debug.cpu#21.sched_count
1288398 Â 8% +394.4% 6370426 Â 0% sched_debug.cfs_rq[30]:/.min_vruntime
798719 Â 4% -79.7% 162260 Â 11% sched_debug.cpu#9.sched_count
426 Â 14% +391.4% 2093 Â 14% sched_debug.cpu#47.ttwu_local
502 Â 43% +296.1% 1991 Â 10% sched_debug.cpu#33.curr->pid
151 Â 12% +375.9% 721 Â 7% sched_debug.cfs_rq[54]:/.tg_runnable_contrib
6970 Â 12% +375.1% 33114 Â 6% sched_debug.cfs_rq[54]:/.avg->runnable_avg_sum
394 Â 33% +379.3% 1890 Â 6% sched_debug.cpu#48.curr->pid
2 Â 36% +477.8% 13 Â 19% sched_debug.cpu#49.cpu_load[4]
2 Â 20% +420.0% 13 Â 9% sched_debug.cpu#51.cpu_load[4]
3 Â 33% +323.1% 13 Â 10% sched_debug.cpu#38.cpu_load[4]
2 Â 39% +390.9% 13 Â 3% sched_debug.cfs_rq[48]:/.runnable_load_avg
2 Â 39% +372.7% 13 Â 19% sched_debug.cpu#49.cpu_load[3]
251433 Â 3% -78.1% 55111 Â 4% sched_debug.cpu#61.sched_goidle
251186 Â 2% -78.1% 54906 Â 4% sched_debug.cpu#63.sched_goidle
269668 Â 4% -78.0% 59278 Â 2% sched_debug.cpu#60.sched_goidle
27195 Â 12% +352.0% 122916 Â 0% sched_debug.cfs_rq[55]:/.exec_clock
5673 Â 19% +423.4% 29691 Â 5% sched_debug.cfs_rq[47]:/.avg->runnable_avg_sum
123 Â 19% +425.0% 645 Â 5% sched_debug.cfs_rq[47]:/.tg_runnable_contrib
861367 Â 8% -82.7% 148638 Â 10% sched_debug.cpu#7.sched_count
27168 Â 11% +355.5% 123756 Â 0% sched_debug.cfs_rq[54]:/.exec_clock
27600 Â 15% +345.5% 122946 Â 1% sched_debug.cfs_rq[51]:/.exec_clock
23537 Â 15% +422.8% 123064 Â 1% sched_debug.cfs_rq[45]:/.exec_clock
862841 Â 7% -82.4% 151999 Â 8% sched_debug.cpu#3.sched_count
504662 Â 4% -77.1% 115592 Â 4% sched_debug.cpu#61.sched_count
503667 Â 3% -77.3% 114372 Â 4% sched_debug.cpu#61.nr_switches
264568 Â 1% -77.6% 59369 Â 0% sched_debug.cpu#62.sched_goidle
503203 Â 2% -77.3% 113986 Â 4% sched_debug.cpu#63.nr_switches
503461 Â 2% -77.2% 114736 Â 4% sched_debug.cpu#63.sched_count
540170 Â 4% -77.3% 122790 Â 2% sched_debug.cpu#60.nr_switches
540222 Â 4% -77.2% 122950 Â 3% sched_debug.cpu#60.sched_count
26706 Â 12% +364.5% 124058 Â 1% sched_debug.cfs_rq[53]:/.exec_clock
264009 Â 3% -77.6% 59237 Â 5% sched_debug.cpu#58.sched_goidle
27131 Â 6% +353.8% 123122 Â 0% sched_debug.cfs_rq[52]:/.exec_clock
253780 Â 3% -77.5% 57202 Â 7% sched_debug.cpu#59.sched_goidle
769924 Â 2% -75.5% 188277 Â 32% sched_debug.cpu#15.sched_count
7048 Â 14% +357.6% 32254 Â 1% sched_debug.cfs_rq[53]:/.avg->runnable_avg_sum
153 Â 14% +358.2% 702 Â 1% sched_debug.cfs_rq[53]:/.tg_runnable_contrib
529978 Â 1% -76.8% 122764 Â 0% sched_debug.cpu#62.nr_switches
2 Â 30% +372.7% 13 Â 5% sched_debug.cpu#52.cpu_load[4]
3 Â 23% +325.0% 12 Â 6% sched_debug.cpu#53.cpu_load[3]
2 Â 36% +477.8% 13 Â 5% sched_debug.cpu#33.cpu_load[4]
2 Â 20% +410.0% 12 Â 6% sched_debug.cpu#59.cpu_load[4]
3 Â 40% +316.7% 12 Â 14% sched_debug.cfs_rq[49]:/.runnable_load_avg
3 Â 23% +300.0% 12 Â 21% sched_debug.cfs_rq[36]:/.runnable_load_avg
264953 Â 3% -76.9% 61283 Â 3% sched_debug.cpu#56.sched_goidle
26622 Â 12% +361.8% 122941 Â 1% sched_debug.cfs_rq[48]:/.exec_clock
457 Â 12% +337.7% 2003 Â 20% sched_debug.cpu#36.curr->pid
765141 Â 1% -79.3% 158075 Â 7% sched_debug.cpu#11.sched_count
530129 Â 1% -76.6% 123998 Â 1% sched_debug.cpu#62.sched_count
516 Â 41% +282.1% 1972 Â 10% sched_debug.cpu#54.curr->pid
27223 Â 11% +351.4% 122887 Â 1% sched_debug.cfs_rq[50]:/.exec_clock
529094 Â 3% -76.8% 122624 Â 5% sched_debug.cpu#58.nr_switches
530320 Â 3% -76.6% 123934 Â 5% sched_debug.cpu#58.sched_count
855966 Â 5% -80.6% 166023 Â 9% sched_debug.cpu#10.sched_count
413120 Â 2% -82.1% 73890 Â 2% sched_debug.cpu#10.sched_goidle
828115 Â 2% -81.4% 154252 Â 3% sched_debug.cpu#10.nr_switches
158 Â 18% +360.1% 727 Â 2% sched_debug.cfs_rq[51]:/.tg_runnable_contrib
7287 Â 18% +358.8% 33434 Â 2% sched_debug.cfs_rq[51]:/.avg->runnable_avg_sum
29543 Â 13% +317.6% 123370 Â 1% sched_debug.cfs_rq[38]:/.exec_clock
555797 Â 9% +929.8% 5723364 Â 1% sched_debug.cfs_rq[34]:/.min_vruntime
508378 Â 3% -76.6% 118881 Â 7% sched_debug.cpu#59.nr_switches
508565 Â 3% -76.5% 119331 Â 7% sched_debug.cpu#59.sched_count
275963 Â 5% -75.9% 66628 Â 1% sched_debug.cpu#63.ttwu_count
279203 Â 2% -76.0% 67079 Â 0% sched_debug.cpu#59.ttwu_count
530676 Â 3% -76.1% 127010 Â 3% sched_debug.cpu#56.nr_switches
531314 Â 4% -75.8% 128404 Â 4% sched_debug.cpu#56.sched_count
27764 Â 12% +342.0% 122721 Â 1% sched_debug.cfs_rq[49]:/.exec_clock
1327 Â 12% +196.1% 3930 Â 49% sched_debug.cpu#13.ttwu_local
399237 Â 2% -81.9% 72338 Â 3% sched_debug.cpu#10.ttwu_count
4 Â 25% +287.5% 15 Â 5% sched_debug.cfs_rq[57]:/.runnable_load_avg
274076 Â 6% -75.4% 67499 Â 0% sched_debug.cpu#58.ttwu_count
158 Â 19% +349.8% 714 Â 3% sched_debug.cfs_rq[52]:/.tg_runnable_contrib
7300 Â 19% +349.4% 32806 Â 3% sched_debug.cfs_rq[52]:/.avg->runnable_avg_sum
28800 Â 9% +329.2% 123619 Â 0% sched_debug.cfs_rq[39]:/.exec_clock
267405 Â 3% -75.0% 66863 Â 0% sched_debug.cpu#61.ttwu_count
264134 Â 4% -74.7% 66769 Â 0% sched_debug.cpu#60.ttwu_count
29402 Â 6% +320.8% 123727 Â 1% sched_debug.cfs_rq[36]:/.exec_clock
30362 Â 4% +309.9% 124471 Â 0% sched_debug.cfs_rq[35]:/.exec_clock
560496 Â 16% +922.0% 5728023 Â 1% sched_debug.cfs_rq[38]:/.min_vruntime
3 Â 40% +308.3% 12 Â 14% sched_debug.cpu#39.cpu_load[4]
2 Â 44% +390.0% 12 Â 3% sched_debug.cpu#44.cpu_load[4]
2 Â 36% +444.4% 12 Â 20% sched_debug.cpu#36.cpu_load[4]
3 Â 24% +228.6% 11 Â 13% sched_debug.cpu#42.cpu_load[3]
5 Â 34% +200.0% 15 Â 12% sched_debug.cfs_rq[61]:/.runnable_load_avg
3 Â 40% +300.0% 12 Â 5% sched_debug.cpu#37.cpu_load[3]
4 Â 17% +293.8% 15 Â 6% sched_debug.cpu#57.cpu_load[0]
3 Â 42% +228.6% 11 Â 4% sched_debug.cfs_rq[37]:/.runnable_load_avg
4 Â 43% +206.2% 12 Â 8% sched_debug.cpu#37.cpu_load[1]
1 Â 34% +160.0% 3 Â 39% sched_debug.cfs_rq[47]:/.nr_spread_over
3 Â 23% +291.7% 11 Â 21% sched_debug.cpu#36.cpu_load[3]
426620 Â 2% -83.6% 69920 Â 1% sched_debug.cpu#6.ttwu_count
30042 Â 8% +310.5% 123314 Â 0% sched_debug.cfs_rq[33]:/.exec_clock
264870 Â 4% -74.4% 67820 Â 0% sched_debug.cpu#57.ttwu_count
177 Â 12% +299.6% 707 Â 3% sched_debug.cfs_rq[33]:/.tg_runnable_contrib
514194 Â 3% -85.5% 74348 Â 1% sched_debug.cpu#25.ttwu_count
3 Â 45% +307.7% 13 Â 6% sched_debug.cpu#33.cpu_load[2]
7350 Â 11% +320.9% 30942 Â 4% sched_debug.cfs_rq[55]:/.avg->runnable_avg_sum
8172 Â 12% +297.8% 32506 Â 3% sched_debug.cfs_rq[33]:/.avg->runnable_avg_sum
488344 Â 1% -88.0% 58541 Â 5% sched_debug.cpu#25.sched_goidle
159 Â 11% +321.4% 673 Â 3% sched_debug.cfs_rq[55]:/.tg_runnable_contrib
978040 Â 1% -87.4% 122822 Â 5% sched_debug.cpu#25.nr_switches
3 Â 39% +293.3% 14 Â 7% sched_debug.cpu#61.cpu_load[4]
510 Â 8% +261.4% 1843 Â 7% sched_debug.cpu#41.ttwu_local
31508 Â 8% +294.5% 124313 Â 2% sched_debug.cfs_rq[32]:/.exec_clock
6215 Â 20% +420.7% 32363 Â 4% sched_debug.cfs_rq[43]:/.avg->runnable_avg_sum
134 Â 20% +423.0% 703 Â 4% sched_debug.cfs_rq[43]:/.tg_runnable_contrib
423696 Â 1% -83.7% 69089 Â 1% sched_debug.cpu#7.ttwu_count
985152 Â 1% -87.3% 125087 Â 4% sched_debug.cpu#25.sched_count
176 Â 7% +293.3% 693 Â 7% sched_debug.cfs_rq[39]:/.tg_runnable_contrib
8113 Â 7% +293.2% 31898 Â 7% sched_debug.cfs_rq[39]:/.avg->runnable_avg_sum
158 Â 14% +346.4% 706 Â 11% sched_debug.cfs_rq[49]:/.tg_runnable_contrib
588 Â 43% +204.5% 1791 Â 5% sched_debug.cpu#42.ttwu_local
7280 Â 14% +346.0% 32470 Â 11% sched_debug.cfs_rq[49]:/.avg->runnable_avg_sum
189 Â 11% +276.3% 711 Â 2% sched_debug.cfs_rq[32]:/.tg_runnable_contrib
3 Â 22% +273.3% 14 Â 8% sched_debug.cpu#62.cpu_load[4]
3 Â 29% +266.7% 13 Â 9% sched_debug.cfs_rq[60]:/.runnable_load_avg
4 Â 38% +223.5% 13 Â 11% sched_debug.cpu#46.cpu_load[3]
4 Â 17% +250.0% 14 Â 11% sched_debug.cpu#60.cpu_load[3]
5 Â 46% +175.0% 13 Â 11% sched_debug.cpu#46.cpu_load[2]
6 Â 19% +103.7% 13 Â 13% sched_debug.cpu#11.cpu_load[0]
3 Â 22% +286.7% 14 Â 7% sched_debug.cpu#58.cpu_load[4]
3 Â 39% +330.8% 14 Â 12% sched_debug.cpu#46.cpu_load[4]
3 Â 33% +350.0% 13 Â 15% sched_debug.cpu#54.cpu_load[3]
8706 Â 11% +275.5% 32690 Â 2% sched_debug.cfs_rq[32]:/.avg->runnable_avg_sum
415474 Â 5% -84.4% 64948 Â 2% sched_debug.cpu#7.sched_goidle
161883 Â 18% +221.3% 520122 Â 28% sched_debug.cfs_rq[25]:/.spread0
831974 Â 5% -83.8% 135081 Â 2% sched_debug.cpu#7.nr_switches
932620 Â 9% -83.7% 151771 Â 4% sched_debug.cpu#6.sched_count
448439 Â 5% -84.1% 71489 Â 4% sched_debug.cpu#6.sched_goidle
897830 Â 5% -83.5% 148047 Â 4% sched_debug.cpu#6.nr_switches
474 Â 19% +304.2% 1916 Â 4% sched_debug.cpu#44.ttwu_local
408625 Â 3% -84.3% 64169 Â 5% sched_debug.cpu#17.sched_goidle
818333 Â 3% -83.7% 133510 Â 5% sched_debug.cpu#17.nr_switches
57425 Â 2% +124.8% 129088 Â 0% sched_debug.cfs_rq[13]:/.exec_clock
9922 Â 7% +278.2% 37525 Â 2% sched_debug.cfs_rq[57]:/.avg->runnable_avg_sum
216 Â 7% +276.9% 814 Â 2% sched_debug.cfs_rq[57]:/.tg_runnable_contrib
57426 Â 6% +127.5% 130639 Â 2% sched_debug.cfs_rq[15]:/.exec_clock
409805 Â 3% -83.5% 67449 Â 2% sched_debug.cpu#17.ttwu_count
646 Â 25% +207.4% 1987 Â 10% sched_debug.cpu#7.curr->pid
553 Â 8% +280.4% 2104 Â 12% sched_debug.cpu#38.curr->pid
7 Â 17% +92.9% 13 Â 15% sched_debug.cpu#11.cpu_load[1]
6 Â 20% +128.0% 14 Â 5% sched_debug.cpu#18.cpu_load[4]
124 Â 9% +434.9% 663 Â 7% sched_debug.cfs_rq[42]:/.tg_runnable_contrib
5723 Â 9% +432.6% 30482 Â 7% sched_debug.cfs_rq[42]:/.avg->runnable_avg_sum
820458 Â 3% -82.6% 142803 Â 5% sched_debug.cpu#17.sched_count
907 Â 6% +109.9% 1905 Â 7% sched_debug.cpu#18.curr->pid
7596 Â 27% +317.1% 31685 Â 5% sched_debug.cfs_rq[45]:/.avg->runnable_avg_sum
3 Â 43% +246.7% 13 Â 5% sched_debug.cpu#33.cpu_load[1]
60531 Â 4% +113.4% 129160 Â 0% sched_debug.cfs_rq[23]:/.exec_clock
48 Â 39% +205.2% 147 Â 34% sched_debug.cfs_rq[33]:/.tg_load_contrib
453444 Â 4% -84.1% 72012 Â 4% sched_debug.cpu#2.sched_goidle
907983 Â 4% -83.5% 149609 Â 4% sched_debug.cpu#2.nr_switches
974782 Â 10% -83.9% 157156 Â 2% sched_debug.cpu#2.sched_count
165 Â 27% +317.1% 688 Â 5% sched_debug.cfs_rq[45]:/.tg_runnable_contrib
522 Â 12% +239.3% 1771 Â 12% sched_debug.cpu#39.ttwu_local
22377 Â 19% +441.3% 121128 Â 0% sched_debug.cfs_rq[42]:/.exec_clock
167866 Â 22% +160.2% 436709 Â 34% sched_debug.cfs_rq[26]:/.spread0
3 Â 29% +226.7% 12 Â 21% sched_debug.cpu#36.cpu_load[1]
4 Â 17% +231.2% 13 Â 6% sched_debug.cpu#59.cpu_load[0]
3 Â 42% +278.6% 13 Â 9% sched_debug.cfs_rq[54]:/.runnable_load_avg
3 Â 31% +285.7% 13 Â 12% sched_debug.cpu#41.cpu_load[3]
3 Â 47% +246.7% 13 Â 9% sched_debug.cpu#63.cpu_load[4]
6 Â 20% +112.5% 12 Â 14% sched_debug.cpu#11.cpu_load[4]
3 Â 11% +226.7% 12 Â 21% sched_debug.cpu#36.cpu_load[0]
4 Â 35% +225.0% 13 Â 7% sched_debug.cpu#53.cpu_load[1]
5 Â 47% +140.9% 13 Â 9% sched_debug.cpu#46.cpu_load[1]
4 Â 25% +205.9% 13 Â 12% sched_debug.cpu#53.cpu_load[0]
500 Â 18% +262.1% 1810 Â 15% sched_debug.cpu#45.ttwu_local
451 Â 8% +230.7% 1493 Â 19% sched_debug.cpu#54.ttwu_local
1.156e+10 ± 0% -71.2% 3.326e+09 ± 1% cpuidle.C1-NHM.time
799819 Â 2% -82.4% 140789 Â 6% sched_debug.cpu#19.sched_count
50 Â 16% +93.0% 97 Â 15% sched_debug.cpu#31.nr_uninterruptible
228 Â 4% +239.6% 776 Â 4% sched_debug.cfs_rq[58]:/.tg_runnable_contrib
10517 Â 4% +239.3% 35688 Â 4% sched_debug.cfs_rq[58]:/.avg->runnable_avg_sum
156424 Â 23% +137.7% 371749 Â 37% sched_debug.cfs_rq[29]:/.spread0
44 Â 41% +201.7% 133 Â 37% sched_debug.cfs_rq[33]:/.blocked_load_avg
7 Â 17% +79.3% 13 Â 15% sched_debug.cpu#11.cpu_load[2]
7 Â 15% +72.4% 12 Â 4% sched_debug.cfs_rq[18]:/.runnable_load_avg
6 Â 13% +96.0% 12 Â 12% sched_debug.cpu#21.cpu_load[4]
38958 Â 6% +249.7% 136223 Â 0% sched_debug.cfs_rq[56]:/.exec_clock
166485 Â 27% +146.8% 410810 Â 40% sched_debug.cfs_rq[27]:/.spread0
903 Â 9% +122.0% 2006 Â 2% sched_debug.cpu#25.curr->pid
10608 Â 7% +232.5% 35277 Â 5% sched_debug.cfs_rq[62]:/.avg->runnable_avg_sum
231 Â 7% +232.4% 767 Â 5% sched_debug.cfs_rq[62]:/.tg_runnable_contrib
510864 Â 4% -85.9% 71998 Â 0% sched_debug.cpu#29.ttwu_count
497 Â 14% +258.9% 1783 Â 30% sched_debug.cpu#55.ttwu_local
584 Â 27% +251.9% 2057 Â 9% sched_debug.cpu#58.curr->pid
577 Â 14% +214.3% 1815 Â 12% sched_debug.cpu#51.curr->pid
424410 Â 4% -84.9% 64153 Â 1% sched_debug.cpu#5.sched_goidle
464 Â 14% -39.7% 280 Â 6% sched_debug.cpu#24.nr_uninterruptible
849809 Â 4% -84.3% 133474 Â 1% sched_debug.cpu#5.nr_switches
6 Â 19% +88.9% 12 Â 14% sched_debug.cpu#11.cpu_load[3]
893664 Â 8% -84.3% 140385 Â 2% sched_debug.cpu#5.sched_count
1050 Â 22% +92.7% 2023 Â 4% sched_debug.cpu#14.curr->pid
6 Â 41% +84.0% 11 Â 9% sched_debug.cpu#20.cpu_load[0]
562 Â 26% +264.2% 2049 Â 2% sched_debug.cpu#56.curr->pid
484950 Â 3% -87.6% 60072 Â 4% sched_debug.cpu#29.sched_goidle
631 Â 20% +204.4% 1921 Â 13% sched_debug.cpu#35.curr->pid
400 Â 9% +91.0% 765 Â 3% sched_debug.cfs_rq[29]:/.tg_runnable_contrib
221437 Â 4% -69.0% 68559 Â 1% sched_debug.cpu#32.sched_goidle
151736 Â 28% +131.8% 351711 Â 34% sched_debug.cfs_rq[31]:/.spread0
39350 Â 8% +241.4% 134348 Â 0% sched_debug.cfs_rq[60]:/.exec_clock
424957 Â 2% -83.4% 70675 Â 2% sched_debug.cpu#4.ttwu_count
399685 Â 1% -82.2% 71004 Â 0% sched_debug.cpu#11.ttwu_count
971165 Â 3% -87.1% 125255 Â 4% sched_debug.cpu#29.nr_switches
423917 Â 3% -83.7% 69106 Â 1% sched_debug.cpu#5.ttwu_count
8 Â 19% +63.6% 13 Â 15% sched_debug.cpu#22.cpu_load[1]
7 Â 19% +71.0% 13 Â 12% sched_debug.cpu#22.cpu_load[2]
6 Â 19% +96.3% 13 Â 12% sched_debug.cpu#22.cpu_load[3]
608 Â 20% +258.9% 2182 Â 6% sched_debug.cpu#57.curr->pid
4 Â 30% +205.9% 13 Â 5% sched_debug.cpu#33.cpu_load[0]
376316 Â 1% -81.4% 70115 Â 2% sched_debug.cpu#11.sched_goidle
754666 Â 1% -80.6% 146300 Â 2% sched_debug.cpu#11.nr_switches
511244 Â 3% -87.5% 63857 Â 6% sched_debug.cpu#26.sched_goidle
1023764 Â 3% -87.0% 133252 Â 6% sched_debug.cpu#26.nr_switches
2 Â 35% +600.0% 14 Â 5% sched_debug.cpu#48.cpu_load[4]
6 Â 30% +76.0% 11 Â 6% sched_debug.cfs_rq[20]:/.runnable_load_avg
39811 Â 7% +236.6% 134001 Â 1% sched_debug.cfs_rq[61]:/.exec_clock
76 Â 17% +64.7% 126 Â 12% sched_debug.cpu#28.nr_uninterruptible
8 Â 16% +57.1% 13 Â 14% sched_debug.cpu#23.cpu_load[0]
58133 Â 3% +126.1% 131423 Â 2% sched_debug.cfs_rq[14]:/.exec_clock
516681 Â 2% -85.9% 72656 Â 1% sched_debug.cpu#26.ttwu_count
206503 Â 3% -68.6% 64766 Â 1% sched_debug.cpu#32.ttwu_count
18371 Â 9% +91.0% 35082 Â 3% sched_debug.cfs_rq[29]:/.avg->runnable_avg_sum
3 Â 33% +308.3% 12 Â 6% sched_debug.cpu#55.cpu_load[4]
3 Â 14% +257.1% 12 Â 4% sched_debug.cpu#59.cpu_load[3]
4 Â 33% +161.1% 11 Â 13% sched_debug.cpu#42.cpu_load[2]
4 Â 30% +225.0% 13 Â 9% sched_debug.cpu#32.cpu_load[3]
3 Â 31% +264.3% 12 Â 11% sched_debug.cpu#35.cpu_load[4]
3 Â 29% +240.0% 12 Â 11% sched_debug.cfs_rq[53]:/.runnable_load_avg
3 Â 29% +240.0% 12 Â 6% sched_debug.cpu#53.cpu_load[2]
3 Â 22% +246.7% 13 Â 5% sched_debug.cpu#52.cpu_load[3]
3 Â 31% +257.1% 12 Â 8% sched_debug.cpu#51.cpu_load[3]
3 Â 39% +300.0% 13 Â 5% sched_debug.cpu#32.cpu_load[4]
4 Â 30% +212.5% 12 Â 4% sched_debug.cpu#40.cpu_load[2]
4 Â 38% +194.1% 12 Â 4% sched_debug.cpu#40.cpu_load[1]
2 Â 19% +466.7% 12 Â 8% sched_debug.cpu#40.cpu_load[4]
2 Â 47% +372.7% 13 Â 5% sched_debug.cpu#33.cpu_load[3]
4 Â 10% +200.0% 12 Â 6% sched_debug.cpu#59.cpu_load[1]
4 Â 38% +194.1% 12 Â 4% sched_debug.cpu#40.cpu_load[0]
3 Â 29% +220.0% 12 Â 21% sched_debug.cpu#36.cpu_load[2]
4 Â 17% +212.5% 12 Â 14% sched_debug.cpu#39.cpu_load[3]
4 Â 27% +147.4% 11 Â 19% sched_debug.cpu#42.cpu_load[1]
4 Â 0% +287.5% 15 Â 3% sched_debug.cpu#57.cpu_load[3]
4 Â 0% +212.5% 12 Â 4% sched_debug.cpu#59.cpu_load[2]
190817 Â 7% -69.0% 59123 Â 5% sched_debug.cpu#37.sched_goidle
683 Â 5% +204.3% 2080 Â 9% sched_debug.cpu#61.curr->pid
109 Â 10% +37.0% 150 Â 21% sched_debug.cpu#26.nr_uninterruptible
528 Â 18% +232.1% 1755 Â 12% sched_debug.cpu#55.curr->pid
540 Â 32% +257.9% 1934 Â 9% sched_debug.cpu#39.curr->pid
429861 Â 1% -82.7% 74351 Â 1% sched_debug.cpu#2.ttwu_count
39439 Â 8% +245.8% 136399 Â 0% sched_debug.cfs_rq[57]:/.exec_clock
973 Â 25% +110.9% 2052 Â 10% sched_debug.cpu#0.curr->pid
40253 Â 9% +235.4% 135015 Â 0% sched_debug.cfs_rq[62]:/.exec_clock
73690739 ± 3% -67.6% 23868773 ± 10% cpuidle.C1E-NHM.time
443566 Â 4% -67.9% 142393 Â 1% sched_debug.cpu#32.nr_switches
133 Â 15% +429.1% 705 Â 5% sched_debug.cfs_rq[40]:/.tg_runnable_contrib
247383 Â 3% -77.8% 54929 Â 5% sched_debug.cpu#57.sched_goidle
6128 Â 15% +428.5% 32386 Â 5% sched_debug.cfs_rq[40]:/.avg->runnable_avg_sum
586 Â 9% +204.9% 1787 Â 8% sched_debug.cpu#36.ttwu_local
5 Â 27% +177.3% 15 Â 9% sched_debug.cpu#61.cpu_load[2]
4 Â 9% +236.8% 16 Â 4% sched_debug.cpu#57.cpu_load[1]
9 Â 20% +45.9% 13 Â 3% sched_debug.cpu#30.cpu_load[4]
2 Â 30% +400.0% 13 Â 3% sched_debug.cpu#48.cpu_load[3]
230 Â 5% +224.5% 748 Â 2% sched_debug.cfs_rq[59]:/.tg_runnable_contrib
625 Â 32% +207.4% 1922 Â 17% sched_debug.cpu#32.curr->pid
984354 Â 2% -87.2% 126424 Â 5% sched_debug.cpu#29.sched_count
181989 Â 11% -65.9% 62130 Â 1% sched_debug.cpu#49.ttwu_count
473 Â 7% +219.2% 1512 Â 22% sched_debug.cpu#52.ttwu_local
10613 Â 5% +224.4% 34424 Â 2% sched_debug.cfs_rq[59]:/.avg->runnable_avg_sum
581 Â 12% +185.7% 1661 Â 17% sched_debug.cpu#48.ttwu_local
40432 Â 8% +237.9% 136634 Â 1% sched_debug.cfs_rq[58]:/.exec_clock
193465 Â 9% -67.5% 62936 Â 1% sched_debug.cpu#37.ttwu_count
308 Â 10% +140.7% 742 Â 5% sched_debug.cfs_rq[9]:/.tg_runnable_contrib
176 Â 11% +288.1% 683 Â 5% sched_debug.cfs_rq[37]:/.tg_runnable_contrib
187261 Â 7% -68.1% 59804 Â 6% sched_debug.cpu#35.sched_goidle
8109 Â 10% +287.2% 31401 Â 5% sched_debug.cfs_rq[37]:/.avg->runnable_avg_sum
14139 Â 9% +141.3% 34124 Â 5% sched_debug.cfs_rq[9]:/.avg->runnable_avg_sum
382269 Â 7% -67.9% 122882 Â 5% sched_debug.cpu#37.nr_switches
315 Â 4% +135.6% 742 Â 4% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
14456 Â 4% +136.0% 34117 Â 4% sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
281534 ± 3% -86.8% 37215 ± 6% cpuidle.C1E-NHM.usage
603 Â 38% +208.4% 1862 Â 8% sched_debug.cpu#40.curr->pid
495617 Â 3% -77.0% 114184 Â 5% sched_debug.cpu#57.nr_switches
198497 Â 3% -67.6% 64225 Â 0% sched_debug.cpu#35.ttwu_count
498597 Â 4% -76.9% 115414 Â 4% sched_debug.cpu#57.sched_count
446635 Â 3% -61.0% 174387 Â 28% sched_debug.cpu#32.sched_count
8 Â 16% +62.9% 14 Â 15% sched_debug.cpu#23.cpu_load[1]
202136 Â 5% -68.5% 63771 Â 3% sched_debug.cpu#36.sched_goidle
568 Â 24% +226.5% 1856 Â 8% sched_debug.cpu#53.curr->pid
869 Â 19% +109.7% 1822 Â 7% sched_debug.cpu#12.curr->pid
40794 Â 8% +228.5% 133993 Â 1% sched_debug.cfs_rq[63]:/.exec_clock
191084 Â 7% -66.8% 63524 Â 1% sched_debug.cpu#36.ttwu_count
183888 Â 8% -65.6% 63297 Â 6% sched_debug.cpu#52.sched_goidle
10 Â 18% +29.3% 13 Â 3% sched_debug.cpu#30.cpu_load[3]
5 Â 31% +170.0% 13 Â 11% sched_debug.cpu#9.cpu_load[4]
189595 Â 8% -66.4% 63732 Â 2% sched_debug.cpu#34.ttwu_count
181 Â 15% +284.3% 696 Â 7% sched_debug.cfs_rq[35]:/.tg_runnable_contrib
10639 Â 12% +240.2% 36198 Â 2% sched_debug.cfs_rq[56]:/.avg->runnable_avg_sum
231 Â 12% +239.3% 785 Â 2% sched_debug.cfs_rq[56]:/.tg_runnable_contrib
558 Â 15% +230.8% 1848 Â 4% sched_debug.cpu#52.curr->pid
375242 Â 7% -66.9% 124149 Â 6% sched_debug.cpu#35.nr_switches
229 Â 2% +238.5% 777 Â 1% sched_debug.cfs_rq[61]:/.tg_runnable_contrib
8353 Â 14% +283.7% 32054 Â 7% sched_debug.cfs_rq[35]:/.avg->runnable_avg_sum
173742 Â 13% -64.5% 61631 Â 5% sched_debug.cpu#49.sched_goidle
10571 Â 2% +237.9% 35717 Â 1% sched_debug.cfs_rq[61]:/.avg->runnable_avg_sum
197826 Â 8% -67.3% 64771 Â 4% sched_debug.cpu#38.sched_goidle
404925 Â 5% -67.4% 132191 Â 3% sched_debug.cpu#36.nr_switches
368375 Â 8% -64.4% 131038 Â 6% sched_debug.cpu#52.nr_switches
4 Â 38% +170.6% 11 Â 19% sched_debug.cfs_rq[55]:/.runnable_load_avg
5 Â 14% +185.0% 14 Â 9% sched_debug.cpu#60.cpu_load[1]
4 Â 39% +262.5% 14 Â 7% sched_debug.cpu#56.cpu_load[4]
5 Â 14% +195.0% 14 Â 7% sched_debug.cpu#58.cpu_load[3]
4 Â 31% +205.3% 14 Â 5% sched_debug.cfs_rq[58]:/.runnable_load_avg
1 Â 34% +120.0% 2 Â 30% sched_debug.cfs_rq[9]:/.nr_spread_over
1 Â 0% +200.0% 3 Â 40% sched_debug.cfs_rq[1]:/.nr_spread_over
4 Â 19% +216.7% 14 Â 9% sched_debug.cpu#60.cpu_load[2]
4 Â 25% +237.5% 13 Â 15% sched_debug.cpu#54.cpu_load[2]
5 Â 37% +200.0% 15 Â 9% sched_debug.cpu#61.cpu_load[3]
4 Â 24% +205.6% 13 Â 12% sched_debug.cpu#54.cpu_load[1]
445 Â 5% +332.8% 1929 Â 12% sched_debug.cpu#40.ttwu_local
661 Â 16% +184.0% 1877 Â 7% sched_debug.cpu#37.curr->pid
176233 Â 8% -64.5% 62640 Â 2% sched_debug.cpu#52.ttwu_count
453 Â 19% +319.1% 1898 Â 11% sched_debug.cpu#46.ttwu_local
167768 Â 12% -64.8% 59130 Â 6% sched_debug.cpu#53.sched_goidle
383507 Â 7% -66.9% 126751 Â 4% sched_debug.cpu#37.sched_count
189267 Â 14% -66.6% 63161 Â 1% sched_debug.cpu#38.ttwu_count
271885 Â 4% -75.2% 67422 Â 0% sched_debug.cpu#62.ttwu_count
396300 Â 8% -66.1% 134542 Â 5% sched_debug.cpu#38.nr_switches
198817 Â 9% -66.0% 67505 Â 4% sched_debug.cpu#34.sched_goidle
16468 Â 4% +185.9% 47080 Â 1% sched_debug.cfs_rq[3]:/.tg->runnable_avg
16466 Â 4% +185.8% 47066 Â 1% sched_debug.cfs_rq[4]:/.tg->runnable_avg
16459 Â 4% +186.0% 47082 Â 1% sched_debug.cfs_rq[2]:/.tg->runnable_avg
16458 Â 4% +186.0% 47063 Â 1% sched_debug.cfs_rq[1]:/.tg->runnable_avg
16484 Â 4% +185.5% 47069 Â 1% sched_debug.cfs_rq[6]:/.tg->runnable_avg
10545 Â 10% +215.5% 33271 Â 3% sched_debug.cfs_rq[63]:/.avg->runnable_avg_sum
16455 Â 4% +186.0% 47061 Â 1% sched_debug.cfs_rq[0]:/.tg->runnable_avg
16495 Â 4% +185.4% 47078 Â 1% sched_debug.cfs_rq[7]:/.tg->runnable_avg
16481 Â 4% +185.5% 47062 Â 1% sched_debug.cfs_rq[5]:/.tg->runnable_avg
10 Â 48% +182.9% 29 Â 44% sched_debug.cfs_rq[34]:/.load
229 Â 10% +214.5% 722 Â 3% sched_debug.cfs_rq[63]:/.tg_runnable_contrib
405011 Â 5% -66.1% 137434 Â 6% sched_debug.cpu#36.sched_count
16507 Â 4% +185.2% 47087 Â 1% sched_debug.cfs_rq[8]:/.tg->runnable_avg
16516 Â 4% +185.1% 47086 Â 1% sched_debug.cfs_rq[9]:/.tg->runnable_avg
16527 Â 4% +185.1% 47117 Â 1% sched_debug.cfs_rq[12]:/.tg->runnable_avg
16525 Â 4% +185.0% 47099 Â 1% sched_debug.cfs_rq[11]:/.tg->runnable_avg
16522 Â 4% +185.0% 47089 Â 1% sched_debug.cfs_rq[10]:/.tg->runnable_avg
16528 Â 4% +185.1% 47115 Â 1% sched_debug.cfs_rq[13]:/.tg->runnable_avg
348070 Â 13% -63.1% 128384 Â 5% sched_debug.cpu#49.nr_switches
506 Â 6% +197.3% 1504 Â 4% sched_debug.cpu#53.ttwu_local
16538 Â 4% +184.9% 47115 Â 1% sched_debug.cfs_rq[15]:/.tg->runnable_avg
16539 Â 4% +184.9% 47116 Â 1% sched_debug.cfs_rq[14]:/.tg->runnable_avg
16549 Â 4% +184.8% 47129 Â 1% sched_debug.cfs_rq[17]:/.tg->runnable_avg
16543 Â 4% +184.8% 47118 Â 1% sched_debug.cfs_rq[16]:/.tg->runnable_avg
184779 Â 11% -66.2% 62497 Â 5% sched_debug.cpu#48.sched_goidle
941 Â 14% +100.2% 1883 Â 11% sched_debug.cpu#15.curr->pid
16591 Â 4% +184.2% 47159 Â 1% sched_debug.cfs_rq[24]:/.tg->runnable_avg
16562 Â 4% +184.6% 47131 Â 1% sched_debug.cfs_rq[18]:/.tg->runnable_avg
16594 Â 4% +184.2% 47166 Â 1% sched_debug.cfs_rq[25]:/.tg->runnable_avg
16593 Â 4% +184.4% 47195 Â 1% sched_debug.cfs_rq[28]:/.tg->runnable_avg
16591 Â 4% +184.4% 47192 Â 1% sched_debug.cfs_rq[29]:/.tg->runnable_avg
16597 Â 4% +184.3% 47181 Â 1% sched_debug.cfs_rq[26]:/.tg->runnable_avg
16597 Â 4% +184.4% 47195 Â 1% sched_debug.cfs_rq[27]:/.tg->runnable_avg
16570 Â 4% +184.5% 47138 Â 1% sched_debug.cfs_rq[19]:/.tg->runnable_avg
16593 Â 4% +184.2% 47152 Â 1% sched_debug.cfs_rq[23]:/.tg->runnable_avg
16587 Â 4% +184.2% 47138 Â 1% sched_debug.cfs_rq[22]:/.tg->runnable_avg
16612 Â 4% +184.1% 47192 Â 1% sched_debug.cfs_rq[32]:/.tg->runnable_avg
16600 Â 4% +184.2% 47176 Â 1% sched_debug.cfs_rq[30]:/.tg->runnable_avg
16606 Â 4% +184.2% 47196 Â 1% sched_debug.cfs_rq[31]:/.tg->runnable_avg
16580 Â 4% +184.3% 47142 Â 1% sched_debug.cfs_rq[21]:/.tg->runnable_avg
16577 Â 4% +184.4% 47142 Â 1% sched_debug.cfs_rq[20]:/.tg->runnable_avg
16626 Â 4% +183.9% 47201 Â 1% sched_debug.cfs_rq[34]:/.tg->runnable_avg
16628 Â 4% +183.9% 47199 Â 1% sched_debug.cfs_rq[33]:/.tg->runnable_avg
16636 Â 4% +183.8% 47207 Â 1% sched_debug.cfs_rq[36]:/.tg->runnable_avg
16633 Â 4% +183.8% 47204 Â 1% sched_debug.cfs_rq[35]:/.tg->runnable_avg
16641 Â 4% +183.7% 47206 Â 1% sched_debug.cfs_rq[37]:/.tg->runnable_avg
183606 Â 14% -66.0% 62470 Â 2% sched_debug.cpu#51.ttwu_count
16652 Â 4% +183.5% 47213 Â 1% sched_debug.cfs_rq[40]:/.tg->runnable_avg
16650 Â 4% +183.6% 47221 Â 1% sched_debug.cfs_rq[41]:/.tg->runnable_avg
16677 Â 4% +183.3% 47243 Â 1% sched_debug.cfs_rq[45]:/.tg->runnable_avg
336124 Â 12% -63.6% 122396 Â 6% sched_debug.cpu#53.nr_switches
16674 Â 4% +183.4% 47248 Â 1% sched_debug.cfs_rq[51]:/.tg->runnable_avg
16659 Â 4% +183.5% 47226 Â 1% sched_debug.cfs_rq[42]:/.tg->runnable_avg
16672 Â 4% +183.3% 47235 Â 1% sched_debug.cfs_rq[49]:/.tg->runnable_avg
16653 Â 4% +183.4% 47199 Â 1% sched_debug.cfs_rq[39]:/.tg->runnable_avg
16674 Â 4% +183.3% 47243 Â 1% sched_debug.cfs_rq[44]:/.tg->runnable_avg
16651 Â 4% +183.4% 47195 Â 1% sched_debug.cfs_rq[38]:/.tg->runnable_avg
14838 Â 7% +126.6% 33617 Â 3% sched_debug.cfs_rq[18]:/.avg->runnable_avg_sum
16677 Â 4% +183.2% 47238 Â 1% sched_debug.cfs_rq[50]:/.tg->runnable_avg
16675 Â 4% +183.2% 47224 Â 1% sched_debug.cfs_rq[48]:/.tg->runnable_avg
16683 Â 4% +183.3% 47256 Â 1% sched_debug.cfs_rq[52]:/.tg->runnable_avg
16666 Â 4% +183.4% 47227 Â 1% sched_debug.cfs_rq[43]:/.tg->runnable_avg
16 ± 6% +193.9% 48 ± 1% vmstat.procs.r
16679 Â 4% +183.2% 47228 Â 1% sched_debug.cfs_rq[46]:/.tg->runnable_avg
16680 Â 4% +183.0% 47213 Â 1% sched_debug.cfs_rq[47]:/.tg->runnable_avg
16686 Â 4% +183.2% 47259 Â 1% sched_debug.cfs_rq[53]:/.tg->runnable_avg
16704 Â 4% +182.9% 47264 Â 1% sched_debug.cfs_rq[55]:/.tg->runnable_avg
16700 Â 4% +183.0% 47264 Â 1% sched_debug.cfs_rq[54]:/.tg->runnable_avg
16720 Â 4% +182.7% 47261 Â 1% sched_debug.cfs_rq[57]:/.tg->runnable_avg
16730 Â 4% +182.5% 47256 Â 1% sched_debug.cfs_rq[59]:/.tg->runnable_avg
16726 Â 4% +182.6% 47266 Â 1% sched_debug.cfs_rq[58]:/.tg->runnable_avg
16734 Â 4% +182.5% 47268 Â 1% sched_debug.cfs_rq[61]:/.tg->runnable_avg
16733 Â 4% +182.4% 47260 Â 1% sched_debug.cfs_rq[60]:/.tg->runnable_avg
16714 Â 4% +182.8% 47260 Â 1% sched_debug.cfs_rq[56]:/.tg->runnable_avg
16759 Â 5% +182.1% 47272 Â 1% sched_debug.cfs_rq[63]:/.tg->runnable_avg
16740 Â 4% +182.3% 47264 Â 1% sched_debug.cfs_rq[62]:/.tg->runnable_avg
398244 Â 8% -65.7% 136524 Â 5% sched_debug.cpu#38.sched_count
348117 Â 13% -61.1% 135573 Â 9% sched_debug.cpu#49.sched_count
336174 Â 12% -62.4% 126449 Â 7% sched_debug.cpu#53.sched_count
61049 Â 5% +113.0% 130050 Â 0% sched_debug.cfs_rq[18]:/.exec_clock
398443 Â 9% -64.9% 140004 Â 3% sched_debug.cpu#34.nr_switches
3 Â 39% +323.1% 13 Â 3% sched_debug.cpu#48.cpu_load[0]
3 Â 25% +323.1% 13 Â 3% sched_debug.cpu#48.cpu_load[2]
3 Â 33% +284.6% 12 Â 4% sched_debug.cpu#40.cpu_load[3]
3 Â 31% +292.9% 13 Â 3% sched_debug.cpu#48.cpu_load[1]
370157 Â 11% -65.0% 129455 Â 4% sched_debug.cpu#48.nr_switches
173811 Â 10% -63.9% 62775 Â 0% sched_debug.cpu#48.ttwu_count
376459 Â 7% -64.9% 132057 Â 9% sched_debug.cpu#35.sched_count
5 Â 20% +147.6% 13 Â 14% sched_debug.cfs_rq[7]:/.runnable_load_avg
4 Â 24% +200.0% 13 Â 8% sched_debug.cpu#38.cpu_load[3]
4 Â 30% +231.2% 13 Â 9% sched_debug.cpu#54.cpu_load[0]
5 Â 37% +160.0% 13 Â 12% sched_debug.cfs_rq[38]:/.runnable_load_avg
5 Â 42% +155.0% 12 Â 10% sched_debug.cpu#50.cpu_load[0]
398843 Â 9% -64.4% 141829 Â 4% sched_debug.cpu#34.sched_count
29862 Â 10% +315.1% 123964 Â 0% sched_debug.cfs_rq[37]:/.exec_clock
69.47 ± 1% -64.5% 24.64 ± 1% turbostat.%c1
326 Â 5% +125.0% 734 Â 6% sched_debug.cfs_rq[22]:/.tg_runnable_contrib
14948 Â 5% +125.7% 33739 Â 6% sched_debug.cfs_rq[22]:/.avg->runnable_avg_sum
565 Â 18% +214.6% 1779 Â 13% sched_debug.cpu#37.ttwu_local
452 Â 18% +283.9% 1737 Â 18% sched_debug.cpu#49.ttwu_local
721 Â 23% +174.1% 1978 Â 7% sched_debug.cpu#62.curr->pid
29298 Â 4% +320.8% 123279 Â 1% sched_debug.cfs_rq[34]:/.exec_clock
40698 Â 6% +232.5% 135315 Â 0% sched_debug.cfs_rq[59]:/.exec_clock
534 Â 12% +248.3% 1862 Â 30% sched_debug.cpu#38.ttwu_local
35 Â 35% -54.3% 16 Â 8% sched_debug.cfs_rq[14]:/.load
35 Â 32% -55.2% 16 Â 8% sched_debug.cpu#14.load
7610 Â 9% +311.8% 31343 Â 13% sched_debug.cfs_rq[36]:/.avg->runnable_avg_sum
686 Â 23% +179.2% 1915 Â 2% sched_debug.cpu#17.curr->pid
166 Â 9% +310.7% 681 Â 13% sched_debug.cfs_rq[36]:/.tg_runnable_contrib
368896 Â 7% -62.8% 137215 Â 7% sched_debug.cpu#52.sched_count
791 Â 30% +117.7% 1722 Â 9% sched_debug.cpu#20.curr->pid
5 Â 24% +152.4% 13 Â 6% sched_debug.cfs_rq[62]:/.runnable_load_avg
6 Â 26% +140.0% 15 Â 12% sched_debug.cpu#61.cpu_load[0]
5 Â 18% +160.9% 15 Â 8% sched_debug.cpu#58.cpu_load[2]
6 Â 26% +140.0% 15 Â 12% sched_debug.cpu#61.cpu_load[1]
680 Â 24% +198.1% 2028 Â 12% sched_debug.cpu#42.curr->pid
174536 Â 16% +243.0% 598575 Â 24% sched_debug.cfs_rq[24]:/.spread0
606 Â 34% +242.2% 2075 Â 8% sched_debug.cpu#41.curr->pid
9829 Â 11% +263.3% 35712 Â 5% sched_debug.cfs_rq[60]:/.avg->runnable_avg_sum
213 Â 11% +263.4% 776 Â 5% sched_debug.cfs_rq[60]:/.tg_runnable_contrib
4 Â 40% +172.2% 12 Â 10% sched_debug.cfs_rq[42]:/.runnable_load_avg
4 Â 19% +211.8% 13 Â 8% sched_debug.cpu#52.cpu_load[2]
5 Â 48% +131.8% 12 Â 8% sched_debug.cfs_rq[46]:/.runnable_load_avg
4 Â 11% +255.6% 16 Â 4% sched_debug.cpu#57.cpu_load[2]
4 Â 19% +205.9% 13 Â 5% sched_debug.cpu#52.cpu_load[1]
4 Â 30% +182.4% 12 Â 14% sched_debug.cfs_rq[51]:/.runnable_load_avg
4 Â 25% +231.2% 13 Â 3% sched_debug.cpu#52.cpu_load[0]
5 Â 14% +150.0% 12 Â 14% sched_debug.cpu#39.cpu_load[2]
4 Â 25% +225.0% 13 Â 5% sched_debug.cfs_rq[52]:/.runnable_load_avg
4 Â 40% +172.2% 12 Â 17% sched_debug.cpu#42.cpu_load[0]
4 Â 34% +194.1% 12 Â 8% sched_debug.cpu#51.cpu_load[2]
4 Â 24% +177.8% 12 Â 14% sched_debug.cpu#35.cpu_load[3]
1308 Â 1% +126.4% 2962 Â 49% sched_debug.cpu#14.ttwu_local
530 Â 29% +206.5% 1626 Â 12% sched_debug.cpu#50.ttwu_local
170379 Â 7% -66.0% 57923 Â 1% sched_debug.cpu#55.sched_goidle
9222 Â 9% +248.6% 32152 Â 4% sched_debug.cfs_rq[38]:/.avg->runnable_avg_sum
523 Â 36% +237.7% 1766 Â 7% sched_debug.cpu#59.curr->pid
200 Â 9% +249.4% 698 Â 4% sched_debug.cfs_rq[38]:/.tg_runnable_contrib
540 Â 22% +250.5% 1894 Â 6% sched_debug.cpu#60.curr->pid
167034 Â 14% -62.4% 62885 Â 5% sched_debug.cpu#51.sched_goidle
8 Â 23% +81.2% 14 Â 14% sched_debug.cpu#23.cpu_load[2]
584 Â 25% +189.3% 1691 Â 6% sched_debug.cpu#43.ttwu_local
59537 Â 3% +114.7% 127808 Â 1% sched_debug.cfs_rq[21]:/.exec_clock
3500275 Â 3% +151.4% 8797983 Â 0% softirqs.TIMER
341326 Â 7% -64.6% 120668 Â 1% sched_debug.cpu#55.nr_switches
370336 Â 11% -62.9% 137515 Â 7% sched_debug.cpu#48.sched_count
5 Â 25% +178.3% 16 Â 22% sched_debug.cpu#1.cpu_load[4]
4 Â 45% +222.2% 14 Â 7% sched_debug.cpu#56.cpu_load[3]
2 Â 0% +125.0% 4 Â 24% sched_debug.cfs_rq[0]:/.nr_spread_over
5 Â 31% +175.0% 13 Â 9% sched_debug.cpu#38.cpu_load[2]
190067 Â 3% -66.9% 62977 Â 3% sched_debug.cpu#33.sched_goidle
8888 Â 12% +265.4% 32480 Â 2% sched_debug.cfs_rq[34]:/.avg->runnable_avg_sum
341372 Â 7% -63.7% 123871 Â 3% sched_debug.cpu#55.sched_count
259809 Â 2% -73.9% 67736 Â 1% sched_debug.cpu#56.ttwu_count
193 Â 12% +265.1% 706 Â 2% sched_debug.cfs_rq[34]:/.tg_runnable_contrib
334659 Â 14% -61.1% 130211 Â 5% sched_debug.cpu#51.nr_switches
1453 Â 22% +136.1% 3431 Â 24% sched_debug.cpu#12.ttwu_local
328 Â 5% +137.7% 780 Â 6% sched_debug.cfs_rq[23]:/.tg_runnable_contrib
7 Â 17% +93.1% 14 Â 7% sched_debug.cpu#18.cpu_load[3]
190052 Â 4% -66.3% 64136 Â 1% sched_debug.cpu#33.ttwu_count
181149 Â 7% -65.5% 62508 Â 0% sched_debug.cpu#55.ttwu_count
380828 Â 3% -65.7% 130803 Â 3% sched_debug.cpu#33.nr_switches
838 Â 11% +128.7% 1918 Â 7% sched_debug.cpu#23.curr->pid
60928 Â 5% +111.6% 128950 Â 1% sched_debug.cfs_rq[22]:/.exec_clock
5 Â 41% +145.5% 13 Â 13% sched_debug.cpu#41.cpu_load[1]
15049 Â 5% +138.4% 35879 Â 6% sched_debug.cfs_rq[23]:/.avg->runnable_avg_sum
782 Â 49% +144.9% 1917 Â 19% sched_debug.cpu#34.curr->pid
4 Â 36% +200.0% 13 Â 13% sched_debug.cpu#41.cpu_load[2]
14510 Â 3% +133.1% 33826 Â 3% sched_debug.cfs_rq[20]:/.avg->runnable_avg_sum
316 Â 3% +132.9% 737 Â 3% sched_debug.cfs_rq[20]:/.tg_runnable_contrib
612 Â 36% +200.3% 1839 Â 4% sched_debug.cpu#50.curr->pid
337 Â 4% +109.3% 705 Â 5% sched_debug.cfs_rq[21]:/.tg_runnable_contrib
382766 Â 3% -64.5% 135808 Â 4% sched_debug.cpu#33.sched_count
14583 Â 4% +141.9% 35280 Â 5% sched_debug.cfs_rq[13]:/.avg->runnable_avg_sum
318 Â 4% +141.3% 768 Â 5% sched_debug.cfs_rq[13]:/.tg_runnable_contrib
15468 Â 5% +109.6% 32424 Â 5% sched_debug.cfs_rq[21]:/.avg->runnable_avg_sum
787 Â 29% +171.7% 2138 Â 7% sched_debug.cpu#24.curr->pid
20340 Â 1% -57.8% 8589 Â 1% uptime.idle
950 Â 35% +98.9% 1890 Â 11% sched_debug.cpu#2.curr->pid
315 Â 7% +130.6% 726 Â 6% sched_debug.cfs_rq[12]:/.tg_runnable_contrib
14440 Â 7% +131.2% 33389 Â 6% sched_debug.cfs_rq[12]:/.avg->runnable_avg_sum
432 Â 5% +124.7% 972 Â 10% sched_debug.cpu#60.ttwu_local
5 Â 20% +175.0% 13 Â 7% sched_debug.cpu#62.cpu_load[3]
5 Â 20% +195.5% 16 Â 27% sched_debug.cpu#8.cpu_load[4]
5 Â 14% +143.5% 14 Â 17% sched_debug.cpu#15.cpu_load[4]
6 Â 17% +124.0% 14 Â 13% sched_debug.cpu#13.cpu_load[4]
5 Â 20% +154.5% 14 Â 8% sched_debug.cpu#5.cpu_load[4]
5 Â 14% +134.8% 13 Â 12% sched_debug.cpu#22.cpu_load[4]
5 Â 9% +150.0% 13 Â 7% sched_debug.cpu#60.cpu_load[0]
335869 Â 14% -59.3% 136745 Â 7% sched_debug.cpu#51.sched_count
866 Â 17% +113.6% 1850 Â 17% sched_debug.cpu#16.curr->pid
14550 Â 1% +136.4% 34401 Â 3% sched_debug.cfs_rq[19]:/.avg->runnable_avg_sum
843 Â 20% +139.5% 2020 Â 7% sched_debug.cpu#11.curr->pid
598054 Â 10% -53.8% 276304 Â 21% sched_debug.cpu#42.avg_idle
7 Â 14% +64.5% 12 Â 6% sched_debug.cpu#12.cpu_load[1]
317 Â 1% +135.9% 749 Â 3% sched_debug.cfs_rq[19]:/.tg_runnable_contrib
826 Â 17% +146.3% 2036 Â 8% sched_debug.cpu#3.curr->pid
4 Â 43% +173.7% 13 Â 14% sched_debug.cfs_rq[41]:/.runnable_load_avg
15098 Â 6% +130.7% 34838 Â 2% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
148254 Â 11% -55.9% 65306 Â 0% sched_debug.cpu#45.ttwu_count
329 Â 6% +130.3% 758 Â 2% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
170167 Â 9% -63.0% 63030 Â 2% sched_debug.cpu#53.ttwu_count
343 Â 8% +113.8% 734 Â 4% sched_debug.cfs_rq[4]:/.tg_runnable_contrib
6 Â 23% +130.8% 15 Â 8% sched_debug.cpu#58.cpu_load[1]
14279 Â 4% +138.7% 34087 Â 1% sched_debug.cfs_rq[8]:/.avg->runnable_avg_sum
311 Â 4% +137.7% 741 Â 1% sched_debug.cfs_rq[8]:/.tg_runnable_contrib
939 Â 17% +97.2% 1852 Â 10% sched_debug.cpu#35.ttwu_local
148294 Â 16% -56.5% 64514 Â 1% sched_debug.cpu#45.sched_goidle
15760 Â 8% +113.6% 33671 Â 4% sched_debug.cfs_rq[4]:/.avg->runnable_avg_sum
8 Â 19% +68.8% 13 Â 3% sched_debug.cpu#18.cpu_load[2]
4 Â 33% +177.8% 12 Â 16% sched_debug.cfs_rq[32]:/.runnable_load_avg
180819 Â 5% -67.1% 59533 Â 3% sched_debug.cpu#39.sched_goidle
2125 Â 39% +45.1% 3083 Â 38% numa-vmstat.node2.nr_anon_pages
8496 Â 39% +45.2% 12333 Â 38% numa-meminfo.node2.AnonPages
177750 Â 9% -65.1% 61959 Â 1% sched_debug.cpu#50.ttwu_count
5 Â 20% +147.6% 13 Â 14% sched_debug.cpu#32.cpu_load[1]
15552 Â 2% +124.5% 34911 Â 5% sched_debug.cfs_rq[6]:/.avg->runnable_avg_sum
15315 Â 4% +121.2% 33874 Â 5% sched_debug.cfs_rq[10]:/.avg->runnable_avg_sum
339 Â 2% +124.3% 760 Â 5% sched_debug.cfs_rq[6]:/.tg_runnable_contrib
297130 Â 16% -54.9% 134084 Â 2% sched_debug.cpu#45.nr_switches
324 Â 7% +126.0% 732 Â 3% sched_debug.cfs_rq[18]:/.tg_runnable_contrib
319 Â 5% +124.3% 716 Â 3% sched_debug.cfs_rq[11]:/.tg_runnable_contrib
334 Â 4% +120.6% 737 Â 4% sched_debug.cfs_rq[10]:/.tg_runnable_contrib
14627 Â 5% +124.6% 32849 Â 3% sched_debug.cfs_rq[11]:/.avg->runnable_avg_sum
62 Â 15% +81.7% 114 Â 22% sched_debug.cfs_rq[35]:/.tg_load_contrib
57625 Â 3% +126.3% 130409 Â 1% sched_debug.cfs_rq[9]:/.exec_clock
15710 Â 6% +126.1% 35526 Â 3% sched_debug.cfs_rq[3]:/.avg->runnable_avg_sum
17785 Â 4% +114.5% 38148 Â 2% sched_debug.cfs_rq[24]:/.avg->runnable_avg_sum
362283 Â 5% -65.8% 123900 Â 3% sched_debug.cpu#39.nr_switches
9255 Â 40% +41.6% 13110 Â 30% numa-meminfo.node2.Active(anon)
2315 Â 40% +41.6% 3277 Â 30% numa-vmstat.node2.nr_active_anon
388 Â 4% +113.7% 829 Â 2% sched_debug.cfs_rq[24]:/.tg_runnable_contrib
14664 Â 7% +125.3% 33037 Â 5% sched_debug.cfs_rq[14]:/.avg->runnable_avg_sum
297259 Â 16% -53.6% 138074 Â 1% sched_debug.cpu#45.sched_count
57759 Â 3% +124.4% 129607 Â 3% sched_debug.cfs_rq[12]:/.exec_clock
343 Â 6% +125.0% 773 Â 3% sched_debug.cfs_rq[3]:/.tg_runnable_contrib
320 Â 8% +124.7% 719 Â 5% sched_debug.cfs_rq[14]:/.tg_runnable_contrib
15258 Â 8% +123.9% 34162 Â 6% sched_debug.cfs_rq[1]:/.avg->runnable_avg_sum
355 Â 8% +113.2% 757 Â 1% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
59869 Â 4% +119.1% 131193 Â 0% sched_debug.cfs_rq[17]:/.exec_clock
60764 Â 1% +113.8% 129936 Â 3% sched_debug.cfs_rq[8]:/.exec_clock
16268 Â 8% +113.6% 34751 Â 1% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
333 Â 8% +123.1% 743 Â 6% sched_debug.cfs_rq[1]:/.tg_runnable_contrib
15124 Â 9% +125.6% 34123 Â 6% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
333 Â 7% +117.8% 726 Â 4% sched_debug.cfs_rq[5]:/.tg_runnable_contrib
177540 Â 9% -64.7% 62756 Â 1% sched_debug.cpu#54.ttwu_count
15308 Â 7% +118.0% 33371 Â 4% sched_debug.cfs_rq[5]:/.avg->runnable_avg_sum
59954 Â 6% +117.7% 130516 Â 0% sched_debug.cfs_rq[11]:/.exec_clock
15754 Â 10% +107.9% 32747 Â 6% sched_debug.cfs_rq[16]:/.avg->runnable_avg_sum
5 Â 22% +134.8% 13 Â 11% sched_debug.cpu#9.cpu_load[3]
331 Â 9% +124.8% 744 Â 6% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
343 Â 10% +107.4% 712 Â 6% sched_debug.cfs_rq[16]:/.tg_runnable_contrib
5 Â 20% +127.3% 12 Â 14% sched_debug.cpu#39.cpu_load[0]
4 Â 27% +168.4% 12 Â 8% sched_debug.cpu#51.cpu_load[1]
5 Â 14% +113.0% 12 Â 6% sched_debug.cfs_rq[17]:/.runnable_load_avg
5 Â 30% +122.7% 12 Â 17% sched_debug.cfs_rq[35]:/.runnable_load_avg
4 Â 27% +157.9% 12 Â 10% sched_debug.cpu#51.cpu_load[0]
5 Â 20% +127.3% 12 Â 14% sched_debug.cpu#39.cpu_load[1]
5 Â 32% +131.8% 12 Â 8% sched_debug.cpu#63.cpu_load[3]
6 Â 19% +81.5% 12 Â 12% sched_debug.cpu#16.cpu_load[4]
5 Â 24% +138.1% 12 Â 14% sched_debug.cpu#35.cpu_load[2]
183707 Â 5% -65.1% 64034 Â 1% sched_debug.cpu#39.ttwu_count
182822 Â 7% -64.0% 65856 Â 4% sched_debug.cpu#54.sched_goidle
3 Â 43% +233.3% 12 Â 8% sched_debug.cpu#34.cpu_load[4]
56 Â 20% +78.4% 101 Â 24% sched_debug.cfs_rq[35]:/.blocked_load_avg
139252 Â 16% -54.0% 64000 Â 2% sched_debug.cpu#42.ttwu_count
60499 Â 4% +116.0% 130668 Â 0% sched_debug.cfs_rq[16]:/.exec_clock
15134 Â 9% +129.3% 34698 Â 2% sched_debug.cfs_rq[17]:/.avg->runnable_avg_sum
60021 Â 4% +116.8% 130132 Â 1% sched_debug.cfs_rq[20]:/.exec_clock
60178 Â 7% +119.3% 131954 Â 3% sched_debug.cfs_rq[10]:/.exec_clock
60603 Â 4% +116.1% 130941 Â 1% sched_debug.cfs_rq[19]:/.exec_clock
5 Â 22% +147.8% 14 Â 9% sched_debug.cpu#19.cpu_load[4]
6 Â 21% +133.3% 15 Â 19% sched_debug.cpu#1.cpu_load[3]
7 Â 20% +114.3% 15 Â 12% sched_debug.cpu#23.cpu_load[3]
7 Â 10% +103.6% 14 Â 5% sched_debug.cpu#3.cpu_load[4]
6 Â 26% +125.0% 13 Â 15% sched_debug.cfs_rq[11]:/.runnable_load_avg
5 Â 37% +145.5% 13 Â 12% sched_debug.cpu#38.cpu_load[1]
6 Â 20% +140.0% 15 Â 12% sched_debug.cpu#23.cpu_load[4]
6 Â 26% +103.7% 13 Â 13% sched_debug.cfs_rq[19]:/.runnable_load_avg
887 Â 19% +138.4% 2114 Â 1% sched_debug.cpu#1.curr->pid
330 Â 9% +128.9% 756 Â 2% sched_debug.cfs_rq[17]:/.tg_runnable_contrib
61838 Â 3% +113.9% 132255 Â 0% sched_debug.cfs_rq[1]:/.exec_clock
125542 Â 9% -48.0% 65277 Â 2% sched_debug.cpu#41.sched_goidle
363290 Â 5% -64.2% 129971 Â 3% sched_debug.cpu#39.sched_count
61549 Â 2% +110.5% 129558 Â 1% sched_debug.cfs_rq[5]:/.exec_clock
179177 Â 9% -64.1% 64237 Â 4% sched_debug.cpu#50.sched_goidle
62105 Â 2% +110.7% 130860 Â 1% sched_debug.cfs_rq[4]:/.exec_clock
366224 Â 7% -62.8% 136064 Â 4% sched_debug.cpu#54.nr_switches
785 Â 17% +144.2% 1918 Â 6% sched_debug.cpu#63.curr->pid
5 Â 24% +155.0% 12 Â 15% sched_debug.cpu#32.cpu_load[0]
366355 Â 7% -61.6% 140522 Â 7% sched_debug.cpu#54.sched_count
17897 Â 7% +111.1% 37790 Â 4% sched_debug.cfs_rq[31]:/.avg->runnable_avg_sum
18221 Â 8% +102.9% 36980 Â 4% sched_debug.cfs_rq[25]:/.avg->runnable_avg_sum
139298 Â 21% -49.4% 70496 Â 4% sched_debug.cpu#44.sched_goidle
533 Â 26% +188.1% 1535 Â 9% sched_debug.cpu#51.ttwu_local
251646 Â 9% -46.2% 135449 Â 2% sched_debug.cpu#41.nr_switches
81 Â 31% +92.0% 156 Â 16% sched_debug.cpu#27.nr_uninterruptible
397 Â 8% +102.3% 803 Â 4% sched_debug.cfs_rq[25]:/.tg_runnable_contrib
62563 Â 4% +111.3% 132217 Â 1% sched_debug.cfs_rq[2]:/.exec_clock
15 Â 20% +68.3% 25 Â 42% sched_debug.cfs_rq[61]:/.load
15 Â 20% +68.3% 25 Â 42% sched_debug.cpu#61.load
391 Â 7% +110.3% 822 Â 4% sched_debug.cfs_rq[31]:/.tg_runnable_contrib
252106 Â 9% -45.5% 137280 Â 3% sched_debug.cpu#41.sched_count
637864 Â 12% -48.7% 327236 Â 15% sched_debug.cpu#46.avg_idle
358959 Â 9% -63.0% 132896 Â 5% sched_debug.cpu#50.nr_switches
4 Â 31% +173.7% 13 Â 9% sched_debug.cpu#32.cpu_load[2]
4 Â 25% +206.2% 12 Â 8% sched_debug.cpu#55.cpu_load[3]
62121 Â 4% +110.7% 130896 Â 1% sched_debug.cfs_rq[6]:/.exec_clock
64 Â 23% +111.6% 136 Â 27% sched_debug.cpu#8.nr_uninterruptible
359157 Â 9% -55.6% 159516 Â 28% sched_debug.cpu#50.sched_count
923 Â 21% +112.5% 1963 Â 10% sched_debug.cpu#19.curr->pid
61984 Â 4% +109.6% 129941 Â 0% sched_debug.cfs_rq[7]:/.exec_clock
1121 Â 19% +89.5% 2125 Â 5% sched_debug.cpu#31.curr->pid
17994 Â 5% +98.5% 35718 Â 1% sched_debug.cfs_rq[27]:/.avg->runnable_avg_sum
102 Â 36% -47.1% 54 Â 24% sched_debug.cfs_rq[58]:/.blocked_load_avg
392 Â 5% +97.5% 775 Â 1% sched_debug.cfs_rq[27]:/.tg_runnable_contrib
279111 Â 21% -47.7% 145948 Â 4% sched_debug.cpu#44.nr_switches
62503 Â 4% +109.2% 130769 Â 0% sched_debug.cfs_rq[3]:/.exec_clock
129420 Â 6% -48.2% 67042 Â 2% sched_debug.cpu#40.ttwu_count
8 Â 13% +55.9% 13 Â 6% sched_debug.cpu#18.cpu_load[1]
18170 Â 8% +99.1% 36176 Â 6% sched_debug.cfs_rq[26]:/.avg->runnable_avg_sum
396 Â 8% +98.7% 788 Â 6% sched_debug.cfs_rq[26]:/.tg_runnable_contrib
1100 Â 5% +91.6% 2108 Â 8% sched_debug.cpu#34.ttwu_local
125224 Â 22% -47.0% 66368 Â 1% sched_debug.cpu#44.ttwu_count
131648 Â 15% -51.2% 64301 Â 0% sched_debug.cpu#43.ttwu_count
5 Â 25% +117.4% 12 Â 6% sched_debug.cpu#20.cpu_load[4]
8 Â 13% +87.9% 15 Â 18% sched_debug.cpu#36.load
7 Â 17% +132.1% 16 Â 27% sched_debug.cpu#8.cpu_load[3]
1 Â 33% +150.0% 3 Â 22% sched_debug.cfs_rq[13]:/.nr_spread_over
5 Â 37% +163.6% 14 Â 10% sched_debug.cpu#56.cpu_load[2]
6 Â 16% +111.1% 14 Â 7% sched_debug.cpu#7.cpu_load[3]
5 Â 31% +130.4% 13 Â 11% sched_debug.cpu#38.cpu_load[0]
8 Â 13% +81.8% 15 Â 16% sched_debug.cfs_rq[36]:/.load
6 Â 13% +128.0% 14 Â 7% sched_debug.cpu#7.cpu_load[4]
6 Â 25% +107.7% 13 Â 8% sched_debug.cpu#62.cpu_load[0]
7 Â 10% +92.9% 13 Â 16% sched_debug.cpu#15.cpu_load[3]
6 Â 26% +103.7% 13 Â 3% sched_debug.cpu#17.cpu_load[4]
5 Â 41% +152.2% 14 Â 10% sched_debug.cpu#56.cpu_load[1]
5 Â 41% +139.1% 13 Â 7% sched_debug.cpu#56.cpu_load[0]
8 Â 13% +69.7% 14 Â 14% sched_debug.cpu#19.cpu_load[1]
6 Â 16% +85.2% 12 Â 12% sched_debug.cfs_rq[16]:/.runnable_load_avg
6 Â 26% +129.2% 13 Â 7% sched_debug.cpu#62.cpu_load[2]
5 Â 31% +130.4% 13 Â 9% sched_debug.cpu#35.cpu_load[0]
7 Â 20% +103.6% 14 Â 12% sched_debug.cpu#7.cpu_load[0]
5 Â 24% +133.3% 12 Â 8% sched_debug.cpu#55.cpu_load[2]
6 Â 33% +122.2% 15 Â 8% sched_debug.cpu#58.cpu_load[0]
18222 Â 6% +96.9% 35875 Â 4% sched_debug.cfs_rq[28]:/.avg->runnable_avg_sum
146795 Â 16% -53.1% 68791 Â 3% sched_debug.cpu#42.sched_goidle
128302 Â 7% -48.8% 65646 Â 1% sched_debug.cpu#41.ttwu_count
397 Â 6% +96.2% 780 Â 4% sched_debug.cfs_rq[28]:/.tg_runnable_contrib
279405 Â 21% -45.8% 151308 Â 3% sched_debug.cpu#44.sched_count
68673 Â 1% +101.0% 138021 Â 1% sched_debug.cfs_rq[0]:/.exec_clock
72077 Â 5% +100.8% 144728 Â 0% sched_debug.cpu#46.nr_load_updates
71626 Â 3% +101.8% 144571 Â 0% sched_debug.cpu#47.nr_load_updates
440 Â 10% +96.9% 867 Â 8% sched_debug.cpu#61.ttwu_local
141858 Â 1% -50.0% 70866 Â 3% sched_debug.cpu#40.sched_goidle
33 Â 36% -49.3% 17 Â 4% sched_debug.cfs_rq[0]:/.load
73919 Â 0% +97.9% 146324 Â 1% sched_debug.cpu#40.nr_load_updates
73274 Â 3% +98.0% 145097 Â 0% sched_debug.cpu#41.nr_load_updates
17741 Â 7% +102.7% 35954 Â 1% sched_debug.cfs_rq[30]:/.avg->runnable_avg_sum
73236 Â 2% +98.0% 145027 Â 0% sched_debug.cfs_rq[24]:/.exec_clock
72077 Â 2% +99.4% 143704 Â 0% sched_debug.cfs_rq[25]:/.exec_clock
387 Â 7% +102.1% 783 Â 1% sched_debug.cfs_rq[30]:/.tg_runnable_contrib
5 Â 46% +145.0% 12 Â 12% sched_debug.cpu#34.cpu_load[3]
33 Â 39% -49.3% 17 Â 7% sched_debug.cpu#0.load
294115 Â 16% -51.6% 142381 Â 3% sched_debug.cpu#42.nr_switches
933 Â 18% +112.0% 1978 Â 20% sched_debug.cpu#22.curr->pid
850 Â 14% +111.4% 1797 Â 4% sched_debug.cpu#4.curr->pid
5 Â 20% +118.2% 12 Â 15% sched_debug.cpu#55.cpu_load[0]
71684 Â 3% +95.6% 140217 Â 0% sched_debug.cfs_rq[29]:/.exec_clock
73607 Â 6% +98.7% 146254 Â 1% sched_debug.cpu#44.nr_load_updates
114289 Â 13% -42.7% 65451 Â 0% sched_debug.cpu#47.ttwu_count
71217 Â 3% +96.7% 140070 Â 0% sched_debug.cfs_rq[28]:/.exec_clock
72279 Â 3% +95.6% 141391 Â 0% sched_debug.cfs_rq[26]:/.exec_clock
480 Â 2% +76.0% 845 Â 11% sched_debug.cpu#63.ttwu_local
71428 Â 4% +95.5% 139650 Â 0% sched_debug.cfs_rq[31]:/.exec_clock
72174 Â 4% +94.9% 140663 Â 0% sched_debug.cfs_rq[27]:/.exec_clock
73935 Â 4% +95.9% 144836 Â 0% sched_debug.cpu#43.nr_load_updates
469 Â 6% +92.1% 902 Â 13% sched_debug.cpu#59.ttwu_local
1004 Â 21% +95.8% 1966 Â 4% sched_debug.cpu#29.curr->pid
1103 Â 13% +68.5% 1859 Â 11% sched_debug.cpu#28.curr->pid
284228 Â 1% -48.4% 146748 Â 3% sched_debug.cpu#40.nr_switches
533580 Â 6% -43.9% 299569 Â 15% sched_debug.cpu#52.avg_idle
985 Â 9% +95.8% 1928 Â 5% sched_debug.cpu#33.ttwu_local
150 Â 16% -55.5% 67 Â 32% sched_debug.cfs_rq[52]:/.blocked_load_avg
8 Â 9% +91.4% 16 Â 4% sched_debug.cpu#24.cpu_load[4]
34 Â 36% -51.4% 16 Â 14% sched_debug.cfs_rq[28]:/.load
9 Â 41% +77.8% 16 Â 11% sched_debug.cpu#54.load
9 Â 41% +77.8% 16 Â 11% sched_debug.cfs_rq[54]:/.load
288105 Â 2% -48.2% 149355 Â 3% sched_debug.cpu#40.sched_count
574987 Â 15% -44.0% 321728 Â 24% sched_debug.cpu#49.avg_idle
75106 Â 6% +92.2% 144349 Â 0% sched_debug.cpu#42.nr_load_updates
887 Â 23% +117.8% 1933 Â 10% sched_debug.cpu#5.curr->pid
7 Â 10% +80.6% 14 Â 11% sched_debug.cpu#19.cpu_load[2]
7 Â 14% +86.7% 14 Â 11% sched_debug.cpu#5.cpu_load[2]
7 Â 14% +103.6% 14 Â 9% sched_debug.cpu#19.cpu_load[3]
7 Â 19% +83.9% 14 Â 5% sched_debug.cpu#3.cpu_load[0]
6 Â 41% +103.8% 13 Â 13% sched_debug.cfs_rq[9]:/.runnable_load_avg
7 Â 19% +93.5% 15 Â 12% sched_debug.cpu#1.cpu_load[2]
7 Â 26% +100.0% 14 Â 8% sched_debug.cfs_rq[1]:/.runnable_load_avg
72318 Â 4% +91.9% 138766 Â 0% sched_debug.cfs_rq[30]:/.exec_clock
294503 Â 16% -50.5% 145879 Â 4% sched_debug.cpu#42.sched_count
6 Â 21% +77.8% 12 Â 8% sched_debug.cpu#20.cpu_load[2]
7 Â 20% +80.0% 13 Â 15% sched_debug.cpu#13.cpu_load[3]
7 Â 20% +70.0% 12 Â 3% sched_debug.cfs_rq[12]:/.runnable_load_avg
7 Â 36% +82.1% 12 Â 8% sched_debug.cpu#63.cpu_load[2]
6 Â 20% +104.0% 12 Â 6% sched_debug.cpu#12.cpu_load[4]
5 Â 20% +122.7% 12 Â 13% sched_debug.cpu#55.cpu_load[1]
7 Â 35% +73.3% 13 Â 5% sched_debug.cfs_rq[0]:/.runnable_load_avg
6 Â 20% +104.0% 12 Â 11% sched_debug.cpu#35.cpu_load[1]
7 Â 26% +71.4% 12 Â 8% sched_debug.cpu#20.cpu_load[1]
6 Â 6% +92.6% 13 Â 5% sched_debug.cfs_rq[3]:/.runnable_load_avg
6 Â 7% +107.7% 13 Â 11% sched_debug.cpu#6.cpu_load[4]
7 Â 26% +82.1% 12 Â 3% sched_debug.cpu#4.cpu_load[4]
6 Â 27% +96.2% 12 Â 3% sched_debug.cpu#14.cpu_load[4]
7 Â 10% +71.4% 12 Â 13% sched_debug.cpu#16.cpu_load[3]
152023 Â 25% +155.4% 388338 Â 34% sched_debug.cfs_rq[28]:/.spread0
905 Â 21% +107.1% 1874 Â 15% sched_debug.cpu#6.curr->pid
1065 Â 14% +82.7% 1945 Â 8% sched_debug.cpu#30.curr->pid
109213 Â 14% -39.5% 66094 Â 2% sched_debug.cpu#47.sched_goidle
969 Â 27% +110.6% 2042 Â 3% sched_debug.cpu#9.curr->pid
123361 Â 14% -45.7% 66953 Â 4% sched_debug.cpu#43.sched_goidle
76544 Â 5% +90.2% 145574 Â 0% sched_debug.cpu#45.nr_load_updates
624604 Â 7% -46.8% 331988 Â 13% sched_debug.cpu#44.avg_idle
218936 Â 14% -37.2% 137507 Â 2% sched_debug.cpu#47.nr_switches
519754 Â 12% -38.6% 318976 Â 6% sched_debug.cpu#33.avg_idle
1048 Â 4% +90.7% 1999 Â 4% sched_debug.cpu#27.curr->pid
9 Â 32% +77.8% 16 Â 17% sched_debug.cpu#50.load
9 Â 32% +75.0% 15 Â 15% sched_debug.cfs_rq[50]:/.load
80125 Â 3% +80.9% 144977 Â 0% sched_debug.cpu#54.nr_load_updates
79744 Â 4% +82.3% 145334 Â 1% sched_debug.cpu#53.nr_load_updates
6 Â 41% +104.0% 12 Â 14% sched_debug.cpu#34.cpu_load[2]
80391 Â 2% +79.7% 144447 Â 0% sched_debug.cpu#52.nr_load_updates
80078 Â 4% +80.5% 144562 Â 0% sched_debug.cpu#48.nr_load_updates
80441 Â 5% +79.8% 144606 Â 0% sched_debug.cpu#51.nr_load_updates
80274 Â 3% +79.6% 144142 Â 0% sched_debug.cpu#55.nr_load_updates
8 Â 13% +70.6% 14 Â 14% sched_debug.cpu#5.cpu_load[1]
8 Â 16% +74.3% 15 Â 12% sched_debug.cpu#0.cpu_load[3]
8 Â 19% +96.9% 15 Â 11% sched_debug.cpu#0.cpu_load[4]
9 Â 21% +60.5% 15 Â 5% sched_debug.cfs_rq[52]:/.load
622405 Â 3% -45.1% 341805 Â 14% sched_debug.cpu#41.avg_idle
247262 Â 14% -43.8% 138983 Â 4% sched_debug.cpu#43.nr_switches
556949 Â 13% -40.8% 329934 Â 13% sched_debug.cpu#48.avg_idle
220428 Â 13% -29.8% 154692 Â 13% sched_debug.cpu#47.sched_count
1136 Â 12% +67.9% 1907 Â 7% sched_debug.cpu#26.curr->pid
852 Â 18% +117.6% 1855 Â 9% sched_debug.cpu#8.curr->pid
6 Â 23% +112.0% 13 Â 8% sched_debug.cpu#9.cpu_load[2]
80479 Â 3% +79.5% 144456 Â 0% sched_debug.cpu#50.nr_load_updates
5.655e+09 Â 3% -58.4% 2.351e+09 Â 2% cpuidle.C3-NHM.time
1846 Â 11% +59.3% 2941 Â 16% sched_debug.cpu#2.ttwu_local
7 Â 14% +80.6% 14 Â 10% sched_debug.cpu#7.cpu_load[1]
8 Â 17% +78.1% 14 Â 7% sched_debug.cpu#1.cpu_load[1]
7 Â 5% +58.1% 12 Â 14% sched_debug.cpu#16.cpu_load[2]
6 Â 19% +103.7% 13 Â 7% sched_debug.cpu#62.cpu_load[1]
7 Â 14% +74.2% 13 Â 3% sched_debug.cpu#2.cpu_load[4]
7 Â 14% +80.6% 14 Â 10% sched_debug.cpu#7.cpu_load[2]
6 Â 19% +100.0% 13 Â 8% sched_debug.cpu#5.cpu_load[3]
7 Â 14% +61.3% 12 Â 25% sched_debug.cfs_rq[15]:/.runnable_load_avg
6 Â 31% +107.7% 13 Â 13% sched_debug.cfs_rq[63]:/.runnable_load_avg
7 Â 10% +106.5% 16 Â 29% sched_debug.cpu#8.cpu_load[2]
8 Â 8% +75.0% 14 Â 5% sched_debug.cpu#3.cpu_load[3]
7 Â 23% +80.6% 14 Â 5% sched_debug.cpu#17.cpu_load[3]
8 Â 15% +96.9% 15 Â 30% sched_debug.cpu#8.cpu_load[0]
83696 Â 0% +74.1% 145676 Â 0% sched_debug.cpu#34.nr_load_updates
81108 Â 4% +77.9% 144284 Â 0% sched_debug.cpu#49.nr_load_updates
1152 Â 10% +93.6% 2230 Â 8% sched_debug.cpu#32.ttwu_local
83482 Â 4% +74.1% 145347 Â 0% sched_debug.cpu#38.nr_load_updates
563098 Â 11% -40.3% 336316 Â 11% sched_debug.cpu#53.avg_idle
473 Â 11% +73.6% 821 Â 6% sched_debug.cpu#62.ttwu_local
84691 Â 1% +72.7% 146295 Â 0% sched_debug.cpu#35.nr_load_updates
28 Â 37% -43.8% 15 Â 16% sched_debug.cfs_rq[13]:/.load
27 Â 38% -42.7% 15 Â 16% sched_debug.cpu#13.load
82882 Â 2% +75.6% 145577 Â 0% sched_debug.cpu#39.nr_load_updates
498258 Â 7% -32.4% 336924 Â 21% sched_debug.cpu#34.avg_idle
83560 Â 1% +74.4% 145717 Â 0% sched_debug.cpu#36.nr_load_updates
469 Â 10% +71.8% 807 Â 8% sched_debug.cpu#57.ttwu_local
247486 Â 14% -41.8% 143921 Â 3% sched_debug.cpu#43.sched_count
155 Â 17% -49.3% 78 Â 43% sched_debug.cfs_rq[49]:/.blocked_load_avg
527328 Â 14% -32.7% 354686 Â 30% sched_debug.cpu#50.avg_idle
951 Â 18% +96.5% 1868 Â 12% sched_debug.cpu#21.curr->pid
446 Â 8% +88.5% 840 Â 8% sched_debug.cpu#56.ttwu_local
6 Â 17% +84.6% 12 Â 5% sched_debug.cpu#20.cpu_load[3]
84258 Â 2% +72.9% 145705 Â 0% sched_debug.cpu#33.nr_load_updates
84000 Â 3% +73.6% 145816 Â 0% sched_debug.cpu#37.nr_load_updates
7 Â 48% +103.3% 15 Â 15% sched_debug.cfs_rq[49]:/.load
10 Â 36% +67.5% 16 Â 8% sched_debug.cpu#41.load
553971 Â 16% -38.8% 338817 Â 26% sched_debug.cpu#45.avg_idle
1048 Â 22% +89.7% 1988 Â 11% sched_debug.cpu#10.curr->pid
90262 Â 2% +57.1% 141765 Â 1% sched_debug.cpu#0.nr_load_updates
883 Â 3% -42.0% 512 Â 4% cpuidle.POLL.usage
2381442 Â 0% -45.9% 1288111 Â 0% cpuidle.C3-NHM.usage
503911 Â 14% -41.6% 294421 Â 20% sched_debug.cpu#54.avg_idle
8 Â 16% +60.0% 14 Â 7% sched_debug.cpu#3.cpu_load[1]
8 Â 9% +54.3% 13 Â 15% sched_debug.cpu#19.cpu_load[0]
8 Â 19% +71.9% 13 Â 9% sched_debug.cpu#17.cpu_load[1]
9 Â 21% +60.5% 15 Â 2% sched_debug.cpu#52.load
107 Â 34% -36.0% 68 Â 17% sched_debug.cfs_rq[58]:/.tg_load_contrib
112841 Â 17% -41.9% 65594 Â 1% sched_debug.cpu#46.ttwu_count
604642 Â 8% -34.9% 393664 Â 28% sched_debug.cpu#40.avg_idle
154 Â 16% -47.8% 80 Â 26% sched_debug.cfs_rq[52]:/.tg_load_contrib
7 Â 11% +73.3% 13 Â 5% sched_debug.cpu#18.cpu_load[0]
7 Â 34% +64.5% 12 Â 8% sched_debug.cpu#63.cpu_load[1]
6 Â 19% +88.9% 12 Â 6% sched_debug.cpu#12.cpu_load[3]
7 Â 6% +76.7% 13 Â 8% sched_debug.cpu#6.cpu_load[3]
5 Â 49% +108.7% 12 Â 13% sched_debug.cpu#45.cpu_load[0]
7 Â 19% +64.5% 12 Â 6% sched_debug.cpu#4.cpu_load[3]
7 Â 11% +65.5% 12 Â 13% sched_debug.cpu#21.cpu_load[3]
7 Â 38% +70.0% 12 Â 8% sched_debug.cpu#63.cpu_load[0]
7 Â 14% +70.0% 12 Â 6% sched_debug.cpu#12.cpu_load[2]
7 Â 11% +75.9% 12 Â 6% sched_debug.cpu#12.cpu_load[0]
7 Â 14% +73.3% 13 Â 9% sched_debug.cpu#10.cpu_load[4]
6 Â 43% +96.3% 13 Â 16% sched_debug.cpu#34.cpu_load[1]
7 Â 20% +82.8% 13 Â 8% sched_debug.cpu#9.cpu_load[1]
7 Â 29% +76.7% 13 Â 13% sched_debug.cpu#9.cpu_load[0]
6 Â 28% +125.9% 15 Â 37% sched_debug.cfs_rq[8]:/.runnable_load_avg
8 Â 8% +50.0% 12 Â 13% sched_debug.cpu#21.cpu_load[2]
99 Â 22% -36.4% 63 Â 40% sched_debug.cfs_rq[16]:/.tg_load_contrib
92714 Â 2% +65.3% 153258 Â 0% sched_debug.cpu#56.nr_load_updates
122255 Â 18% -42.8% 69927 Â 3% sched_debug.cpu#46.sched_goidle
1038 Â 12% +93.7% 2010 Â 21% sched_debug.cpu#13.curr->pid
637159 Â 6% -42.8% 364152 Â 16% sched_debug.cpu#47.avg_idle
9 Â 20% +75.0% 15 Â 13% sched_debug.cfs_rq[53]:/.load
8 Â 18% +77.1% 15 Â 30% sched_debug.cpu#5.cpu_load[0]
10 Â 7% +55.0% 15 Â 3% sched_debug.cpu#24.cpu_load[2]
10 Â 18% +52.5% 15 Â 9% sched_debug.cpu#0.cpu_load[2]
9 Â 15% +70.3% 15 Â 13% sched_debug.cpu#53.load
9 Â 8% +64.1% 16 Â 4% sched_debug.cpu#24.cpu_load[3]
93245 Â 3% +64.3% 153239 Â 0% sched_debug.cpu#57.nr_load_updates
93978 Â 3% +63.3% 153433 Â 0% sched_debug.cpu#58.nr_load_updates
53 Â 20% -48.1% 27 Â 39% sched_debug.cfs_rq[24]:/.tg_load_contrib
93568 Â 3% +62.6% 152138 Â 0% sched_debug.cpu#62.nr_load_updates
505486 Â 18% -33.6% 335452 Â 14% sched_debug.cpu#35.avg_idle
93233 Â 3% +62.7% 151695 Â 0% sched_debug.cpu#60.nr_load_updates
245017 Â 18% -40.9% 144877 Â 3% sched_debug.cpu#46.nr_switches
93586 Â 3% +61.9% 151487 Â 0% sched_debug.cpu#61.nr_load_updates
11 Â 18% +47.8% 17 Â 12% sched_debug.cfs_rq[58]:/.load
11 Â 27% +54.5% 17 Â 12% sched_debug.cpu#58.load
27 Â 17% -33.6% 18 Â 7% sched_debug.cfs_rq[31]:/.load
94340 Â 2% +61.7% 152558 Â 0% sched_debug.cpu#59.nr_load_updates
94212 Â 3% +60.6% 151306 Â 0% sched_debug.cpu#63.nr_load_updates
785 Â 14% +29.1% 1014 Â 18% sched_debug.cpu#30.ttwu_local
8 Â 23% +71.9% 13 Â 7% sched_debug.cpu#17.cpu_load[2]
8 Â 17% +63.6% 13 Â 6% sched_debug.cpu#2.cpu_load[3]
8 Â 13% +64.7% 14 Â 5% sched_debug.cpu#3.cpu_load[2]
9 Â 4% +48.6% 13 Â 3% sched_debug.cpu#27.cpu_load[4]
8 Â 20% +54.3% 13 Â 6% sched_debug.cpu#2.cpu_load[2]
7 Â 24% +82.8% 13 Â 11% sched_debug.cpu#17.cpu_load[0]
8 Â 16% +62.9% 14 Â 7% sched_debug.cpu#1.cpu_load[0]
8 Â 10% +93.9% 16 Â 29% sched_debug.cpu#8.cpu_load[1]
8 Â 10% +63.6% 13 Â 16% sched_debug.cpu#15.cpu_load[2]
9 Â 23% +38.9% 12 Â 12% sched_debug.cpu#16.cpu_load[0]
8 Â 5% +55.9% 13 Â 9% sched_debug.cpu#10.cpu_load[3]
8 Â 13% +47.1% 12 Â 12% sched_debug.cpu#16.cpu_load[1]
6 Â 36% +92.6% 13 Â 12% sched_debug.cfs_rq[6]:/.runnable_load_avg
502140 Â 12% -35.6% 323535 Â 15% sched_debug.cpu#51.avg_idle
746062 Â 1% -47.1% 394583 Â 0% softirqs.SCHED
1078 Â 4% +50.7% 1625 Â 9% sched_debug.cpu#22.ttwu_local
740 Â 12% +35.0% 999 Â 11% sched_debug.cpu#25.ttwu_local
245937 Â 18% -39.0% 150096 Â 4% sched_debug.cpu#46.sched_count
78 Â 43% -53.5% 36 Â 44% sched_debug.cfs_rq[57]:/.tg_load_contrib
28 Â 19% -33.9% 18 Â 8% sched_debug.cpu#31.load
7 Â 14% +76.7% 13 Â 16% sched_debug.cfs_rq[22]:/.runnable_load_avg
10 Â 12% +41.9% 15 Â 2% sched_debug.cpu#24.cpu_load[1]
22 Â 7% -31.1% 15 Â 7% sched_debug.cpu#21.load
7 Â 6% +56.7% 11 Â 9% sched_debug.cfs_rq[4]:/.runnable_load_avg
9 Â 37% +52.6% 14 Â 10% sched_debug.cpu#2.cpu_load[0]
7 Â 14% +56.7% 11 Â 18% sched_debug.cfs_rq[21]:/.runnable_load_avg
9 Â 33% +78.9% 17 Â 9% sched_debug.cpu#42.load
3.40 Â 2% -45.0% 1.87 Â 8% turbostat.%c3
73 Â 29% +66.6% 122 Â 21% sched_debug.cpu#29.nr_uninterruptible
611173 Â 6% -49.8% 306820 Â 15% sched_debug.cpu#43.avg_idle
2095578 Â 1% -31.4% 1438534 Â 0% numa-meminfo.node3.SUnreclaim
523718 Â 1% -31.3% 359624 Â 0% numa-vmstat.node3.nr_slab_unreclaimable
2106490 Â 1% -31.2% 1449462 Â 0% numa-meminfo.node3.Slab
886663 Â 1% -31.2% 609723 Â 0% numa-meminfo.node3.PageTables
221608 Â 1% -31.2% 152413 Â 0% numa-vmstat.node3.nr_page_table_pages
10 Â 23% +52.5% 15 Â 12% sched_debug.cpu#31.cpu_load[4]
9 Â 32% +70.3% 15 Â 6% sched_debug.cfs_rq[42]:/.load
86466 Â 2% +69.6% 146665 Â 1% sched_debug.cpu#32.nr_load_updates
3404 Â 17% +27.2% 4329 Â 12% numa-meminfo.node2.Mapped
8 Â 16% +51.4% 13 Â 17% sched_debug.cpu#13.cpu_load[2]
8 Â 13% +52.9% 13 Â 9% sched_debug.cpu#6.cpu_load[0]
7 Â 33% +58.1% 12 Â 15% sched_debug.cpu#21.cpu_load[0]
8 Â 10% +57.6% 13 Â 9% sched_debug.cpu#6.cpu_load[2]
8 Â 21% +47.1% 12 Â 4% sched_debug.cpu#14.cpu_load[2]
9 Â 15% +44.4% 13 Â 9% sched_debug.cpu#4.cpu_load[1]
8 Â 21% +50.0% 12 Â 6% sched_debug.cpu#4.cpu_load[2]
8 Â 15% +48.5% 12 Â 15% sched_debug.cpu#21.cpu_load[1]
8 Â 23% +71.9% 13 Â 13% sched_debug.cfs_rq[23]:/.runnable_load_avg
8 Â 24% +40.0% 12 Â 12% sched_debug.cfs_rq[10]:/.runnable_load_avg
27 Â 13% -38.0% 16 Â 14% sched_debug.cpu#28.load
403384 Â 8% -29.5% 284499 Â 10% sched_debug.cpu#56.avg_idle
852 Â 17% +26.8% 1080 Â 12% numa-vmstat.node2.nr_mapped
3218544 Â 1% -29.6% 2264649 Â 0% numa-meminfo.node3.MemUsed
703 Â 10% +33.3% 938 Â 13% sched_debug.cpu#31.ttwu_local
488743 Â 12% -37.1% 307311 Â 7% sched_debug.cpu#39.avg_idle
8 Â 37% +88.6% 16 Â 10% sched_debug.cfs_rq[39]:/.load
9 Â 39% +83.3% 16 Â 10% sched_debug.cpu#39.load
9 Â 5% +39.5% 13 Â 9% sched_debug.cpu#10.cpu_load[2]
9 Â 14% +48.6% 13 Â 3% sched_debug.cfs_rq[25]:/.runnable_load_avg
10 Â 7% +35.0% 13 Â 8% sched_debug.cpu#28.cpu_load[3]
9 Â 15% +43.2% 13 Â 18% sched_debug.cpu#15.cpu_load[1]
10 Â 7% +35.0% 13 Â 3% sched_debug.cpu#27.cpu_load[3]
8 Â 14% +51.4% 13 Â 9% sched_debug.cpu#6.cpu_load[1]
9 Â 8% +45.9% 13 Â 8% sched_debug.cpu#28.cpu_load[4]
944754 Â 0% -28.1% 678924 Â 1% numa-numastat.node3.local_node
948941 Â 0% -28.0% 683119 Â 1% numa-numastat.node3.numa_hit
114901 Â 2% +34.7% 154715 Â 1% sched_debug.cpu#9.nr_load_updates
25 Â 6% -31.0% 17 Â 11% sched_debug.cpu#30.load
1460 Â 19% +37.8% 2013 Â 10% sched_debug.cpu#7.ttwu_local
460171 Â 5% -30.5% 319618 Â 10% sched_debug.cpu#32.avg_idle
10 Â 14% +36.6% 14 Â 8% sched_debug.cpu#25.cpu_load[4]
8 Â 31% +65.7% 14 Â 7% sched_debug.cfs_rq[24]:/.runnable_load_avg
607228 Â 1% -27.9% 437862 Â 3% numa-vmstat.node3.numa_local
114663 Â 1% +33.1% 152576 Â 1% sched_debug.cpu#12.nr_load_updates
513031 Â 16% -32.2% 347796 Â 10% sched_debug.cpu#37.avg_idle
2004 Â 18% +44.1% 2888 Â 11% proc-vmstat.numa_pages_migrated
2004 Â 18% +44.1% 2888 Â 11% proc-vmstat.pgmigrate_success
644441 Â 1% -26.3% 474922 Â 3% numa-vmstat.node3.numa_hit
7 Â 27% +66.7% 12 Â 12% sched_debug.cfs_rq[5]:/.runnable_load_avg
11 Â 15% +38.6% 15 Â 12% sched_debug.cpu#31.cpu_load[3]
11 Â 11% +35.6% 15 Â 12% sched_debug.cpu#31.cpu_load[2]
7 Â 22% +63.3% 12 Â 3% sched_debug.cpu#14.cpu_load[3]
116422 Â 2% +32.4% 154101 Â 1% sched_debug.cpu#10.nr_load_updates
129 Â 9% -36.4% 82 Â 27% sched_debug.cfs_rq[44]:/.blocked_load_avg
514997 Â 15% -26.5% 378518 Â 6% sched_debug.cpu#55.avg_idle
511213 Â 14% -23.4% 391526 Â 21% sched_debug.cpu#36.avg_idle
1107 Â 6% +41.2% 1564 Â 14% sched_debug.cpu#23.ttwu_local
421195 Â 6% -28.2% 302545 Â 12% sched_debug.cpu#57.avg_idle
116187 Â 1% +31.2% 152493 Â 0% sched_debug.cpu#11.nr_load_updates
117788 Â 1% +29.6% 152672 Â 1% sched_debug.cpu#8.nr_load_updates
9 Â 20% +44.4% 13 Â 5% sched_debug.cpu#4.cpu_load[0]
10 Â 10% +29.3% 13 Â 3% sched_debug.cpu#27.cpu_load[2]
10 Â 12% +35.0% 13 Â 6% sched_debug.cpu#28.cpu_load[2]
10 Â 7% +35.0% 13 Â 19% sched_debug.cpu#13.cpu_load[1]
41426 Â 7% +36.8% 56660 Â 5% proc-vmstat.numa_hint_faults_local
114514 Â 1% +34.0% 153480 Â 1% sched_debug.cpu#14.nr_load_updates
113983 Â 1% +34.3% 153134 Â 1% sched_debug.cpu#13.nr_load_updates
117248 Â 1% +29.9% 152335 Â 0% sched_debug.cpu#17.nr_load_updates
117878 Â 1% +29.0% 152058 Â 0% sched_debug.cpu#19.nr_load_updates
118372 Â 1% +28.3% 151813 Â 0% sched_debug.cpu#16.nr_load_updates
10 Â 14% +23.8% 13 Â 7% sched_debug.cpu#10.cpu_load[0]
10 Â 8% +26.8% 13 Â 7% sched_debug.cpu#10.cpu_load[1]
11 Â 12% +22.7% 13 Â 3% sched_debug.cpu#27.cpu_load[1]
9 Â 27% +52.8% 13 Â 7% sched_debug.cpu#2.cpu_load[1]
117488 Â 1% +28.1% 150510 Â 0% sched_debug.cpu#22.nr_load_updates
13 Â 24% +38.9% 18 Â 4% sched_debug.cfs_rq[57]:/.load
13 Â 26% +38.9% 18 Â 4% sched_debug.cpu#57.load
117379 Â 1% +27.9% 150133 Â 0% sched_debug.cpu#21.nr_load_updates
1602483 Â 3% -19.6% 1288298 Â 0% numa-meminfo.node0.SUnreclaim
238564 Â 15% +29.9% 309806 Â 6% sched_debug.cpu#27.avg_idle
311032 Â 2% -22.7% 240441 Â 5% sched_debug.cpu#8.avg_idle
400457 Â 3% -19.6% 322053 Â 0% numa-vmstat.node0.nr_slab_unreclaimable
1394 Â 12% +28.9% 1797 Â 7% sched_debug.cpu#6.ttwu_local
120507 Â 1% +26.0% 151799 Â 1% sched_debug.cpu#6.nr_load_updates
348850 Â 6% -24.7% 262520 Â 21% sched_debug.cpu#6.avg_idle
675626 Â 3% -19.6% 543470 Â 0% numa-meminfo.node0.PageTables
168838 Â 3% -19.5% 135853 Â 0% numa-vmstat.node0.nr_page_table_pages
120183 Â 3% +26.2% 151615 Â 0% sched_debug.cpu#20.nr_load_updates
120277 Â 1% +25.9% 151380 Â 0% sched_debug.cpu#7.nr_load_updates
35935589 Â 0% -20.4% 28606880 Â 0% slabinfo.vm_area_struct.num_objs
816717 Â 0% -20.4% 650156 Â 0% slabinfo.vm_area_struct.active_slabs
816717 Â 0% -20.4% 650156 Â 0% slabinfo.vm_area_struct.num_slabs
118159 Â 1% +26.1% 148944 Â 0% sched_debug.cpu#1.nr_load_updates
35891627 Â 0% -20.3% 28590239 Â 0% slabinfo.vm_area_struct.active_objs
120720 Â 1% +24.9% 150796 Â 0% sched_debug.cpu#4.nr_load_updates
474282 Â 6% -22.9% 365875 Â 19% sched_debug.cpu#38.avg_idle
1651580 Â 0% -20.1% 1319009 Â 0% proc-vmstat.nr_slab_unreclaimable
6606619 Â 0% -20.1% 5276360 Â 0% meminfo.SUnreclaim
120367 Â 1% +24.9% 150388 Â 0% sched_debug.cpu#5.nr_load_updates
11 Â 14% +34.1% 14 Â 10% sched_debug.cfs_rq[31]:/.runnable_load_avg
20 Â 5% -24.7% 15 Â 8% sched_debug.cfs_rq[21]:/.load
12 Â 8% +16.3% 14 Â 5% sched_debug.cpu#25.cpu_load[0]
696867 Â 0% -20.0% 557635 Â 0% proc-vmstat.nr_page_table_pages
6653178 Â 0% -20.0% 5322573 Â 0% meminfo.Slab
248196 Â 9% +25.4% 311208 Â 12% sched_debug.cpu#24.avg_idle
2787604 Â 0% -20.0% 2230917 Â 0% meminfo.PageTables
5.922e+08 Â 0% -19.9% 4.747e+08 Â 0% proc-vmstat.pgfault
118211 Â 2% +28.0% 151343 Â 0% sched_debug.cpu#18.nr_load_updates
120807 Â 1% +24.7% 150598 Â 0% sched_debug.cpu#3.nr_load_updates
128900 Â 1% +24.2% 160040 Â 0% sched_debug.cpu#25.nr_load_updates
121256 Â 1% +24.3% 150777 Â 1% sched_debug.cpu#2.nr_load_updates
114909 Â 3% +34.0% 153977 Â 1% sched_debug.cpu#15.nr_load_updates
128637 Â 1% +23.4% 158697 Â 0% sched_debug.cpu#26.nr_load_updates
424064 Â 10% -22.4% 328912 Â 13% sched_debug.cpu#63.avg_idle
2518148 Â 2% -17.3% 2082266 Â 0% numa-meminfo.node0.MemUsed
377009 Â 10% -22.9% 290499 Â 10% sched_debug.cpu#58.avg_idle
1751 Â 9% +27.3% 2230 Â 8% sched_debug.cpu#3.ttwu_local
128243 Â 1% +22.8% 157484 Â 0% sched_debug.cpu#28.nr_load_updates
128594 Â 1% +22.3% 157264 Â 0% sched_debug.cpu#29.nr_load_updates
128398 Â 1% +23.1% 158004 Â 0% sched_debug.cpu#27.nr_load_updates
129844 Â 1% +24.5% 161710 Â 0% sched_debug.cpu#24.nr_load_updates
128589 Â 1% +21.7% 156520 Â 0% sched_debug.cpu#30.nr_load_updates
127546 Â 1% +22.9% 156748 Â 0% sched_debug.cpu#31.nr_load_updates
134 Â 7% -29.4% 95 Â 24% sched_debug.cfs_rq[44]:/.tg_load_contrib
70304 Â 8% +22.6% 86182 Â 3% proc-vmstat.numa_hint_faults
29 Â 39% -37.0% 18 Â 2% sched_debug.cfs_rq[25]:/.load
28 Â 44% -33.6% 18 Â 2% sched_debug.cpu#25.load
272672 Â 2% -15.8% 229700 Â 1% proc-vmstat.pgalloc_dma32
363798 Â 12% -21.2% 286629 Â 13% sched_debug.cpu#59.avg_idle
1613943 Â 3% -19.5% 1298521 Â 0% numa-meminfo.node0.Slab
117340 Â 1% +28.4% 150721 Â 0% sched_debug.cpu#23.nr_load_updates
4814407 Â 0% -16.8% 4004118 Â 1% proc-vmstat.pgfree
1552099 Â 0% -16.8% 1291492 Â 0% numa-meminfo.node2.Slab
649904 Â 0% -16.7% 541350 Â 0% numa-meminfo.node2.PageTables
162410 Â 0% -16.7% 135321 Â 0% numa-vmstat.node2.nr_page_table_pages
1540899 Â 0% -17.0% 1279172 Â 0% numa-meminfo.node2.SUnreclaim
385059 Â 0% -17.0% 319769 Â 0% numa-vmstat.node2.nr_slab_unreclaimable
4548086 Â 0% -16.4% 3804042 Â 1% proc-vmstat.pgalloc_normal
1255200 Â 0% +19.0% 1493504 Â 0% numa-vmstat.node3.nr_free_pages
5019891 Â 0% +19.0% 5973786 Â 0% numa-meminfo.node3.MemFree
5905 Â 2% -14.0% 5081 Â 0% sched_debug.cfs_rq[2]:/.tg_load_avg
5916 Â 2% -14.2% 5077 Â 0% sched_debug.cfs_rq[0]:/.tg_load_avg
5918 Â 2% -14.0% 5087 Â 0% sched_debug.cfs_rq[1]:/.tg_load_avg
2423291 Â 1% -15.3% 2052984 Â 0% numa-meminfo.node2.MemUsed
10 Â 12% +26.8% 13 Â 9% sched_debug.cpu#28.cpu_load[1]
0.13 Â 3% -15.7% 0.11 Â 10% turbostat.%pc3
3420656 Â 0% -15.1% 2903309 Â 2% proc-vmstat.numa_local
3433217 Â 0% -15.1% 2915876 Â 2% proc-vmstat.numa_hit
838458 Â 1% -13.0% 729220 Â 2% numa-numastat.node0.local_node
736674 Â 3% +18.6% 874029 Â 1% softirqs.RCU
840554 Â 1% -12.9% 732351 Â 2% numa-numastat.node0.numa_hit
5802 Â 3% -12.5% 5078 Â 0% sched_debug.cfs_rq[3]:/.tg_load_avg
5730 Â 3% -11.3% 5083 Â 0% sched_debug.cfs_rq[4]:/.tg_load_avg
533590 Â 2% -11.2% 473739 Â 1% numa-vmstat.node0.numa_local
237268 Â 7% +27.6% 302850 Â 13% sched_debug.cpu#28.avg_idle
565252 Â 2% -10.4% 506412 Â 1% numa-vmstat.node0.numa_hit
5709 Â 3% -11.1% 5077 Â 0% sched_debug.cfs_rq[5]:/.tg_load_avg
111048 Â 5% -13.4% 96201 Â 10% meminfo.DirectMap4k
5698 Â 3% -10.8% 5084 Â 0% sched_debug.cfs_rq[6]:/.tg_load_avg
270517 Â 8% +11.1% 300584 Â 4% sched_debug.cpu#29.avg_idle
12 Â 19% +32.7% 16 Â 6% sched_debug.cpu#38.load
12 Â 19% +32.7% 16 Â 6% sched_debug.cfs_rq[38]:/.load
5703 Â 3% -11.1% 5071 Â 0% sched_debug.cfs_rq[7]:/.tg_load_avg
5701 Â 3% -10.9% 5081 Â 0% sched_debug.cfs_rq[8]:/.tg_load_avg
537830 Â 1% -12.3% 471678 Â 1% numa-vmstat.node2.numa_local
5708 Â 2% -10.6% 5102 Â 0% sched_debug.cfs_rq[9]:/.tg_load_avg
824960 Â 2% -10.8% 735556 Â 1% numa-numastat.node2.local_node
829145 Â 2% -10.9% 738701 Â 1% numa-numastat.node2.numa_hit
5670 Â 2% -10.0% 5101 Â 0% sched_debug.cfs_rq[10]:/.tg_load_avg
5663 Â 2% -9.9% 5100 Â 0% sched_debug.cfs_rq[11]:/.tg_load_avg
574968 Â 0% -11.6% 508060 Â 1% numa-vmstat.node2.numa_hit
5660 Â 2% -9.7% 5109 Â 0% sched_debug.cfs_rq[12]:/.tg_load_avg
5580 Â 1% -9.5% 5050 Â 1% sched_debug.cfs_rq[34]:/.tg_load_avg
5578 Â 1% -9.5% 5049 Â 1% sched_debug.cfs_rq[33]:/.tg_load_avg
5655 Â 1% -10.1% 5085 Â 1% sched_debug.cfs_rq[14]:/.tg_load_avg
5651 Â 1% -9.9% 5092 Â 0% sched_debug.cfs_rq[15]:/.tg_load_avg
5656 Â 2% -9.6% 5111 Â 0% sched_debug.cfs_rq[13]:/.tg_load_avg
5615 Â 1% -9.4% 5086 Â 1% sched_debug.cfs_rq[19]:/.tg_load_avg
5631 Â 1% -9.7% 5085 Â 1% sched_debug.cfs_rq[20]:/.tg_load_avg
9735 Â 6% +11.7% 10878 Â 3% slabinfo.anon_vma.active_objs
9735 Â 6% +11.7% 10878 Â 3% slabinfo.anon_vma.num_objs
5635 Â 1% -9.6% 5094 Â 1% sched_debug.cfs_rq[21]:/.tg_load_avg
5645 Â 1% -9.8% 5090 Â 0% sched_debug.cfs_rq[16]:/.tg_load_avg
5400 Â 1% -8.4% 4949 Â 2% sched_debug.cfs_rq[63]:/.tg_load_avg
5388 Â 2% -8.2% 4948 Â 2% sched_debug.cfs_rq[62]:/.tg_load_avg
5569 Â 1% -9.0% 5070 Â 1% sched_debug.cfs_rq[35]:/.tg_load_avg
5566 Â 1% -9.1% 5060 Â 1% sched_debug.cfs_rq[40]:/.tg_load_avg
5392 Â 2% -8.1% 4955 Â 2% sched_debug.cfs_rq[61]:/.tg_load_avg
5623 Â 1% -9.5% 5087 Â 1% sched_debug.cfs_rq[18]:/.tg_load_avg
5562 Â 1% -9.0% 5064 Â 1% sched_debug.cfs_rq[37]:/.tg_load_avg
5395 Â 2% -7.8% 4974 Â 2% sched_debug.cfs_rq[60]:/.tg_load_avg
5638 Â 1% -9.9% 5081 Â 0% sched_debug.cfs_rq[17]:/.tg_load_avg
5565 Â 1% -8.9% 5070 Â 1% sched_debug.cfs_rq[36]:/.tg_load_avg
5502 Â 2% -9.4% 4985 Â 2% sched_debug.cfs_rq[47]:/.tg_load_avg
5581 Â 1% -9.6% 5042 Â 1% sched_debug.cfs_rq[43]:/.tg_load_avg
5526 Â 3% -9.6% 4995 Â 2% sched_debug.cfs_rq[46]:/.tg_load_avg
5558 Â 1% -8.9% 5063 Â 1% sched_debug.cfs_rq[39]:/.tg_load_avg
5581 Â 1% -8.8% 5091 Â 1% sched_debug.cfs_rq[32]:/.tg_load_avg
5622 Â 1% -9.3% 5102 Â 1% sched_debug.cfs_rq[27]:/.tg_load_avg
5559 Â 1% -8.8% 5070 Â 1% sched_debug.cfs_rq[38]:/.tg_load_avg
5567 Â 1% -9.7% 5028 Â 1% sched_debug.cfs_rq[45]:/.tg_load_avg
5588 Â 1% -9.9% 5036 Â 1% sched_debug.cfs_rq[44]:/.tg_load_avg
5603 Â 1% -8.9% 5102 Â 1% sched_debug.cfs_rq[28]:/.tg_load_avg
40992 Â 8% +525.7% 256479 Â 1% time.involuntary_context_switches
37575247 Â 0% -81.1% 7104211 Â 1% time.voluntary_context_switches
206833 Â 0% -78.8% 43766 Â 0% vmstat.system.cs
5002 Â 4% +209.3% 15474 Â 0% time.system_time
1649 Â 3% +185.0% 4701 Â 0% time.percent_of_cpu_this_job_got
27.13 Â 3% +170.9% 73.49 Â 0% turbostat.%c0
24116 Â 5% +133.1% 56210 Â 9% vmstat.system.in
5.909e+08 Â 0% -19.9% 4.734e+08 Â 0% time.minor_page_faults
369 Â 0% -4.6% 352 Â 0% time.elapsed_time
lkp-nex05: Nehalem-EX
Memory: 192G
lkp-nex06: Nehalem-EX
Memory: 64G
time.elapsed_time
375 ++--------------------------------------------------------------------+
| *.. .*... .*... .*. |
| .. *...*. *. *. .. |
370 ++ . .*...*.. .*...*..*
*...*..*...*..* *. *...*. |
| |
365 ++ |
| |
360 ++ |
| |
| |
355 O+ O O |
| O O O O O |
| O O O O O O O O O O O O
350 ++-------------------------------------O------------------------------+
time.minor_page_faults
6.2e+08 ++----------------------------------------------------------------+
| .*.. |
6e+08 *+.*...*..*.. ..*. *..*...*..*..*...*.. ..*..*
5.8e+08 ++ *. *..*...*..*..*..*. |
| |
5.6e+08 ++ |
| |
5.4e+08 ++ |
| |
5.2e+08 ++ |
5e+08 O+ O O O O |
| |
4.8e+08 ++ |
| O O O O O O O O O O O O O O O O
4.6e+08 ++----------------------------------------------------------------+
vm-scalability.throughput
9.5e+06 ++----------------------------------------------------------------+
| |
9e+06 ++ ..*..*.. ..*.. |
*..*...*..*..*. *..*...*..*..*. *..*...*..*..*..*...*..*
| |
8.5e+06 ++ |
| |
8e+06 ++ |
| |
7.5e+06 O+ O O O O |
| |
| O O O O O O O O O O O O O
7e+06 ++ O O O |
| |
6.5e+06 ++----------------------------------------------------------------+
cpuidle.C1E-NHM.time
8e+07 ++----------------------------------------------*--*---------*------+
| *. *...*. *..|
7e+07 ++ .*... .*.. .. *
6e+07 *+.*...*. *..*.. .*..*...*. ..* |
| .. *. |
5e+07 ++ * |
| |
4e+07 ++ |
| |
3e+07 ++ O O O O O O O O |
2e+07 ++ O O O O O O O O
| |
1e+07 ++ |
| |
0 O+-O---O--O---O-----------------------------------------------------+
cpuidle.C1E-NHM.usage
300000 ++---------------------------------------------*--*---*-----*------+
| *. *. *..*
250000 ++ .. |
*..*...*..*..*...*.. *...*..*...*..*.. . |
| .. * |
200000 ++ * |
| |
150000 ++ |
| |
100000 ++ |
| |
| |
50000 ++ O O O O O O O O O O O O O O O O
O O O O O |
0 ++-----------------------------------------------------------------+
proc-vmstat.nr_slab_unreclaimable
1.7e+06 ++------------------*------------------*-------------------------+
*..*...*..*.. .*. *..*..*...*..*. .*..*
1.65e+06 ++ *. *...*..*..*..*...*. |
1.6e+06 ++ |
| |
1.55e+06 ++ |
1.5e+06 ++ |
| |
1.45e+06 ++ |
1.4e+06 O+ O O O O |
| |
1.35e+06 ++ |
1.3e+06 ++ O O O O O O O O O O O O O O O O
| |
1.25e+06 ++---------------------------------------------------------------+
proc-vmstat.nr_page_table_pages
720000 ++------------------*----------------------------------------------+
*.. ..*. *...*..*...*..*..*... |
700000 ++ *...*..*..*. *..*..*...*..*..*...*..*
680000 ++ |
| |
660000 ++ |
640000 ++ |
| |
620000 ++ |
600000 ++ |
O O O O O |
580000 ++ |
560000 ++ O |
| O O O O O O O O O O O O O O O
540000 ++-----------------------------------------------------------------+
proc-vmstat.numa_hit
3.6e+06 ++----------------------------------------------------------------+
| *.. |
3.5e+06 ++ .. *.. ..*..*..*...*.. ..*..|
*..*...*..*..*...* *. .*...*..*..*..*. *
3.4e+06 ++ *. |
3.3e+06 ++ |
| |
3.2e+06 ++ |
| |
3.1e+06 ++ |
3e+06 O+ O O O |
| O O O O O O O |
2.9e+06 ++ O O O O O O O O O
| |
2.8e+06 ++---------------------------------------------------------O------+
proc-vmstat.numa_local
3.6e+06 ++----------------------------------------------------------------+
| |
3.5e+06 ++ .*.. ..*.. .*...*.. ..*..|
*..*...*..*..*...*. *..*. *. .*...*..*..*..*. |
3.4e+06 ++ *. *
3.3e+06 ++ |
| |
3.2e+06 ++ |
| |
3.1e+06 ++ |
3e+06 ++ O O |
O O O O O O O O O |
2.9e+06 ++ O O O O O O O O
| O |
2.8e+06 ++---------------------------------------------------------O------+
proc-vmstat.pgalloc_normal
4.7e+06 ++----------------------------------------------------------------+
4.6e+06 ++ .*.. ..*..*..*...* ..*..|
*..*...*..*..*...*. .*. + *...*..*..*..*. *
4.5e+06 ++ *. + .. |
4.4e+06 ++ * |
| |
4.3e+06 ++ |
4.2e+06 ++ |
4.1e+06 ++ |
| |
4e+06 ++ O O O |
3.9e+06 O+ O O |
| O O O O O O |
3.8e+06 ++ O O O O O O O O
3.7e+06 ++---------------------------------------------------------O------+
proc-vmstat.pgfree
5e+06 ++----------------------------------------------------------------+
| .*.. ..*.. .*...*.. |
4.8e+06 ++.*... .*..*...*. *..*. *. .*...*..*..*..*...*..|
*. *. *. *
| |
4.6e+06 ++ |
| |
4.4e+06 ++ |
| |
4.2e+06 O+ O O O |
| O |
| O O O O O O O O O O O
4e+06 ++ O O O O O |
| |
3.8e+06 ++----------------------------------------------------------------+
proc-vmstat.pgfault
6.2e+08 ++----------------------------------------------------------------+
| .*.. ..*.. |
6e+08 *+.*...*..*..*...*. *..*...*..*..*. .*..*.. ..*..*
5.8e+08 ++ *..*...*. *. |
| |
5.6e+08 ++ |
| |
5.4e+08 ++ |
| |
5.2e+08 ++ |
5e+08 O+ O O O O |
| |
4.8e+08 ++ O |
| O O O O O O O O O O O O O O O
4.6e+08 ++----------------------------------------------------------------+
meminfo.Slab
7e+06 ++----------------------------------------------------------------+
| .*.. ..*.. |
6.8e+06 *+. ..*.. ..*. *..*...*..*..*. |
6.6e+06 ++ *. *..*. *..*...*..*..*..*...*..*
| |
6.4e+06 ++ |
6.2e+06 ++ |
| |
6e+06 ++ |
5.8e+06 ++ |
O O O O |
5.6e+06 ++ O |
5.4e+06 ++ |
| O O O O O O O O O O O O O O
5.2e+06 ++---------------O---------------O--------------------------------+
meminfo.SUnreclaim
6.8e+06 ++------------------*-------------------*-------------------------+
*..*...*..*.. ..*. *..*...*..*..*. ..*..*
6.6e+06 ++ *. *..*...*..*..*..*. |
6.4e+06 ++ |
| |
6.2e+06 ++ |
6e+06 ++ |
| |
5.8e+06 ++ |
5.6e+06 O+ O O O O |
| |
5.4e+06 ++ |
5.2e+06 ++ O O O O O O O O O O O O O O O O
| |
5e+06 ++----------------------------------------------------------------+
meminfo.PageTables
2.9e+06 ++----------------------------------------------------------------+
| ..*..*..*..*...*..*..*...*.. |
2.8e+06 *+.*...*..*..*. *..*...*..*..*..*...*..*
| |
2.7e+06 ++ |
2.6e+06 ++ |
| |
2.5e+06 ++ |
| |
2.4e+06 O+ O O O |
2.3e+06 ++ O |
| |
2.2e+06 ++ O O O O O O O O O O O O O O O O
| |
2.1e+06 ++----------------------------------------------------------------+
slabinfo.vm_area_struct.active_objs
3.7e+07 ++------------------*-------------------*-------------------------+
*..*... .*.. ..*. *..*...*..*..*. |
3.6e+07 ++ *. *. *..*...*..*..*..*...*..*
3.5e+07 ++ |
| |
3.4e+07 ++ |
3.3e+07 ++ |
| |
3.2e+07 ++ |
3.1e+07 ++ |
O O O O O |
3e+07 ++ |
2.9e+07 ++ |
| O O O O O O O O O O O O O O
2.8e+07 ++---------------O---------------O--------------------------------+
slabinfo.vm_area_struct.num_objs
3.7e+07 ++------------------*-------------------*-------------------------+
*..*...*..*.. ..*. *..*...*..*..*. ..*..|
3.6e+07 ++ *. *..*...*..*..*..*. *
3.5e+07 ++ |
| |
3.4e+07 ++ |
3.3e+07 ++ |
| |
3.2e+07 ++ |
3.1e+07 ++ |
O O O O O |
3e+07 ++ |
2.9e+07 ++ |
| O O O O O O O O O O O O O O O
2.8e+07 ++---------------O------------------------------------------------+
slabinfo.vm_area_struct.active_slabs
840000 ++------------------*-------------------*--------------------------+
820000 *+.*...*..*.. ..*. *...*..*...*..*. . ..*..*
| *. *..*..*...*..*..*. |
800000 ++ |
780000 ++ |
| |
760000 ++ |
740000 ++ |
720000 ++ |
| |
700000 O+ O O O O |
680000 ++ |
| |
660000 ++ O O O O O O O O O O O
640000 ++---------------O--O------O------O-------------------------O------+
slabinfo.vm_area_struct.num_slabs
840000 ++------------------*-------------------*--------------------------+
820000 *+.*...*..*.. ..*. *...*..*...*..*. . ..*..*
| *. *..*..*...*..*..*. |
800000 ++ |
780000 ++ |
| |
760000 ++ |
740000 ++ |
720000 ++ |
| |
700000 O+ O O O O |
680000 ++ |
| |
660000 ++ O O O O O O O O O O O
640000 ++---------------O--O------O------O-------------------------O------+
sched_debug.cpu#39.nr_switches
600000 ++---------------------*-------------------------------------------+
| :: |
500000 ++ : : *.. |
| : : + *.. |
| *.. : : + *.. |
400000 *+. : : *.. + . .*.. .*
| *.. : *...*..: *..* *. *...*. |
300000 ++ . : * *... .. |
| * * |
200000 ++ |
| |
O O O O O O O O O O O O O O O O O O O
100000 ++ O |
| O |
0 ++-----------------------------------------------------------------+
sched_debug.cpu#39.sched_count
600000 ++---------------------*-------------------------------------------+
| :: *.. |
500000 ++ : : : |
| : : : *.. |
| *.. : : : *.. |
400000 *+. : : *.. : . .*.. .*
| *.. : *...*..: *..* *. *...*. |
300000 ++ . : * *... .. |
| * * |
200000 ++ |
| O |
O O O O O O O O O O O O O O O O O O
100000 ++ O O |
| |
0 ++-----------------------------------------------------------------+
sched_debug.cpu#39.sched_goidle
300000 ++---------------------*-------------------------------------------+
| :: |
250000 ++ : : *.. |
| : : + *.. |
| *.. : : + *.. |
200000 *+. : : *.. + . .*.. .*
| *.. : *...*..: *..* *. *...*. |
150000 ++ . : * *... .. |
| * * |
100000 ++ |
| |
O O O O O O O O O O O O O O O O O O O
50000 ++ O |
| O |
0 ++-----------------------------------------------------------------+
sched_debug.cpu#39.ttwu_count
350000 ++-----------------------------------------------------------------+
| * |
300000 ++ :: |
| : : *.. |
250000 ++ : : + |
| *.. : : + *..*.. |
200000 *+. + : *.. + . .*..*... |
| *... + *...*..: .*..* *. *..*
150000 ++ * * *...*. |
| |
100000 ++ |
O O O O O O O O O O O O O O O O
50000 ++ O O O O |
| O |
0 ++-----------------------------------------------------------------+
sched_debug.cpu#53.nr_switches
400000 ++-----------------------------------------------------------------+
| *. *..*..* |
350000 ++ ..*..* : .. *.. .*..* : + |
* .*. : : .. .. : : + |
300000 ++ .*..*. : : * * : : + .*
| + .. : ..* : : *. |
250000 ++ * *. * |
| |
200000 ++ |
| |
150000 ++ O O |
| O O O O O O O O O O O O O
100000 ++ O O O O O |
O |
50000 ++-----------------------------------------------------------------+
sched_debug.cpu#53.sched_count
400000 ++-----------------------------------------------------------------+
| *. *..*..* |
350000 ++ ..*..* + .. *.. .*..* : + |
* *.. .*. : + .. .. : : + |
300000 ++ .. *. : .* * * : : + .*
| + . : .. : : *. |
250000 ++ * * * |
| |
200000 ++ |
| |
150000 ++ O O O O O |
| O O O O O O O O O O
100000 ++ O O O O O |
O |
50000 ++-----------------------------------------------------------------+
sched_debug.cpu#53.sched_goidle
200000 ++-----------------------------------------------------------------+
| *. *..*..*. |
180000 ++ ..*..* + .. *.. .*..* : .. |
160000 *+ .*. : + .. .. : : |
|+ .*..*. : .* * * : : *..*
140000 +++ .. : .. : : |
120000 ++ * * * |
| |
100000 ++ |
80000 ++ |
| O O O |
60000 ++ O O O O O O O O O O O O O
40000 ++ O O O O |
O |
20000 ++-----------------------------------------------------------------+
sched_debug.cpu#53.ttwu_count
200000 ++----------------------------*------------------------------------+
| * :+ *..*..*. |
180000 *+ .. : : + *.. .* : .. |
160000 ++ *...* : : + .. *...*. : : |
| + .. : : * : : *..*
140000 ++ *...*..* : : : : |
| *...* * |
120000 ++ |
| |
100000 ++ |
80000 ++ |
| |
60000 ++ O O O O O O O O O O O O O O O O O
| O O O |
40000 O+-----------------------------------------------------------------+
kmsg.hrtimer:interrupt_took#ns
1 ++--------------------------O----------O--------------------O---------+
| |
| |
0.8 ++ |
| |
| |
0.6 ++ |
| |
0.4 ++ |
| |
| |
0.2 ++ |
| |
| |
0 O+--O--O---O--O---O--O---O------O--O------O---O--O---O--O------O---O--O
kmsg.CE:hpet_increased_min_delta_ns_to#nsec
1 ++------------------------------O-------------------------------------+
| |
| |
0.8 ++ |
| |
| |
0.6 ++ |
| |
0.4 ++ |
| |
| |
0.2 ++ |
| |
| |
0 O+--O--O---O--O---O--O---O--O------O---O--O---O--O---O--O---O--O---O--O
kmsg.DHCP/BOOTP:Reply_not_for_us,op[#]xid[#]
1 ++--------------------------*-----------------------------------------+
| : |
| :: |
0.8 ++ : : |
| : : |
| : : |
0.6 ++ : : |
| : : |
0.4 ++ : : |
| : : |
| : : |
0.2 ++ : : |
| : : |
| : : |
0 O+--O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O
kmsg.dmar:DRHD:handling_fault_status_reg
1 ++--------------------------------------------*-----------------------+
| : |
| :: |
0.8 ++ : : |
| : : |
| : : |
0.6 ++ : : |
| : : |
0.4 ++ : : |
| : : |
| : : |
0.2 ++ : : |
| : : |
| : : |
0 O+--O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O
kmsg.dmar:INTR-REMAP:Request_device[[f0:#f.#]fault_index_c9b0
1 ++--------------------------------------------*-----------------------+
| : |
| :: |
0.8 ++ : : |
| : : |
| : : |
0.6 ++ : : |
| : : |
0.4 ++ : : |
| : : |
| : : |
0.2 ++ : : |
| : : |
| : : |
0 O+--O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O
kmsg.INTR-REMAP:[fault_reason#]Blocked_a_compatibility_format_interrupt_request
1 ++--------------------------------------------*-----------------------+
| : |
| :: |
0.8 ++ : : |
| : : |
| : : |
0.6 ++ : : |
| : : |
0.4 ++ : : |
| : : |
| : : |
0.2 ++ : : |
| : : |
| : : |
0 O+--O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O---O--O
[*] bisect-good sample
[O] bisect-bad sample
To reproduce:
apt-get install ruby ruby-oj
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
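The steps above can also be collected into one small script. This is only a sketch of the same sequence, assuming the job file attached to this email has been saved as job.yaml in the current directory and that the kernel under test is already booted on the target machine:
#!/bin/sh
set -e
JOB=$(pwd)/job.yaml            # assumed location of the attached job file
apt-get install ruby ruby-oj   # runtime dependencies of lkp-tests
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local "$JOB"         # install the benchmark/monitor packages the job needs
bin/run-local "$JOB"           # run the job and collect the results locally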
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Fengguang
---
testcase: vm-scalability
default_monitors:
wait: pre-test
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
cpuidle:
cpufreq:
turbostat:
sched_debug:
interval: 10
pmeter:
default_watchdogs:
watch-oom:
watchdog:
cpufreq_governor:
- performance
commit: 8191d07dbb16ae88cc2bc475584b9f185f02795f
model: Nehalem-EX
memory: 64G
nr_cpu: 64
nr_hdd_partitions: 0
hdd_partitions:
swap_partitions:
rootfs_partition:
rootfs: clearlinux-x86_64-20141119.cgz
perf-profile:
runtime: 300s
size:
vm-scalability:
test:
- small-allocs
testbox: lkp-nex06
tbox_group: lkp-nex06
kconfig: x86_64-rhel
enqueue_time: 2014-12-05 15:23:14.945092246 +08:00
head_commit: 8191d07dbb16ae88cc2bc475584b9f185f02795f
base_commit: 009d0431c3914de64666bec0d350e54fdd59df6a
branch: next/master
kernel: "/kernel/x86_64-rhel/8191d07dbb16ae88cc2bc475584b9f185f02795f/vmlinuz-3.18.0-rc7-next-20141205-g8191d07"
user: lkp
queue: cyclic
result_root: "/result/lkp-nex06/vm-scalability/performance-300s-small-allocs/clearlinux-x86_64-20141119.cgz/x86_64-rhel/8191d07dbb16ae88cc2bc475584b9f185f02795f/0"
job_file: "/lkp/scheduled/lkp-nex06/cyclic_vm-scalability-performance-300s-small-allocs-clearlinux-x86_64.cgz-x86_64-rhel-HEAD-8191d07dbb16ae88cc2bc475584b9f185f02795f-0.yaml"
dequeue_time: 2014-12-06 02:39:22.930615393 +08:00
job_state: finished
loadavg: 52.90 43.13 20.22 1/529 12451
start_time: '1417833648'
end_time: '1417834004'
version: "/lkp/lkp/.src-20141205-010122"
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu16/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu17/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu18/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu19/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu20/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu21/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu22/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu23/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu24/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu25/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu26/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu27/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu28/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu29/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu30/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu31/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu32/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu33/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu34/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu35/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu36/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu37/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu38/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu39/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu40/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu41/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu42/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu43/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu44/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu45/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu46/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu47/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu48/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu49/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu50/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu51/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu52/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu53/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu54/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu55/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu56/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu57/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu58/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu59/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu60/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu61/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu62/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu63/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
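# The 64 per-CPU writes above are equivalent to a single loop over the
# cpufreq sysfs entries (a minimal sketch, assuming every CPU exposes a
# cpufreq policy; not part of the captured reproduce script):
#   for gov in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
#           echo performance > "$gov"
#   done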
mount -t tmpfs -o size=100% vm-scalability-tmp /tmp/vm-scalability-tmp
truncate -s 33607249920 /tmp/vm-scalability.img
mkfs.xfs -q /tmp/vm-scalability.img
mount -o loop /tmp/vm-scalability.img /tmp/vm-scalability
./case-small-allocs
./usemem --runtime 300 -n 64 --readonly --unit 40960 68719476736
umount /tmp/vm-scalability-tmp
umount /tmp/vm-scalability
rm /tmp/vm-scalability.img
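For convenience, the same benchmark sequence can be wrapped so that the mounts and the loop-backed image are cleaned up even if a step fails. This is a minimal sketch using the same paths and parameters as the captured script; the mkdir calls and the EXIT trap are assumptions added for the sketch, not part of the original run:

#!/bin/sh
set -e
# Create the mount points explicitly, then run the same steps as above.
mkdir -p /tmp/vm-scalability-tmp /tmp/vm-scalability
mount -t tmpfs -o size=100% vm-scalability-tmp /tmp/vm-scalability-tmp
truncate -s 33607249920 /tmp/vm-scalability.img
mkfs.xfs -q /tmp/vm-scalability.img
mount -o loop /tmp/vm-scalability.img /tmp/vm-scalability
# Guarantee cleanup of both mounts and the backing image on exit.
trap 'umount /tmp/vm-scalability-tmp /tmp/vm-scalability; rm -f /tmp/vm-scalability.img' EXIT
./case-small-allocs
./usemem --runtime 300 -n 64 --readonly --unit 40960 68719476736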