[lkp] [f2fs] 957efb0c21: No primary result change, -15.1% fsmark.time.voluntary_context_switches

From: kernel test robot
Date: Sun Jan 10 2016 - 21:10:44 EST


FYI, we noticed the following changes in

https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs dev-test
commit 957efb0c2144cc5ff1795f43bf2d2ca430eaa227 ("Revert "f2fs: check the node block address of newly allocated nid"")


=========================================================================================
compiler/cpufreq_governor/disk/filesize/fs/iterations/kconfig/nr_directories/nr_files_per_directory/nr_threads/rootfs/sync_method/tbox_group/test_size/testcase:
gcc-4.9/performance/1HDD/9B/f2fs/1x/x86_64-rhel/16d/256fpd/32t/debian-x86_64-2015-02-07.cgz/fsyncBeforeClose/lkp-ne04/400M/fsmark

commit:
a51311938e14c17f5a94d30baac9d7bec71f5858
957efb0c2144cc5ff1795f43bf2d2ca430eaa227

a51311938e14c17f 957efb0c2144cc5ff1795f43bf
---------------- --------------------------
%stddev %change %stddev
\ | \
5699822 ± 2% -23.2% 4375176 ± 5% fsmark.app_overhead
1853986 ± 0% +1.0% 1872422 ± 0% fsmark.time.file_system_outputs
60.75 ± 0% -21.4% 47.75 ± 0% fsmark.time.percent_of_cpu_this_job_got
47.56 ± 0% -21.8% 37.18 ± 1% fsmark.time.system_time
1021602 ± 0% -15.1% 867006 ± 0% fsmark.time.voluntary_context_switches
5171 ± 0% +17.5% 6076 ± 1% proc-vmstat.numa_pte_updates
31334 ± 2% -9.1% 28485 ± 3% softirqs.RCU
60.75 ± 0% -21.4% 47.75 ± 0% time.percent_of_cpu_this_job_got
47.56 ± 0% -21.8% 37.18 ± 1% time.system_time
1021602 ± 0% -15.1% 867006 ± 0% time.voluntary_context_switches
6278 ± 0% +1.8% 6390 ± 0% vmstat.io.bo
25379 ± 1% -13.9% 21840 ± 0% vmstat.system.cs
1735 ± 0% -3.6% 1672 ± 0% vmstat.system.in
2341 ±151% +263.3% 8505 ± 0% numa-meminfo.node0.Inactive(anon)
6544 ± 25% +43.9% 9414 ± 0% numa-meminfo.node0.Mapped
2516 ±141% +246.0% 8705 ± 1% numa-meminfo.node0.Shmem
80851 ± 8% -11.5% 71514 ± 1% numa-meminfo.node0.Slab
584.50 ±151% +263.7% 2125 ± 0% numa-vmstat.node0.nr_inactive_anon
1635 ± 25% +43.8% 2352 ± 0% numa-vmstat.node0.nr_mapped
628.25 ±141% +246.3% 2175 ± 1% numa-vmstat.node0.nr_shmem
2080 ± 11% -21.5% 1634 ± 21% numa-vmstat.node0.numa_other
1.173e+08 ± 4% +22.9% 1.441e+08 ± 3% cpuidle.C1-NHM.time
36242421 ± 3% +13.6% 41189247 ± 1% cpuidle.C1E-NHM.time
4.531e+08 ± 2% -22.3% 3.522e+08 ± 2% cpuidle.C3-NHM.time
447937 ± 0% -27.5% 324877 ± 1% cpuidle.C3-NHM.usage
6.434e+08 ± 1% +12.2% 7.221e+08 ± 1% cpuidle.C6-NHM.time
139.75 ± 5% -19.3% 112.75 ± 5% cpuidle.POLL.usage
3.96 ± 0% -18.7% 3.21 ± 1% turbostat.%Busy
88.25 ± 1% -13.6% 76.25 ± 1% turbostat.Avg_MHz
24.30 ± 2% +9.5% 26.61 ± 2% turbostat.CPU%c1
39.26 ± 3% -23.4% 30.08 ± 2% turbostat.CPU%c3
32.49 ± 2% +23.4% 40.10 ± 1% turbostat.CPU%c6
12.66 ± 5% -17.4% 10.46 ± 5% turbostat.Pkg%pc3
58968 ± 0% -100.0% 0.00 ± -1% latency_stats.hits.call_rwsem_down_read_failed.get_node_info.[f2fs].alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
78507 ± 0% -100.0% 0.00 ± -1% latency_stats.hits.call_rwsem_down_write_failed.get_node_info.[f2fs].alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 77032 ± 0% latency_stats.hits.call_rwsem_down_write_failed.get_node_info.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
62300 ± 37% -85.0% 9341 ± 28% latency_stats.sum.call_rwsem_down_read_failed.f2fs_mkdir.[f2fs].vfs_mkdir.SyS_mkdir.entry_SYSCALL_64_fastpath
24001862 ± 1% -100.0% 0.00 ± -1% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
80814 ± 5% -100.0% 0.00 ± -1% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_mkdir.[f2fs].vfs_mkdir.SyS_mkdir.entry_SYSCALL_64_fastpath
251.50 ± 87% +5565.6% 14249 ± 34% latency_stats.sum.call_rwsem_down_write_failed.f2fs_init_extent_tree.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
35549488 ± 1% -100.0% 0.00 ± -1% latency_stats.sum.call_rwsem_down_write_failed.get_node_info.[f2fs].alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
116197 ± 3% -100.0% 0.00 ± -1% latency_stats.sum.call_rwsem_down_write_failed.get_node_info.[f2fs].alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_mkdir.[f2fs].vfs_mkdir.SyS_mkdir.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 25888690 ± 1% latency_stats.sum.call_rwsem_down_write_failed.get_node_info.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
0.00 ± -1% +Inf% 93688 ± 8% latency_stats.sum.call_rwsem_down_write_failed.get_node_info.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_mkdir.[f2fs].vfs_mkdir.SyS_mkdir.entry_SYSCALL_64_fastpath
4443 ± 20% -39.4% 2691 ± 2% sched_debug.cfs_rq:/.exec_clock.0
1232 ± 12% -24.1% 935.06 ± 5% sched_debug.cfs_rq:/.exec_clock.11
1114 ± 4% -19.8% 894.19 ± 8% sched_debug.cfs_rq:/.exec_clock.13
2510 ± 15% -20.5% 1996 ± 12% sched_debug.cfs_rq:/.exec_clock.4
2238 ± 0% -10.9% 1994 ± 0% sched_debug.cfs_rq:/.exec_clock.avg
1043 ± 1% -17.5% 861.26 ± 6% sched_debug.cfs_rq:/.exec_clock.min
8895 ± 13% -30.2% 6209 ± 1% sched_debug.cfs_rq:/.min_vruntime.0
4162 ± 9% -38.2% 2571 ± 6% sched_debug.cfs_rq:/.min_vruntime.11
3832 ± 4% -28.4% 2742 ± 11% sched_debug.cfs_rq:/.min_vruntime.13
7383 ± 8% -32.7% 4965 ± 25% sched_debug.cfs_rq:/.min_vruntime.3
8619 ± 36% -40.6% 5122 ± 3% sched_debug.cfs_rq:/.min_vruntime.4
11149 ± 40% -56.2% 4887 ± 8% sched_debug.cfs_rq:/.min_vruntime.7
4394 ± 6% -30.0% 3074 ± 7% sched_debug.cfs_rq:/.min_vruntime.8
4383 ± 21% -31.8% 2991 ± 13% sched_debug.cfs_rq:/.min_vruntime.9
6282 ± 1% -21.0% 4963 ± 0% sched_debug.cfs_rq:/.min_vruntime.avg
3475 ± 1% -30.5% 2416 ± 4% sched_debug.cfs_rq:/.min_vruntime.min
16.50 ±131% -92.4% 1.25 ±173% sched_debug.cfs_rq:/.runnable_load_avg.5
-5301 ±-20% -51.4% -2577 ±-23% sched_debug.cfs_rq:/.spread0.12
-5063 ±-22% -39.0% -3089 ±-20% sched_debug.cfs_rq:/.spread0.15
-2654 ±-32% -90.8% -243.56 ±-336% sched_debug.cfs_rq:/.spread0.6
-4501 ±-22% -30.4% -3134 ±-10% sched_debug.cfs_rq:/.spread0.8
15693 ± 8% -11.6% 13871 ± 9% sched_debug.cpu.nr_load_updates.0
10558 ± 3% -13.3% 9151 ± 2% sched_debug.cpu.nr_load_updates.10
10664 ± 1% -14.9% 9070 ± 2% sched_debug.cpu.nr_load_updates.11
10412 ± 1% -12.2% 9140 ± 1% sched_debug.cpu.nr_load_updates.12
10568 ± 0% -13.5% 9144 ± 1% sched_debug.cpu.nr_load_updates.13
10602 ± 0% -14.5% 9068 ± 2% sched_debug.cpu.nr_load_updates.15
14807 ± 9% -21.5% 11625 ± 9% sched_debug.cpu.nr_load_updates.3
14728 ± 9% -16.2% 12336 ± 3% sched_debug.cpu.nr_load_updates.4
13550 ± 9% -16.9% 11264 ± 10% sched_debug.cpu.nr_load_updates.5
10461 ± 1% -11.5% 9259 ± 2% sched_debug.cpu.nr_load_updates.8
10854 ± 1% -15.0% 9222 ± 1% sched_debug.cpu.nr_load_updates.9
12483 ± 0% -11.9% 11002 ± 0% sched_debug.cpu.nr_load_updates.avg
10206 ± 1% -17.0% 8471 ± 2% sched_debug.cpu.nr_load_updates.min
70384 ± 1% -10.9% 62687 ± 4% sched_debug.cpu.nr_switches.0
37725 ± 1% -19.1% 30535 ± 2% sched_debug.cpu.nr_switches.11
40729 ± 4% -9.3% 36933 ± 2% sched_debug.cpu.nr_switches.12
38235 ± 2% -20.2% 30507 ± 1% sched_debug.cpu.nr_switches.13
38149 ± 0% -21.6% 29911 ± 2% sched_debug.cpu.nr_switches.15
62122 ± 6% -20.8% 49171 ± 5% sched_debug.cpu.nr_switches.3
67546 ± 5% -15.2% 57293 ± 4% sched_debug.cpu.nr_switches.4
63223 ± 7% -18.4% 51620 ± 4% sched_debug.cpu.nr_switches.5
39171 ± 3% -23.5% 29966 ± 1% sched_debug.cpu.nr_switches.9
52157 ± 1% -13.3% 45227 ± 1% sched_debug.cpu.nr_switches.avg
73302 ± 2% -8.5% 67039 ± 1% sched_debug.cpu.nr_switches.max
37405 ± 1% -20.7% 29653 ± 1% sched_debug.cpu.nr_switches.min
-11351 ± -6% -41.6% -6624 ± -5% sched_debug.cpu.nr_uninterruptible.0
2139 ± 7% -25.9% 1584 ± 8% sched_debug.cpu.nr_uninterruptible.10
2171 ± 7% -41.3% 1274 ± 9% sched_debug.cpu.nr_uninterruptible.11
2160 ± 9% -14.2% 1854 ± 2% sched_debug.cpu.nr_uninterruptible.12
2336 ± 9% -40.1% 1400 ± 5% sched_debug.cpu.nr_uninterruptible.13
2344 ± 9% -19.5% 1886 ± 4% sched_debug.cpu.nr_uninterruptible.14
2254 ± 5% -37.5% 1408 ± 5% sched_debug.cpu.nr_uninterruptible.15
-1447 ± -5% -18.8% -1174 ±-13% sched_debug.cpu.nr_uninterruptible.3
-187.00 ±-92% +111.1% -394.75 ±-18% sched_debug.cpu.nr_uninterruptible.4
-1690 ± -4% -30.0% -1182 ±-10% sched_debug.cpu.nr_uninterruptible.5
-1455 ± -9% -18.4% -1187 ± -7% sched_debug.cpu.nr_uninterruptible.7
1982 ± 2% -33.2% 1323 ± 12% sched_debug.cpu.nr_uninterruptible.8
2342 ± 5% -38.6% 1438 ± 2% sched_debug.cpu.nr_uninterruptible.9
2513 ± 8% -23.1% 1934 ± 2% sched_debug.cpu.nr_uninterruptible.max
-11352 ± -6% -41.6% -6625 ± -5% sched_debug.cpu.nr_uninterruptible.min
3333 ± 5% -37.8% 2074 ± 3% sched_debug.cpu.nr_uninterruptible.stddev
64594 ± 4% -13.4% 55961 ± 9% sched_debug.cpu.sched_count.1
37747 ± 1% -19.1% 30556 ± 2% sched_debug.cpu.sched_count.11
40752 ± 4% -9.3% 36955 ± 2% sched_debug.cpu.sched_count.12
38258 ± 2% -19.7% 30739 ± 0% sched_debug.cpu.sched_count.13
38303 ± 1% -21.9% 29929 ± 2% sched_debug.cpu.sched_count.15
65140 ± 8% -21.6% 51079 ± 6% sched_debug.cpu.sched_count.3
83422 ± 34% -29.9% 58520 ± 3% sched_debug.cpu.sched_count.4
95088 ± 36% -41.7% 55482 ± 13% sched_debug.cpu.sched_count.7
39933 ± 6% -24.9% 29988 ± 1% sched_debug.cpu.sched_count.9
37427 ± 1% -20.7% 29673 ± 1% sched_debug.cpu.sched_count.min
30342 ± 1% -10.4% 27183 ± 4% sched_debug.cpu.sched_goidle.0
16587 ± 13% -15.6% 14005 ± 4% sched_debug.cpu.sched_goidle.10
15173 ± 1% -19.2% 12260 ± 2% sched_debug.cpu.sched_goidle.11
15803 ± 4% -10.5% 14137 ± 1% sched_debug.cpu.sched_goidle.12
15371 ± 2% -20.6% 12208 ± 1% sched_debug.cpu.sched_goidle.13
15353 ± 1% -22.9% 11829 ± 2% sched_debug.cpu.sched_goidle.15
27813 ± 6% -22.0% 21689 ± 5% sched_debug.cpu.sched_goidle.3
29571 ± 6% -17.0% 24532 ± 4% sched_debug.cpu.sched_goidle.4
28495 ± 8% -19.8% 22843 ± 5% sched_debug.cpu.sched_goidle.5
15828 ± 3% -24.9% 11887 ± 2% sched_debug.cpu.sched_goidle.9
22047 ± 1% -14.3% 18903 ± 0% sched_debug.cpu.sched_goidle.avg
32479 ± 3% -8.7% 29652 ± 3% sched_debug.cpu.sched_goidle.max
14845 ± 1% -20.9% 11738 ± 1% sched_debug.cpu.sched_goidle.min
34654 ± 7% -21.2% 27307 ± 7% sched_debug.cpu.ttwu_count.1
18674 ± 8% -18.4% 15244 ± 2% sched_debug.cpu.ttwu_count.10
16391 ± 3% -15.9% 13784 ± 3% sched_debug.cpu.ttwu_count.11
18849 ± 6% -15.1% 16000 ± 4% sched_debug.cpu.ttwu_count.12
19348 ± 18% -25.2% 14469 ± 10% sched_debug.cpu.ttwu_count.13
34534 ± 4% -24.5% 26061 ± 2% sched_debug.cpu.ttwu_count.3
34388 ± 3% -20.8% 27222 ± 8% sched_debug.cpu.ttwu_count.5
33995 ± 2% -18.2% 27821 ± 6% sched_debug.cpu.ttwu_count.7
17553 ± 7% -9.8% 15835 ± 1% sched_debug.cpu.ttwu_count.8
17278 ± 3% -15.5% 14604 ± 7% sched_debug.cpu.ttwu_count.9
29489 ± 1% -12.9% 25686 ± 1% sched_debug.cpu.ttwu_count.avg
16005 ± 1% -18.1% 13115 ± 1% sched_debug.cpu.ttwu_count.min
28738 ± 1% -21.6% 22523 ± 7% sched_debug.cpu.ttwu_local.0
4013 ± 2% -9.8% 3620 ± 1% sched_debug.cpu.ttwu_local.13
4091 ± 0% -10.2% 3674 ± 3% sched_debug.cpu.ttwu_local.15
8228 ± 16% -26.4% 6054 ± 6% sched_debug.cpu.ttwu_local.3
4115 ± 5% -13.8% 3547 ± 3% sched_debug.cpu.ttwu_local.9
7392 ± 1% -10.8% 6593 ± 1% sched_debug.cpu.ttwu_local.avg
29446 ± 2% -19.4% 23729 ± 4% sched_debug.cpu.ttwu_local.max
3903 ± 0% -13.4% 3382 ± 3% sched_debug.cpu.ttwu_local.min
6008 ± 2% -21.6% 4709 ± 6% sched_debug.cpu.ttwu_local.stddev

lkp-ne04: Nehalem-EP
Memory: 12G

To reproduce:

git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: fsmark
default-monitors:
wait: activate-monitor
kmsg:
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
interval: 10
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 60
cpufreq_governor: performance
default-watchdogs:
oom-killer:
watchdog:
commit: 957efb0c2144cc5ff1795f43bf2d2ca430eaa227
model: Nehalem-EP
memory: 12G
hdd_partitions: "/dev/disk/by-id/ata-ST3500514NS_9WJ03EBA-part3"
swap_partitions: "/dev/disk/by-id/ata-ST3120026AS_5MS07HA2-part2"
rootfs_partition: "/dev/disk/by-id/ata-ST3500514NS_9WJ03EBA-part1"
category: benchmark
iterations: 1x
nr_threads: 32t
disk: 1HDD
fs: f2fs
fs2:
fsmark:
filesize: 9B
test_size: 400M
sync_method: fsyncBeforeClose
nr_directories: 16d
nr_files_per_directory: 256fpd
queue: bisect
testbox: lkp-ne04
tbox_group: lkp-ne04
kconfig: x86_64-rhel
enqueue_time: 2016-01-10 20:53:36.807203593 +08:00
id: c048f087cd69ebe2df8876efff32fafeaf2b75f9
user: lkp
compiler: gcc-4.9
head_commit: 89e4e7134d7e52c9cdb1e2f824b811ee85f90a69
base_commit: 168309855a7d1e16db751e9c647119fe2d2dc878
branch: linux-devel/devel-hourly-2016011000
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/fsmark/performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd/lkp-ne04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/957efb0c2144cc5ff1795f43bf2d2ca430eaa227/0"
job_file: "/lkp/scheduled/lkp-ne04/bisect_fsmark-performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd-debian-x86_64-2015-02-07.cgz-x86_64-rhel-957efb0c2144cc5ff1795f43bf2d2ca430eaa227-20160110-69815-ausbd2-0.yaml"
nr_cpu: "$(nproc)"
max_uptime: 885.8199999999999
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-ne04/bisect_fsmark-performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd-debian-x86_64-2015-02-07.cgz-x86_64-rhel-957efb0c2144cc5ff1795f43bf2d2ca430eaa227-20160110-69815-ausbd2-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2016011000
- commit=957efb0c2144cc5ff1795f43bf2d2ca430eaa227
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/957efb0c2144cc5ff1795f43bf2d2ca430eaa227/vmlinuz-4.4.0-rc3-00311-g957efb0
- max_uptime=885
- RESULT_ROOT=/result/fsmark/performance-1x-32t-1HDD-f2fs-9B-400M-fsyncBeforeClose-16d-256fpd/lkp-ne04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/957efb0c2144cc5ff1795f43bf2d2ca430eaa227/0
- LKP_SERVER=inn
- |2-


earlyprintk=ttyS0,115200 systemd.log_level=err
debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
console=ttyS0,115200 console=tty0 vga=normal

rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/957efb0c2144cc5ff1795f43bf2d2ca430eaa227/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs2.cgz,/lkp/benchmarks/fsmark.cgz"
linux_headers_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/957efb0c2144cc5ff1795f43bf2d2ca430eaa227/linux-headers.cgz"
repeat_to: 2
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/957efb0c2144cc5ff1795f43bf2d2ca430eaa227/vmlinuz-4.4.0-rc3-00311-g957efb0"
dequeue_time: 2016-01-10 21:03:25.327473320 +08:00
job_state: finished
loadavg: 18.91 7.32 2.66 2/254 3112
start_time: '1452431044'
end_time: '1452431124'
version: "/lkp/lkp/.src-20160108-171656"

Attachment: reproduce.sh
Description: Bourne shell script