[LKP] [futex] 1d0dcb3ad9d: No primary result change, +9.6% will-it-scale.time.involuntary_context_switches, +32.2% will-it-scale.time.user_time, -4.8% will-it-scale.time.system_time

From: Huang Ying
Date: Tue May 19 2015 - 23:26:13 EST


FYI, we noticed the changes below on

git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
commit 1d0dcb3ad9d336e6d6ee020a750a7f8d907e28de ("futex: Implement lockless wakeups")
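
For context: the patch moves futex wakeups over to the wake-queue
mechanism introduced by the parent commit 7675104990ed ("sched:
Implement lockless wake-queues"). Tasks to be woken are collected on an
on-stack wake_q while the futex hash-bucket lock is held, and the actual
wakeups are issued only after that lock has been dropped, so woken
waiters no longer immediately contend on hb->lock with the waker. A
rough, non-compilable sketch of the pattern (illustrative only, not the
actual futex.c code; wake_q_add()/wake_up_q() are the real helpers, the
surrounding futex details are simplified):

/* Sketch of the deferred-wakeup pattern -- not the actual kernel source. */
static void futex_wake_sketch(struct futex_hash_bucket *hb, int nr_wake)
{
	WAKE_Q(wake_q);		/* on-stack wake queue; DEFINE_WAKE_Q() in later kernels */
	struct futex_q *this, *next;
	int woken = 0;

	spin_lock(&hb->lock);
	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		/* the real code does this via mark_wake_futex() */
		wake_q_add(&wake_q, this->task);
		if (++woken >= nr_wake)
			break;
	}
	spin_unlock(&hb->lock);

	/* wakeups happen here, after hb->lock has been released */
	wake_up_q(&wake_q);
}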


testcase/path_params/tbox_group: will-it-scale/powersave-pthread_mutex1/lituya

7675104990ed255b 1d0dcb3ad9d336e6d6ee020a75
---------------- --------------------------
%stddev %change %stddev
\ | \
5989 ± 2% +9.6% 6566 ± 2% will-it-scale.time.involuntary_context_switches
136 ± 0% +32.2% 180 ± 0% will-it-scale.time.user_time
31929544 ± 0% +64.7% 52574500 ± 0% will-it-scale.time.voluntary_context_switches
1241 ± 0% -4.8% 1182 ± 0% will-it-scale.time.system_time
453 ± 0% -1.3% 447 ± 0% will-it-scale.time.percent_of_cpu_this_job_got
213800 ± 0% +62.8% 348085 ± 0% vmstat.system.cs
20662 ± 2% +10.3% 22800 ± 1% vmstat.system.in
136 ± 0% +32.2% 180 ± 0% time.user_time
31929544 ± 0% +64.7% 52574500 ± 0% time.voluntary_context_switches
25497500 ± 1% +79.8% 45847592 ± 0% cpuidle.C1-HSW.usage
25909310 ± 9% +40.4% 36385075 ± 10% cpuidle.C1-HSW.time
385 ± 1% -3.9% 370 ± 1% latency_stats.avg.do_wait.SyS_wait4.system_call_fastpath
45 ± 19% +46.4% 66 ± 12% latency_stats.avg.unix_wait_for_peer.unix_dgram_sendmsg.sock_sendmsg.___sys_sendmsg.__sys_sendmsg.SyS_sendmsg.system_call_fastpath
3199870 ± 2% +107.9% 6653184 ± 3% latency_stats.hits.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
4291 ± 5% -26.1% 3172 ± 4% latency_stats.max.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
942123 ± 1% -3.8% 906698 ± 1% latency_stats.sum.do_wait.SyS_wait4.system_call_fastpath
14068 ± 10% -10.3% 12621 ± 3% latency_stats.sum.ep_poll.SyS_epoll_wait.system_call_fastpath
1442334 ± 0% -7.1% 1339318 ± 1% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
4763770 ± 1% +41.4% 6733885 ± 3% latency_stats.sum.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
49 ± 1% -24.7% 37 ± 18% sched_debug.cfs_rq[13]:/.load
52 ± 4% -18.1% 43 ± 20% sched_debug.cfs_rq[13]:/.runnable_load_avg
672 ± 29% -43.0% 383 ± 19% sched_debug.cfs_rq[14]:/.utilization_load_avg
596 ± 12% +16.9% 697 ± 1% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
51 ± 20% -35.9% 33 ± 14% sched_debug.cfs_rq[3]:/.runnable_load_avg
68 ± 27% -35.0% 44 ± 48% sched_debug.cfs_rq[3]:/.load
433123 ± 13% -21.0% 342236 ± 7% sched_debug.cfs_rq[7]:/.min_vruntime
21198 ± 15% -21.0% 16755 ± 1% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
35997 ± 20% -29.4% 25419 ± 9% sched_debug.cfs_rq[7]:/.exec_clock
906034 ± 16% +21.5% 1101140 ± 8% sched_debug.cpu#1.sched_goidle
1813964 ± 16% +21.5% 2203255 ± 8% sched_debug.cpu#1.nr_switches
911649 ± 16% +25.8% 1146513 ± 8% sched_debug.cpu#1.ttwu_count
1814079 ± 16% +21.5% 2203379 ± 8% sched_debug.cpu#1.sched_count
1616994 ± 19% +114.4% 3466653 ± 7% sched_debug.cpu#10.nr_switches
7 ± 17% -53.6% 3 ± 25% sched_debug.cpu#10.nr_uninterruptible
427 ± 5% +29.6% 553 ± 9% sched_debug.cpu#10.ttwu_local
822628 ± 20% +115.5% 1772801 ± 7% sched_debug.cpu#10.ttwu_count
1617109 ± 19% +114.4% 3466772 ± 7% sched_debug.cpu#10.sched_count
808217 ± 19% +114.4% 1732959 ± 7% sched_debug.cpu#10.sched_goidle
1818260 ± 10% +56.9% 2851959 ± 16% sched_debug.cpu#11.sched_count
908654 ± 10% +56.9% 1425514 ± 16% sched_debug.cpu#11.sched_goidle
1818138 ± 10% +56.9% 2851840 ± 16% sched_debug.cpu#11.nr_switches
896624 ± 8% +52.5% 1367494 ± 15% sched_debug.cpu#11.ttwu_count
1857 ± 11% +18.6% 2202 ± 8% sched_debug.cpu#12.curr->pid
58 ± 10% -19.8% 46 ± 20% sched_debug.cpu#13.cpu_load[2]
1885115 ± 10% +64.6% 3102536 ± 11% sched_debug.cpu#13.nr_switches
1885155 ± 10% +64.6% 3102576 ± 11% sched_debug.cpu#13.sched_count
942063 ± 10% +64.6% 1550798 ± 11% sched_debug.cpu#13.sched_goidle
920620 ± 9% +62.2% 1493320 ± 9% sched_debug.cpu#13.ttwu_count
56 ± 5% -23.7% 42 ± 15% sched_debug.cpu#13.cpu_load[4]
57 ± 7% -23.1% 44 ± 14% sched_debug.cpu#13.cpu_load[3]
49 ± 1% -24.7% 37 ± 18% sched_debug.cpu#13.load
745920 ± 12% +91.8% 1430771 ± 4% sched_debug.cpu#14.sched_goidle
1492512 ± 12% +91.8% 2862256 ± 4% sched_debug.cpu#14.nr_switches
746237 ± 12% +88.0% 1403001 ± 4% sched_debug.cpu#14.ttwu_count
1492551 ± 12% +91.8% 2862299 ± 4% sched_debug.cpu#14.sched_count
577321 ± 30% +161.5% 1509820 ± 7% sched_debug.cpu#15.sched_goidle
574967 ± 31% +165.3% 1525263 ± 2% sched_debug.cpu#15.ttwu_count
1155549 ± 30% +161.4% 3020803 ± 7% sched_debug.cpu#15.nr_switches
1155589 ± 30% +161.4% 3020846 ± 7% sched_debug.cpu#15.sched_count
1446483 ± 7% +75.4% 2537378 ± 15% sched_debug.cpu#3.nr_switches
720522 ± 9% +71.2% 1233795 ± 18% sched_debug.cpu#3.ttwu_count
722651 ± 7% +75.5% 1268100 ± 15% sched_debug.cpu#3.sched_goidle
1446610 ± 7% +75.4% 2537497 ± 15% sched_debug.cpu#3.sched_count
823639 ± 17% +80.6% 1487383 ± 14% sched_debug.cpu#5.nr_switches
27549 ± 6% +22.7% 33806 ± 16% sched_debug.cpu#5.nr_load_updates
670845 ± 9% -11.3% 595156 ± 5% sched_debug.cpu#5.avg_idle
410777 ± 17% +80.8% 742641 ± 14% sched_debug.cpu#5.sched_goidle
407183 ± 17% +78.5% 726745 ± 15% sched_debug.cpu#5.ttwu_count
823685 ± 17% +80.6% 1487428 ± 14% sched_debug.cpu#5.sched_count
1291878 ± 19% +26.7% 1636521 ± 11% sched_debug.cpu#6.nr_switches
2 ± 35% +150.0% 5 ± 24% sched_debug.cpu#6.nr_uninterruptible
1291922 ± 19% +26.7% 1636561 ± 11% sched_debug.cpu#6.sched_count
644667 ± 19% +26.7% 816931 ± 11% sched_debug.cpu#6.sched_goidle
999303 ± 8% +80.0% 1799127 ± 9% sched_debug.cpu#8.sched_goidle
1999260 ± 8% +80.0% 3599253 ± 9% sched_debug.cpu#8.nr_switches
1019255 ± 4% +77.5% 1809326 ± 8% sched_debug.cpu#8.ttwu_count
1999377 ± 8% +80.0% 3599372 ± 9% sched_debug.cpu#8.sched_count
737902 ± 8% -21.6% 578475 ± 15% sched_debug.cpu#9.avg_idle
1696089 ± 13% +100.9% 3406654 ± 2% sched_debug.cpu#9.sched_count
1695966 ± 13% +100.9% 3406521 ± 2% sched_debug.cpu#9.nr_switches
847698 ± 13% +100.9% 1702912 ± 2% sched_debug.cpu#9.sched_goidle
848268 ± 11% +113.4% 1810338 ± 5% sched_debug.cpu#9.ttwu_count

testcase/path_params/tbox_group: will-it-scale/performance-pthread_mutex1/lituya

7675104990ed255b 1d0dcb3ad9d336e6d6ee020a75
---------------- --------------------------
135 ± 0% +32.6% 179 ± 0% will-it-scale.time.user_time
31795775 ± 0% +65.2% 52519962 ± 0% will-it-scale.time.voluntary_context_switches
1242 ± 0% -4.8% 1182 ± 0% will-it-scale.time.system_time
452 ± 0% -1.1% 447 ± 0% will-it-scale.time.percent_of_cpu_this_job_got
9 ± 0% -11.1% 8 ± 0% vmstat.procs.r
213068 ± 0% +63.4% 348183 ± 0% vmstat.system.cs
20072 ± 1% +15.4% 23157 ± 1% vmstat.system.in
135 ± 0% +32.6% 179 ± 0% time.user_time
31795775 ± 0% +65.2% 52519962 ± 0% time.voluntary_context_switches
25884654 ± 0% +77.0% 45817600 ± 0% cpuidle.C1-HSW.usage
27193585 ± 9% +50.5% 40917267 ± 9% cpuidle.C1-HSW.time
405 ± 15% +90.6% 772 ± 18% cpuidle.POLL.usage
302 ± 2% -7.3% 280 ± 2% latency_stats.avg.do_wait.SyS_wait4.system_call_fastpath
637 ± 34% +81.5% 1156 ± 36% latency_stats.avg.ep_poll.SyS_epoll_wait.system_call_fastpath
221 ± 1% -8.5% 202 ± 0% latency_stats.avg.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
3365738 ± 2% +99.0% 6698670 ± 3% latency_stats.hits.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
224 ± 25% +94.2% 435 ± 39% latency_stats.max.rpc_wait_bit_killable.__rpc_execute.rpc_execute.rpc_run_task.nfs4_call_sync_sequence.[nfsv4]._nfs4_proc_access.[nfsv4].nfs4_proc_access.[nfsv4].nfs_do_access.nfs_permission.__inode_permission.inode_permission.may_open
741091 ± 3% -7.2% 687939 ± 2% latency_stats.sum.do_wait.SyS_wait4.system_call_fastpath
1150919 ± 1% -10.0% 1035254 ± 1% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
4920864 ± 2% +37.8% 6781129 ± 3% latency_stats.sum.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
5.32 ± 6% -36.1% 3.40 ± 41% perf-profile.cpu-cycles.search_binary_handler.do_execveat_common.sys_execve.return_from_execve
5.42 ± 4% -37.2% 3.40 ± 41% perf-profile.cpu-cycles.load_elf_binary.search_binary_handler.do_execveat_common.sys_execve.return_from_execve
7.46 ± 20% -43.7% 4.20 ± 41% perf-profile.cpu-cycles.return_from_execve
7.46 ± 20% -43.7% 4.20 ± 41% perf-profile.cpu-cycles.do_execveat_common.isra.29.sys_execve.return_from_execve
7.46 ± 20% -43.7% 4.20 ± 41% perf-profile.cpu-cycles.sys_execve.return_from_execve
13 ± 19% -50.9% 6 ± 35% sched_debug.cfs_rq[11]:/.nr_spread_over
23 ± 34% +48.4% 34 ± 19% sched_debug.cfs_rq[4]:/.runnable_load_avg
54 ± 22% -43.8% 30 ± 26% sched_debug.cfs_rq[7]:/.runnable_load_avg
39 ± 26% +47.8% 58 ± 24% sched_debug.cfs_rq[8]:/.runnable_load_avg
31 ± 1% +56.8% 49 ± 2% sched_debug.cfs_rq[9]:/.runnable_load_avg
567 ± 13% +23.2% 699 ± 1% sched_debug.cfs_rq[9]:/.tg_runnable_contrib
31 ± 1% +21.6% 38 ± 17% sched_debug.cfs_rq[9]:/.load
26043 ± 12% +23.3% 32103 ± 0% sched_debug.cfs_rq[9]:/.avg->runnable_avg_sum
522365 ± 32% +75.0% 914370 ± 16% sched_debug.cpu#0.ttwu_count
1020121 ± 32% +76.7% 1802488 ± 17% sched_debug.cpu#0.sched_count
507393 ± 33% +77.1% 898471 ± 18% sched_debug.cpu#0.sched_goidle
1019797 ± 32% +76.7% 1802170 ± 17% sched_debug.cpu#0.nr_switches
49 ± 7% -31.0% 34 ± 2% sched_debug.cpu#1.cpu_load[1]
51 ± 8% -33.7% 34 ± 10% sched_debug.cpu#1.cpu_load[0]
48 ± 11% -28.6% 34 ± 3% sched_debug.cpu#1.cpu_load[4]
477826 ± 16% +42.3% 680096 ± 4% sched_debug.cpu#1.avg_idle
48 ± 10% -29.0% 34 ± 3% sched_debug.cpu#1.cpu_load[3]
48 ± 9% -30.9% 33 ± 1% sched_debug.cpu#1.cpu_load[2]
1499187 ± 31% +87.2% 2806508 ± 21% sched_debug.cpu#10.nr_switches
754505 ± 34% +88.6% 1423293 ± 19% sched_debug.cpu#10.ttwu_count
1499311 ± 31% +87.2% 2806624 ± 21% sched_debug.cpu#10.sched_count
749245 ± 31% +87.2% 1402814 ± 21% sched_debug.cpu#10.sched_goidle
49 ± 10% -19.1% 40 ± 4% sched_debug.cpu#11.cpu_load[1]
52 ± 17% -33.8% 34 ± 13% sched_debug.cpu#11.cpu_load[0]
50 ± 7% -23.0% 38 ± 15% sched_debug.cpu#12.cpu_load[3]
53 ± 10% -26.8% 39 ± 15% sched_debug.cpu#12.cpu_load[2]
646988 ± 35% +123.8% 1448084 ± 19% sched_debug.cpu#12.ttwu_count
47 ± 10% -20.5% 37 ± 18% sched_debug.cpu#12.cpu_load[4]
651640 ± 36% +132.8% 1517162 ± 19% sched_debug.cpu#12.sched_goidle
1303921 ± 36% +132.8% 3035620 ± 19% sched_debug.cpu#12.nr_switches
64 ± 21% -35.0% 41 ± 33% sched_debug.cpu#12.cpu_load[0]
58 ± 16% -30.5% 40 ± 20% sched_debug.cpu#12.cpu_load[1]
1303958 ± 36% +132.8% 3035660 ± 19% sched_debug.cpu#12.sched_count
1642807 ± 22% +78.5% 2932208 ± 9% sched_debug.cpu#13.nr_switches
1642856 ± 22% +78.5% 2932247 ± 9% sched_debug.cpu#13.sched_count
821009 ± 22% +78.5% 1465663 ± 9% sched_debug.cpu#13.sched_goidle
788799 ± 19% +81.5% 1431516 ± 8% sched_debug.cpu#13.ttwu_count
870563 ± 20% +44.4% 1257081 ± 15% sched_debug.cpu#14.sched_goidle
1741973 ± 20% +44.4% 2515088 ± 15% sched_debug.cpu#14.nr_switches
866217 ± 19% +43.8% 1245955 ± 12% sched_debug.cpu#14.ttwu_count
1742015 ± 20% +44.4% 2515126 ± 15% sched_debug.cpu#14.sched_count
737410 ± 10% -16.4% 616336 ± 7% sched_debug.cpu#15.avg_idle
700131 ± 34% +86.8% 1307981 ± 17% sched_debug.cpu#15.sched_goidle
699153 ± 35% +83.6% 1283986 ± 18% sched_debug.cpu#15.ttwu_count
1401279 ± 34% +86.8% 2617150 ± 17% sched_debug.cpu#15.nr_switches
1401320 ± 34% +86.8% 2617190 ± 17% sched_debug.cpu#15.sched_count
1192893 ± 20% +100.2% 2387841 ± 32% sched_debug.cpu#3.nr_switches
601136 ± 20% +97.4% 1186749 ± 29% sched_debug.cpu#3.ttwu_count
595525 ± 20% +100.4% 1193216 ± 32% sched_debug.cpu#3.sched_goidle
1193047 ± 20% +100.2% 2387951 ± 32% sched_debug.cpu#3.sched_count
26 ± 30% +61.0% 42 ± 20% sched_debug.cpu#4.load
25 ± 30% +47.1% 37 ± 18% sched_debug.cpu#4.cpu_load[4]
973362 ± 30% +99.1% 1937649 ± 17% sched_debug.cpu#6.nr_switches
973400 ± 30% +99.1% 1937691 ± 17% sched_debug.cpu#6.sched_count
485548 ± 30% +99.4% 967982 ± 17% sched_debug.cpu#6.sched_goidle
489774 ± 31% +101.8% 988405 ± 21% sched_debug.cpu#6.ttwu_count
1037194 ± 16% +62.4% 1684672 ± 10% sched_debug.cpu#8.sched_goidle
2074962 ± 16% +62.4% 3370325 ± 10% sched_debug.cpu#8.nr_switches
1063289 ± 16% +59.3% 1693545 ± 9% sched_debug.cpu#8.ttwu_count
2075084 ± 16% +62.4% 3370436 ± 10% sched_debug.cpu#8.sched_count
35 ± 18% +45.0% 50 ± 3% sched_debug.cpu#9.cpu_load[2]
1715208 ± 22% +104.6% 3508980 ± 11% sched_debug.cpu#9.sched_count
35 ± 18% +42.0% 50 ± 3% sched_debug.cpu#9.cpu_load[3]
1901 ± 8% +15.2% 2191 ± 0% sched_debug.cpu#9.curr->pid
1715087 ± 22% +104.6% 3508864 ± 11% sched_debug.cpu#9.nr_switches
35 ± 18% +40.7% 49 ± 3% sched_debug.cpu#9.cpu_load[0]
35 ± 18% +42.0% 50 ± 3% sched_debug.cpu#9.cpu_load[4]
35 ± 18% +40.7% 49 ± 1% sched_debug.cpu#9.load
34 ± 15% +47.8% 50 ± 3% sched_debug.cpu#9.cpu_load[1]
857164 ± 22% +104.6% 1754026 ± 11% sched_debug.cpu#9.sched_goidle
868187 ± 22% +112.2% 1842383 ± 14% sched_debug.cpu#9.ttwu_count
478 ± 12% +49.7% 716 ± 21% sched_debug.cpu#9.ttwu_local

testcase/path_params/tbox_group: will-it-scale/pthread_mutex1/lkp-sb03

7675104990ed255b 1d0dcb3ad9d336e6d6ee020a75
---------------- --------------------------
142 ± 0% +26.7% 180 ± 0% will-it-scale.time.user_time
21289130 ± 1% +60.5% 34178388 ± 1% will-it-scale.time.voluntary_context_switches
2337 ± 0% -2.2% 2285 ± 0% will-it-scale.time.system_time
136738 ± 1% +60.2% 219104 ± 1% vmstat.system.cs
24114 ± 2% +19.0% 28694 ± 5% vmstat.system.in
142 ± 0% +26.7% 180 ± 0% time.user_time
21289130 ± 1% +60.5% 34178388 ± 1% time.voluntary_context_switches
1025 ± 10% +20.3% 1233 ± 8% numa-meminfo.node0.Unevictable
1025 ± 10% +20.3% 1233 ± 8% numa-meminfo.node0.Mlocked
1210 ± 8% -17.2% 1002 ± 10% numa-meminfo.node1.Mlocked
1210 ± 8% -17.2% 1002 ± 10% numa-meminfo.node1.Unevictable
256 ± 10% +20.2% 307 ± 8% numa-vmstat.node0.nr_mlock
256 ± 10% +20.2% 307 ± 8% numa-vmstat.node0.nr_unevictable
302 ± 8% -17.1% 250 ± 10% numa-vmstat.node1.nr_mlock
302 ± 8% -17.1% 250 ± 10% numa-vmstat.node1.nr_unevictable
11992500 ± 1% +64.4% 19712016 ± 2% cpuidle.C1-SNB.usage
9 ± 33% -50.0% 4 ± 27% latency_stats.avg.wait_on_page_bit_killable.__lock_page_or_retry.filemap_fault.__do_fault.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
8981882 ± 2% +31.8% 11837616 ± 2% latency_stats.hits.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
3056125 ± 0% +1.0% 3085848 ± 0% latency_stats.sum.do_wait.SyS_wait4.system_call_fastpath
4855509 ± 0% +1.4% 4923890 ± 0% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.system_call_fastpath
16544499 ± 2% +31.4% 21738877 ± 3% latency_stats.sum.futex_wait_queue_me.futex_wait.do_futex.SyS_futex.system_call_fastpath
0.86 ± 1% +29.4% 1.11 ± 2% perf-profile.cpu-cycles.print_context_stack.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity
0.00 ± 0% +Inf% 1.44 ± 6% perf-profile.cpu-cycles.ttwu_do_wakeup.ttwu_do_activate.try_to_wake_up.wake_up_process.wake_up_q
0.00 ± 0% +Inf% 10.52 ± 2% perf-profile.cpu-cycles.wake_up_q.futex_wake.do_futex.sys_futex.system_call_fastpath
0.00 ± 0% +Inf% 1.49 ± 3% perf-profile.cpu-cycles.mark_wake_futex.futex_wake.do_futex.sys_futex.system_call_fastpath
0.00 ± 0% +Inf% 10.04 ± 2% perf-profile.cpu-cycles.wake_up_process.wake_up_q.futex_wake.do_futex.sys_futex
5.57 ± 1% -100.0% 0.00 ± 0% perf-profile.cpu-cycles.wake_up_state.wake_futex.futex_wake.do_futex.sys_futex
6.44 ± 1% -100.0% 0.00 ± 0% perf-profile.cpu-cycles.wake_futex.futex_wake.do_futex.sys_futex.system_call_fastpath
52.44 ± 0% -17.7% 43.16 ± 0% perf-profile.cpu-cycles.futex_wake.do_futex.sys_futex.system_call_fastpath
0.56 ± 2% +93.7% 1.07 ± 4% perf-profile.cpu-cycles.tick_program_event.__hrtimer_start_range_ns.hrtimer_start_range_ns.tick_nohz_restart.tick_nohz_idle_exit
2.67 ± 2% -100.0% 0.00 ± 0% perf-profile.cpu-cycles.enqueue_task.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_state
1.60 ± 1% +85.3% 2.96 ± 5% perf-profile.cpu-cycles.__schedule.schedule.schedule_preempt_disabled.cpu_startup_entry.start_secondary
1.55 ± 3% +89.8% 2.94 ± 5% perf-profile.cpu-cycles.tick_nohz_idle_exit.cpu_startup_entry.start_secondary
2.12 ± 1% +59.8% 3.40 ± 1% perf-profile.cpu-cycles.enqueue_entity.enqueue_task_fair.enqueue_task.activate_task.ttwu_do_activate
0.53 ± 6% +79.9% 0.96 ± 3% perf-profile.cpu-cycles.menu_select.cpuidle_select.cpu_startup_entry.start_secondary
1.04 ± 4% +83.8% 1.90 ± 4% perf-profile.cpu-cycles.get_futex_value_locked.futex_wait_setup.futex_wait.do_futex.sys_futex
0.60 ± 5% +82.6% 1.10 ± 2% perf-profile.cpu-cycles.cpuidle_select.cpu_startup_entry.start_secondary
1.12 ± 1% +105.8% 2.30 ± 4% perf-profile.cpu-cycles.hrtimer_start.tick_nohz_stop_sched_tick.__tick_nohz_idle_enter.tick_nohz_idle_enter.cpu_startup_entry
1.08 ± 1% +105.5% 2.22 ± 4% perf-profile.cpu-cycles.__hrtimer_start_range_ns.hrtimer_start.tick_nohz_stop_sched_tick.__tick_nohz_idle_enter.tick_nohz_idle_enter
0.00 ± 0% +Inf% 6.03 ± 1% perf-profile.cpu-cycles.ttwu_do_activate.constprop.87.try_to_wake_up.wake_up_process.wake_up_q.futex_wake
0.00 ± 0% +Inf% 4.55 ± 1% perf-profile.cpu-cycles.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_process.wake_up_q
89.43 ± 0% -11.2% 79.44 ± 0% perf-profile.cpu-cycles.system_call_fastpath
5.54 ± 1% -100.0% 0.00 ± 0% perf-profile.cpu-cycles.try_to_wake_up.wake_up_state.wake_futex.futex_wake.do_futex
2.41 ± 2% +66.9% 4.02 ± 1% perf-profile.cpu-cycles.enqueue_task_fair.enqueue_task.activate_task.ttwu_do_activate.try_to_wake_up
1.31 ± 4% +107.0% 2.72 ± 3% perf-profile.cpu-cycles.dequeue_task.deactivate_task.__schedule.schedule.futex_wait_queue_me
1.32 ± 4% +105.9% 2.72 ± 3% perf-profile.cpu-cycles.deactivate_task.__schedule.schedule.futex_wait_queue_me.futex_wait
1.01 ± 2% +29.6% 1.31 ± 3% perf-profile.cpu-cycles.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.enqueue_task.activate_task
89.24 ± 0% -11.3% 79.12 ± 0% perf-profile.cpu-cycles.sys_futex.system_call_fastpath
89.03 ± 0% -11.6% 78.72 ± 0% perf-profile.cpu-cycles.do_futex.sys_futex.system_call_fastpath
27.10 ± 0% -34.4% 17.78 ± 1% perf-profile.cpu-cycles._raw_spin_lock.futex_wait_setup.futex_wait.do_futex.sys_futex
0.66 ± 4% +108.3% 1.38 ± 0% perf-profile.cpu-cycles.__remove_hrtimer.__hrtimer_start_range_ns.hrtimer_start.tick_nohz_stop_sched_tick.__tick_nohz_idle_enter
0.87 ± 6% +96.3% 1.71 ± 5% perf-profile.cpu-cycles.intel_idle.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
1.09 ± 5% +92.6% 2.09 ± 4% perf-profile.cpu-cycles.tick_nohz_restart.tick_nohz_idle_exit.cpu_startup_entry.start_secondary
1.23 ± 5% +105.3% 2.52 ± 2% perf-profile.cpu-cycles.dequeue_task_fair.dequeue_task.deactivate_task.__schedule.schedule
2.76 ± 1% +102.0% 5.57 ± 1% perf-profile.cpu-cycles.__schedule.schedule.futex_wait_queue_me.futex_wait.do_futex
0.93 ± 2% +29.0% 1.20 ± 2% perf-profile.cpu-cycles.dump_trace.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair
31.56 ± 0% -18.7% 25.64 ± 0% perf-profile.cpu-cycles.futex_wait_setup.futex_wait.do_futex.sys_futex.system_call_fastpath
0.83 ± 3% +87.6% 1.55 ± 5% perf-profile.cpu-cycles.hrtimer_start_range_ns.tick_nohz_restart.tick_nohz_idle_exit.cpu_startup_entry.start_secondary
0.95 ± 2% +28.6% 1.22 ± 2% perf-profile.cpu-cycles.save_stack_trace_tsk.__account_scheduler_latency.enqueue_entity.enqueue_task_fair.enqueue_task
0.53 ± 1% +96.2% 1.04 ± 3% perf-profile.cpu-cycles.clockevents_program_event.tick_program_event.__remove_hrtimer.__hrtimer_start_range_ns.hrtimer_start
1.60 ± 2% +94.8% 3.12 ± 5% perf-profile.cpu-cycles.tick_nohz_stop_sched_tick.__tick_nohz_idle_enter.tick_nohz_idle_enter.cpu_startup_entry.start_secondary
42.33 ± 0% -40.8% 25.08 ± 1% perf-profile.cpu-cycles._raw_spin_lock.futex_wake.do_futex.sys_futex.system_call_fastpath
2.86 ± 1% +100.1% 5.72 ± 1% perf-profile.cpu-cycles.schedule.futex_wait_queue_me.futex_wait.do_futex.sys_futex
0.54 ± 0% +97.7% 1.06 ± 2% perf-profile.cpu-cycles.tick_program_event.__remove_hrtimer.__hrtimer_start_range_ns.hrtimer_start.tick_nohz_stop_sched_tick
1.01 ± 7% +103.5% 2.05 ± 3% perf-profile.cpu-cycles.dequeue_entity.dequeue_task_fair.dequeue_task.deactivate_task.__schedule
1.66 ± 0% +86.1% 3.08 ± 5% perf-profile.cpu-cycles.schedule_preempt_disabled.cpu_startup_entry.start_secondary
2.68 ± 2% -100.0% 0.00 ± 0% perf-profile.cpu-cycles.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_state.wake_futex
1.80 ± 2% +96.0% 3.53 ± 6% perf-profile.cpu-cycles.__tick_nohz_idle_enter.tick_nohz_idle_enter.cpu_startup_entry.start_secondary
0.81 ± 3% +96.6% 1.59 ± 2% perf-profile.cpu-cycles.__hrtimer_start_range_ns.hrtimer_start_range_ns.tick_nohz_restart.tick_nohz_idle_exit.cpu_startup_entry
1.89 ± 2% +95.5% 3.70 ± 5% perf-profile.cpu-cycles.tick_nohz_idle_enter.cpu_startup_entry.start_secondary
3.42 ± 1% -100.0% 0.00 ± 0% perf-profile.cpu-cycles.ttwu_do_activate.constprop.87.try_to_wake_up.wake_up_state.wake_futex.futex_wake
1.05 ± 6% +98.1% 2.08 ± 6% perf-profile.cpu-cycles.cpuidle_enter_state.cpuidle_enter.cpu_startup_entry.start_secondary
0.62 ± 2% +96.8% 1.23 ± 1% perf-profile.cpu-cycles.set_next_entity.pick_next_task_fair.__schedule.schedule.schedule_preempt_disabled
3.46 ± 1% +97.6% 6.83 ± 1% perf-profile.cpu-cycles.futex_wait_queue_me.futex_wait.do_futex.sys_futex.system_call_fastpath
0.00 ± 0% +Inf% 4.54 ± 1% perf-profile.cpu-cycles.enqueue_task.activate_task.ttwu_do_activate.try_to_wake_up.wake_up_process
8.27 ± 1% +91.7% 15.86 ± 4% perf-profile.cpu-cycles.start_secondary
1.03 ± 2% +96.4% 2.03 ± 1% perf-profile.cpu-cycles.pick_next_task_fair.__schedule.schedule.schedule_preempt_disabled.cpu_startup_entry
8.12 ± 1% +91.6% 15.56 ± 4% perf-profile.cpu-cycles.cpu_startup_entry.start_secondary
1.10 ± 6% +100.2% 2.21 ± 5% perf-profile.cpu-cycles.cpuidle_enter.cpu_startup_entry.start_secondary
0.54 ± 2% +94.4% 1.05 ± 3% perf-profile.cpu-cycles.clockevents_program_event.tick_program_event.__hrtimer_start_range_ns.hrtimer_start_range_ns.tick_nohz_restart
1.61 ± 1% +86.8% 3.01 ± 5% perf-profile.cpu-cycles.schedule.schedule_preempt_disabled.cpu_startup_entry.start_secondary
0.00 ± 0% +Inf% 9.90 ± 2% perf-profile.cpu-cycles.try_to_wake_up.wake_up_process.wake_up_q.futex_wake.do_futex
510107 ± 6% -24.1% 387141 ± 14% sched_debug.cfs_rq[12]:/.min_vruntime
19054 ± 8% -26.6% 13995 ± 16% sched_debug.cfs_rq[12]:/.exec_clock
350 ± 31% -36.2% 223 ± 30% sched_debug.cfs_rq[13]:/.tg_runnable_contrib
16072 ± 31% -35.9% 10302 ± 29% sched_debug.cfs_rq[13]:/.avg->runnable_avg_sum
16913 ± 33% -48.9% 8648 ± 3% sched_debug.cfs_rq[14]:/.avg->runnable_avg_sum
20 ± 49% -48.8% 10 ± 31% sched_debug.cfs_rq[14]:/.runnable_load_avg
369 ± 33% -48.8% 188 ± 3% sched_debug.cfs_rq[14]:/.tg_runnable_contrib
210 ± 17% +70.7% 359 ± 2% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
15752 ± 22% +58.6% 24978 ± 21% sched_debug.cfs_rq[15]:/.exec_clock
9620 ± 17% +70.9% 16437 ± 2% sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
8529 ± 5% -10.4% 7639 ± 7% sched_debug.cfs_rq[1]:/.tg_load_avg
2 ± 47% +136.4% 6 ± 31% sched_debug.cfs_rq[24]:/.nr_spread_over
47788 ± 3% +11.0% 53030 ± 4% sched_debug.cfs_rq[28]:/.exec_clock
8514 ± 5% -10.3% 7634 ± 7% sched_debug.cfs_rq[2]:/.tg_load_avg
17945 ± 18% +14.3% 20504 ± 20% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
391 ± 18% +14.6% 448 ± 19% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
360 ± 27% +43.3% 516 ± 0% sched_debug.cfs_rq[30]:/.tg_runnable_contrib
22649 ± 7% -27.4% 16434 ± 2% sched_debug.cfs_rq[31]:/.avg->runnable_avg_sum
493 ± 7% -27.6% 357 ± 2% sched_debug.cfs_rq[31]:/.tg_runnable_contrib
8510 ± 5% -10.3% 7634 ± 7% sched_debug.cfs_rq[3]:/.tg_load_avg
8511 ± 5% -12.3% 7461 ± 10% sched_debug.cfs_rq[4]:/.tg_load_avg
8503 ± 5% -12.3% 7460 ± 10% sched_debug.cfs_rq[5]:/.tg_load_avg
8197 ± 6% -9.2% 7446 ± 10% sched_debug.cfs_rq[6]:/.tg_load_avg
2994 ± 18% -40.2% 1792 ± 11% sched_debug.cpu#10.ttwu_local
620311 ± 16% +19.5% 741285 ± 9% sched_debug.cpu#11.avg_idle
1291 ± 11% +71.4% 2214 ± 17% sched_debug.cpu#12.ttwu_local
44903 ± 2% -10.9% 40028 ± 6% sched_debug.cpu#12.nr_load_updates
18 ± 43% -56.2% 8 ± 29% sched_debug.cpu#14.cpu_load[1]
16 ± 40% -64.1% 5 ± 14% sched_debug.cpu#14.cpu_load[4]
16 ± 39% -64.2% 6 ± 11% sched_debug.cpu#14.cpu_load[3]
20 ± 49% -48.8% 10 ± 31% sched_debug.cpu#14.cpu_load[0]
651720 ± 19% +24.1% 808588 ± 3% sched_debug.cpu#14.avg_idle
17 ± 42% -61.4% 6 ± 16% sched_debug.cpu#14.cpu_load[2]
778639 ± 3% -17.1% 645179 ± 3% sched_debug.cpu#15.avg_idle
54145 ± 40% +98.6% 107550 ± 26% sched_debug.cpu#15.sched_goidle
48079 ± 36% +114.8% 103279 ± 25% sched_debug.cpu#15.ttwu_count
109390 ± 40% +99.3% 218008 ± 26% sched_debug.cpu#15.nr_switches
1187 ± 42% +133.4% 2771 ± 9% sched_debug.cpu#15.curr->pid
349 ± 17% +48.8% 520 ± 8% sched_debug.cpu#16.ttwu_local
904055 ± 18% +61.1% 1456350 ± 22% sched_debug.cpu#18.sched_goidle
1810323 ± 18% +61.1% 2915683 ± 22% sched_debug.cpu#18.sched_count
1809190 ± 18% +61.0% 2913153 ± 22% sched_debug.cpu#18.nr_switches
289 ± 25% +88.0% 543 ± 10% sched_debug.cpu#20.ttwu_local
3334 ± 8% -26.2% 2460 ± 19% sched_debug.cpu#21.curr->pid
85059 ± 35% +58.4% 134729 ± 4% sched_debug.cpu#24.ttwu_count
103842 ± 6% +46.8% 152488 ± 13% sched_debug.cpu#28.sched_goidle
221524 ± 8% +40.9% 312183 ± 13% sched_debug.cpu#28.sched_count
208190 ± 6% +46.8% 305531 ± 13% sched_debug.cpu#28.nr_switches
86918 ± 5% +68.6% 146587 ± 11% sched_debug.cpu#28.ttwu_count
207717 ± 16% +31.4% 272890 ± 6% sched_debug.cpu#29.nr_switches
103534 ± 16% +31.5% 136195 ± 6% sched_debug.cpu#29.sched_goidle
88859 ± 17% +50.8% 134010 ± 8% sched_debug.cpu#29.ttwu_count
207734 ± 16% +31.6% 273286 ± 6% sched_debug.cpu#29.sched_count
954 ± 22% +56.1% 1489 ± 24% sched_debug.cpu#3.ttwu_local
81526 ± 39% +68.8% 137618 ± 16% sched_debug.cpu#30.sched_goidle
163546 ± 39% +68.6% 275753 ± 16% sched_debug.cpu#30.nr_switches
163564 ± 39% +72.4% 281962 ± 13% sched_debug.cpu#30.sched_count
70152 ± 37% +89.1% 132670 ± 19% sched_debug.cpu#30.ttwu_count
497038 ± 1% +32.8% 660184 ± 1% sched_debug.cpu#31.avg_idle
695 ± 25% +49.6% 1040 ± 19% sched_debug.cpu#4.ttwu_local
641 ± 12% +58.6% 1017 ± 20% sched_debug.cpu#6.ttwu_local

testcase/path_params/tbox_group: tlbflush/performance-200%-32x-512/lituya

7675104990ed255b 1d0dcb3ad9d336e6d6ee020a75
---------------- --------------------------

lituya: Grantley Haswell
Memory: 16G

lkp-sb03: Sandy Bridge-EP
Memory: 64G






7.5e+06 ++----------------------------------------------------------------+
7e+06 ++ O |
| O O O |
6.5e+06 O+ O O O O O O |
6e+06 ++ O O O O O O O O |
| |
5.5e+06 ++ |
5e+06 ++ |
4.5e+06 ++ |
| |
4e+06 ++ |
3.5e+06 ++ |
*..*.*..*..*. .*. .*..*.*.. .*.. .*.. .*. .*.*..*..*.*..*
3e+06 ++ *. *. *..*.*. * *. *. |
2.5e+06 ++----------------------------------------------------------------+




7.5e+06 ++----------------------------------------------------------------+
| |
7e+06 ++ O O O |
| O |
6.5e+06 O+ O O O O O O |
| O O O O O O O |
6e+06 ++ O |
| |
5.5e+06 ++ |
| |
5e+06 ++ .* |
| .*..*. : *.. .*.. .*. .*..*.*..*
4.5e+06 *+.* : .*.*..*..*.*.. .*. .. * *. *..*.*. |
| *. *. * |
4e+06 ++----------------------------------------------------------------+


cpuidle.C1-HSW.usage

5e+07 ++----------------------------------------------------------------+
O |
4.5e+07 ++ O O O O O O O O O O O O O O O O O O |
| |
| |
4e+07 ++ |
| |
3.5e+07 ++ |
| |
3e+07 ++ |
| |
*..*.*..*..*.*..*.*..*..*.*..*..*.*..*..*. .*. .*. .*.. .*..*
2.5e+07 ++ *..*. *. *. * |
| |
2e+07 ++----------------------------------------------------------------+


will-it-scale.time.user_time

185 ++--------------------------------------------------------------------+
180 O+ O O O O O O O O O O O O O O |
| O O O O |
175 ++ |
170 ++ |
| |
165 ++ |
160 ++ |
155 ++ |
| |
150 ++ |
145 ++ |
| |
140 *+.*.. .*..*..*..*.*..*..*..*..*.*.. .*.. .*. .*.. .*. |
135 ++----*-*------------------------------*-----*----*--*-----*----*--*--*


will-it-scale.time.system_time

1250 ++-------------------------------------------------------------------+
| |
1240 *+.*..*.*..*..*..*.*..*..*..*.*..*..*..*.*..*..*..*.*..*..*..*.*..*..*
| |
1230 ++ |
| |
1220 ++ |
| |
1210 ++ |
| |
1200 ++ |
| |
1190 ++ |
| O O O O O O O O |
1180 O+-O--O----O------------------O--O--O--O-O-----O--O------------------+


will-it-scale.time.percent_of_cpu_this_job_got

453 *+-*--*-*--*--*--*--*-*--*--*--*--*-*--*--*--*--*-*--*--*--*--*-*--*--*
| |
452 ++ |
| |
| |
451 ++ |
| |
450 ++ |
| |
449 ++ |
| |
| |
448 ++ O O O O O O O O O |
| |
447 O+----O----------O-------------O--O-O--O--O-----O-O-------------------+


will-it-scale.time.voluntary_context_switches

5.5e+07 ++----------------------------------------------------------------+
O O O O O O O O O O O O O O O |
| O O O O |
5e+07 ++ |
| |
| |
4.5e+07 ++ |
| |
4e+07 ++ |
| |
| |
3.5e+07 ++ |
| |
*..*.*..*..*.*..*.*..*..*.*..*..*.*..*..*.*..*..*.*..*.*..*..*.*..*
3e+07 ++----------------------------------------------------------------+


time.user_time

185 ++--------------------------------------------------------------------+
180 O+ O O O O O O O O O O O O O O |
| O O O O |
175 ++ |
170 ++ |
| |
165 ++ |
160 ++ |
155 ++ |
| |
150 ++ |
145 ++ |
| |
140 *+.*.. .*..*..*..*.*..*..*..*..*.*.. .*.. .*. .*.. .*. |
135 ++----*-*------------------------------*-----*----*--*-----*----*--*--*


time.voluntary_context_switches

5.5e+07 ++----------------------------------------------------------------+
O O O O O O O O O O O O O O O |
| O O O O |
5e+07 ++ |
| |
| |
4.5e+07 ++ |
| |
4e+07 ++ |
| |
| |
3.5e+07 ++ |
| |
*..*.*..*..*.*..*.*..*..*.*..*..*.*..*..*.*..*..*.*..*.*..*..*.*..*
3e+07 ++----------------------------------------------------------------+


vmstat.system.cs

360000 ++-----------------------------------------------------------------+
O O O O O O O O O O O O O O O O O O O |
340000 ++ |
320000 ++ |
| |
300000 ++ |
| |
280000 ++ |
| |
260000 ++ |
240000 ++ |
| |
220000 ++ |
*..*.*..*..*.*..*..*.*..*..*.*..*..*..*.*..*..*.*..*..*.*..*..*.*..*
200000 ++-----------------------------------------------------------------+

[*] bisect-good sample
[O] bisect-bad sample

To reproduce:

apt-get install ruby
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
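
For a rough idea of what the workload does without setting up lkp-tests:
pthread_mutex1 hammers pthread_mutex_lock()/pthread_mutex_unlock(), and
under contention glibc falls through to futex_wait()/futex_wake(), the
paths that dominate the profiles above. A minimal standalone
approximation (assumption: this is not the actual will-it-scale source,
which also reports per-task operations per second; it only reproduces
the contended lock/unlock pressure):

/* Approximation of a contended pthread mutex loop -- NOT the
 * will-it-scale pthread_mutex1 source, just enough to exercise the
 * same futex_wait()/futex_wake() kernel paths.
 * Build: gcc -O2 -pthread mutex1_approx.c -o mutex1_approx
 * Run:   ./mutex1_approx 16
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define ITERATIONS 1000000UL

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	unsigned long i;

	(void)arg;
	for (i = 0; i < ITERATIONS; i++) {
		pthread_mutex_lock(&mutex);
		pthread_mutex_unlock(&mutex);
	}
	return NULL;
}

int main(int argc, char **argv)
{
	int nthreads = argc > 1 ? atoi(argv[1]) : 16;
	pthread_t *tids = calloc(nthreads, sizeof(*tids));
	int i;

	for (i = 0; i < nthreads; i++)
		pthread_create(&tids[i], NULL, worker, NULL);
	for (i = 0; i < nthreads; i++)
		pthread_join(tids[i], NULL);

	printf("%d threads x %lu lock/unlock iterations done\n",
	       nthreads, ITERATIONS);
	free(tids);
	return 0;
}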


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang

---
testcase: will-it-scale
default-monitors:
wait: pre-test
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
interval: 10
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 60
default-watchdogs:
watch-oom:
watchdog:
cpufreq_governor: powersave
commit: 3e6df1797630d3a8554b4550eec727277091b36f
model: Grantley Haswell
nr_cpu: 16
memory: 16G
hdd_partitions:
swap_partitions:
rootfs_partition:
perf-profile:
freq: 800
will-it-scale:
test: pthread_mutex1
testbox: lituya
tbox_group: lituya
kconfig: x86_64-rhel
enqueue_time: 2015-05-09 04:48:35.857957389 +08:00
user: lkp
queue: cyclic
compiler: gcc-4.9
head_commit: 3e6df1797630d3a8554b4550eec727277091b36f
base_commit: 5ebe6afaf0057ac3eaeb98defd5456894b446d22
branch: linux-devel/devel-hourly-2015050918
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/3e6df1797630d3a8554b4550eec727277091b36f/vmlinuz-4.1.0-rc2-wl-01412-g3e6df17"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/will-it-scale/powersave-pthread_mutex1/lituya/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/3e6df1797630d3a8554b4550eec727277091b36f/0"
LKP_SERVER: inn
job_file: "/lkp/scheduled/lituya/cyclic_will-it-scale-powersave-pthread_mutex1-x86_64-rhel-CYCLIC_HEAD-3e6df1797630d3a8554b4550eec727277091b36f-0-20150509-36915-qiacgu.yaml"
dequeue_time: 2015-05-09 22:22:05.525642047 +08:00
max_uptime: 1500
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lituya/cyclic_will-it-scale-powersave-pthread_mutex1-x86_64-rhel-CYCLIC_HEAD-3e6df1797630d3a8554b4550eec727277091b36f-0-20150509-36915-qiacgu.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015050918
- commit=3e6df1797630d3a8554b4550eec727277091b36f
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/3e6df1797630d3a8554b4550eec727277091b36f/vmlinuz-4.1.0-rc2-wl-01412-g3e6df17
- max_uptime=1500
- RESULT_ROOT=/result/will-it-scale/powersave-pthread_mutex1/lituya/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/3e6df1797630d3a8554b4550eec727277091b36f/0
- LKP_SERVER=inn
- |2-


earlyprintk=ttyS0,115200 systemd.log_level=err
debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
console=ttyS0,115200 console=tty0 vga=normal

rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/3e6df1797630d3a8554b4550eec727277091b36f/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/will-it-scale.cgz"
job_state: finished
loadavg: 9.58 5.85 2.41 1/213 4807
start_time: '1431181361'
end_time: '1431181665'
version: "/lkp/lkp/.src-20150509-201736"
echo powersave > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo powersave > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
./runtest.py pthread_mutex1 32 both 1 8 12 16