[LKP] [locking/mutex] 07d2413a61d: -3.6% unixbench.score +60.8% unixbench.time.system_time

From: Huang Ying
Date: Tue Mar 17 2015 - 01:50:38 EST


FYI, we noticed the following changes on

git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit 07d2413a61db6500f58e614e873eed79d7f2ed72 ("locking/mutex: In mutex_spin_on_owner(), return true when owner changes")
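
For readers not familiar with that code path: mutex_spin_on_owner() is the
helper the mutex optimistic-spin loop uses to decide whether to keep
spinning or to block. Below is a minimal sketch of the semantic change the
commit title describes; it is an illustration only, not the literal
upstream diff, and the field/helper names (lock->owner, owner->on_cpu,
cpu_relax()) are simplified:

    /* Illustrative sketch, not the actual kernel source. */
    static bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
    {
            rcu_read_lock();
            while (lock->owner == owner) {
                    if (!owner->on_cpu || need_resched()) {
                            rcu_read_unlock();
                            return false;   /* give up spinning, go to sleep */
                    }
                    cpu_relax();
            }
            rcu_read_unlock();

            /*
             * Old behaviour: return lock->owner == NULL, i.e. keep spinning
             * only if the lock was actually released.
             * New behaviour (this commit): return true whenever the owner
             * changed, so the caller keeps spinning on the new owner
             * instead of blocking.
             */
            return true;
    }

More spinning and less sleeping would match the numbers below: voluntary
context switches drop by roughly 94% while system time rises by about 60%.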


testbox/testcase/testparams: lituya/unixbench/performance-fsdisk

d6abfdb2022368d8 07d2413a61db6500f58e614e87
---------------- --------------------------
%stddev %change %stddev
\ | \
10131 ± 1% -3.6% 9770 ± 0% unixbench.score
1049 ± 18% +60.8% 1687 ± 0% unixbench.time.system_time
7144 ± 30% +203.6% 21686 ± 3% unixbench.time.involuntary_context_switches
24573900 ± 19% -93.9% 1497516 ± 8% unixbench.time.voluntary_context_switches
369 ± 18% +59.5% 589 ± 0% unixbench.time.percent_of_cpu_this_job_got
3849 ± 5% -17.9% 3159 ± 0% uptime.idle
4060 ± 0% +25.3% 5087 ± 20% meminfo.AnonHugePages
3 ± 23% +66.7% 5 ± 0% vmstat.procs.r
174876 ± 18% -90.7% 16282 ± 5% vmstat.system.cs
87032 ± 9% -48.9% 44484 ± 2% softirqs.RCU
213221 ± 5% -79.7% 43210 ± 0% softirqs.SCHED
551372 ± 17% +60.8% 886553 ± 0% softirqs.TIMER
7144 ± 30% +203.6% 21686 ± 3% time.involuntary_context_switches
369 ± 18% +59.5% 589 ± 0% time.percent_of_cpu_this_job_got
1049 ± 18% +60.8% 1687 ± 0% time.system_time
24573900 ± 19% -93.9% 1497516 ± 8% time.voluntary_context_switches
23.55 ± 17% +57.3% 37.05 ± 0% turbostat.%Busy
779 ± 17% +57.1% 1223 ± 0% turbostat.Avg_MHz
22.32 ± 18% -61.6% 8.56 ± 4% turbostat.CPU%c1
42.53 ± 2% +5.6% 44.92 ± 0% turbostat.PkgWatt
0.57 ± 7% +7.5% 0.61 ± 1% turbostat.RAMWatt
21474227 ± 18% -93.7% 1361886 ± 8% cpuidle.C1-HSW.usage
6.755e+08 ± 27% -89.6% 70473442 ± 17% cpuidle.C1-HSW.time
26571 ± 25% -92.8% 1917 ± 4% cpuidle.C3-HSW.usage
117581 ± 21% -64.5% 41794 ± 1% cpuidle.C6-HSW.usage
1439 ± 28% -88.0% 173 ± 43% cpuidle.POLL.time
524 ± 17% -90.9% 47 ± 24% cpuidle.POLL.usage
4546 ± 15% +46.3% 6651 ± 2% sched_debug.cfs_rq[0]:/.tg->runnable_avg
246 ± 25% +55.4% 382 ± 2% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
157742 ± 23% +129.0% 361183 ± 6% sched_debug.cfs_rq[0]:/.min_vruntime
15438 ± 14% +63.1% 25172 ± 9% sched_debug.cfs_rq[0]:/.exec_clock
11297 ± 25% +54.9% 17499 ± 2% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
4559 ± 15% +46.3% 6669 ± 2% sched_debug.cfs_rq[10]:/.tg->runnable_avg
9 ± 35% +84.2% 17 ± 15% sched_debug.cfs_rq[10]:/.runnable_load_avg
11563 ± 18% +57.7% 18232 ± 10% sched_debug.cfs_rq[10]:/.avg->runnable_avg_sum
12727 ± 13% +73.8% 22116 ± 4% sched_debug.cfs_rq[10]:/.exec_clock
251 ± 18% +58.1% 397 ± 10% sched_debug.cfs_rq[10]:/.tg_runnable_contrib
152419 ± 21% +131.9% 353450 ± 4% sched_debug.cfs_rq[10]:/.min_vruntime
4559 ± 15% +46.3% 6669 ± 2% sched_debug.cfs_rq[11]:/.tg->runnable_avg
66 ± 19% -73.1% 17 ± 30% sched_debug.cfs_rq[12]:/.runnable_load_avg
4560 ± 15% +46.3% 6672 ± 2% sched_debug.cfs_rq[12]:/.tg->runnable_avg
78 ± 22% -75.1% 19 ± 31% sched_debug.cfs_rq[12]:/.load
151608 ± 21% +158.0% 391137 ± 20% sched_debug.cfs_rq[13]:/.min_vruntime
272 ± 23% +58.3% 431 ± 11% sched_debug.cfs_rq[13]:/.tg_runnable_contrib
12552 ± 13% +119.5% 27556 ± 37% sched_debug.cfs_rq[13]:/.exec_clock
4557 ± 15% +46.4% 6674 ± 2% sched_debug.cfs_rq[13]:/.tg->runnable_avg
12507 ± 23% +58.0% 19763 ± 11% sched_debug.cfs_rq[13]:/.avg->runnable_avg_sum
314 ± 8% +49.8% 471 ± 28% sched_debug.cfs_rq[14]:/.tg_runnable_contrib
4558 ± 15% +46.4% 6675 ± 2% sched_debug.cfs_rq[14]:/.tg->runnable_avg
152171 ± 21% +187.6% 437699 ± 32% sched_debug.cfs_rq[14]:/.min_vruntime
14442 ± 8% +49.7% 21618 ± 28% sched_debug.cfs_rq[14]:/.avg->runnable_avg_sum
11538 ± 16% +51.7% 17502 ± 1% sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
252 ± 16% +51.6% 382 ± 0% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
152080 ± 21% +132.5% 353587 ± 4% sched_debug.cfs_rq[15]:/.min_vruntime
4560 ± 15% +46.4% 6675 ± 2% sched_debug.cfs_rq[15]:/.tg->runnable_avg
13692 ± 10% +71.8% 23520 ± 4% sched_debug.cfs_rq[15]:/.exec_clock
12631 ± 21% +59.3% 20127 ± 17% sched_debug.cfs_rq[1]:/.avg->runnable_avg_sum
10 ± 27% +166.7% 28 ± 33% sched_debug.cfs_rq[1]:/.runnable_load_avg
275 ± 21% +59.2% 438 ± 17% sched_debug.cfs_rq[1]:/.tg_runnable_contrib
4548 ± 15% +46.2% 6651 ± 2% sched_debug.cfs_rq[1]:/.tg->runnable_avg
166428 ± 20% +136.5% 393659 ± 14% sched_debug.cfs_rq[1]:/.min_vruntime
14633 ± 11% +100.7% 29361 ± 34% sched_debug.cfs_rq[1]:/.exec_clock
4547 ± 15% +46.3% 6653 ± 2% sched_debug.cfs_rq[2]:/.tg->runnable_avg
170439 ± 20% +114.8% 366033 ± 3% sched_debug.cfs_rq[2]:/.min_vruntime
11846 ± 18% +57.9% 18702 ± 6% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
15344 ± 14% +58.9% 24380 ± 2% sched_debug.cfs_rq[2]:/.exec_clock
258 ± 18% +57.9% 408 ± 6% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
168070 ± 16% +114.7% 360822 ± 4% sched_debug.cfs_rq[3]:/.min_vruntime
11531 ± 18% +51.7% 17496 ± 1% sched_debug.cfs_rq[3]:/.avg->runnable_avg_sum
251 ± 18% +52.3% 383 ± 1% sched_debug.cfs_rq[3]:/.tg_runnable_contrib
388 ± 29% -68.0% 124 ± 25% sched_debug.cfs_rq[3]:/.tg_load_contrib
365 ± 30% -72.7% 99 ± 33% sched_debug.cfs_rq[3]:/.blocked_load_avg
4548 ± 15% +46.3% 6655 ± 2% sched_debug.cfs_rq[3]:/.tg->runnable_avg
14874 ± 7% +57.8% 23470 ± 4% sched_debug.cfs_rq[3]:/.exec_clock
247 ± 12% +55.3% 383 ± 3% sched_debug.cfs_rq[4]:/.tg_runnable_contrib
11274 ± 12% +56.0% 17584 ± 3% sched_debug.cfs_rq[4]:/.avg->runnable_avg_sum
4549 ± 15% +46.3% 6657 ± 2% sched_debug.cfs_rq[4]:/.tg->runnable_avg
14121 ± 9% +65.6% 23381 ± 4% sched_debug.cfs_rq[4]:/.exec_clock
160686 ± 17% +124.3% 360493 ± 4% sched_debug.cfs_rq[4]:/.min_vruntime
11730 ± 14% +73.2% 20318 ± 17% sched_debug.cfs_rq[5]:/.avg->runnable_avg_sum
4550 ± 15% +46.3% 6658 ± 2% sched_debug.cfs_rq[5]:/.tg->runnable_avg
161214 ± 21% +173.5% 440993 ± 37% sched_debug.cfs_rq[5]:/.min_vruntime
256 ± 14% +73.0% 443 ± 17% sched_debug.cfs_rq[5]:/.tg_runnable_contrib
4550 ± 15% +46.4% 6662 ± 2% sched_debug.cfs_rq[6]:/.tg->runnable_avg
14431 ± 13% +31.6% 18997 ± 9% sched_debug.cfs_rq[6]:/.avg->runnable_avg_sum
187106 ± 33% +97.8% 370045 ± 5% sched_debug.cfs_rq[6]:/.min_vruntime
314 ± 13% +32.0% 415 ± 9% sched_debug.cfs_rq[6]:/.tg_runnable_contrib
13 ± 35% +132.1% 30 ± 40% sched_debug.cfs_rq[7]:/.runnable_load_avg
161563 ± 22% +124.0% 361967 ± 3% sched_debug.cfs_rq[7]:/.min_vruntime
4552 ± 15% +46.4% 6663 ± 2% sched_debug.cfs_rq[7]:/.tg->runnable_avg
257 ± 16% +48.4% 381 ± 2% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
3 ± 13% -53.8% 1 ± 33% sched_debug.cfs_rq[7]:/.nr_spread_over
14448 ± 18% +63.8% 23661 ± 2% sched_debug.cfs_rq[7]:/.exec_clock
11755 ± 16% +48.6% 17474 ± 2% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
4554 ± 15% +46.3% 6665 ± 2% sched_debug.cfs_rq[8]:/.tg->runnable_avg
4557 ± 15% +46.3% 6667 ± 2% sched_debug.cfs_rq[9]:/.tg->runnable_avg
192062 ± 47% +83.6% 352649 ± 4% sched_debug.cfs_rq[9]:/.min_vruntime
14 ± 36% +58.9% 22 ± 10% sched_debug.cpu#0.cpu_load[3]
13 ± 37% +66.7% 22 ± 7% sched_debug.cpu#0.cpu_load[4]
13 ± 35% +61.1% 21 ± 11% sched_debug.cpu#0.cpu_load[2]
873975 ± 12% -75.4% 215052 ± 24% sched_debug.cpu#1.sched_count
16 ± 4% +84.4% 29 ± 39% sched_debug.cpu#1.cpu_load[1]
15 ± 17% +98.4% 30 ± 35% sched_debug.cpu#1.cpu_load[4]
873936 ± 12% -75.4% 214989 ± 24% sched_debug.cpu#1.nr_switches
436585 ± 12% -75.5% 106889 ± 24% sched_debug.cpu#1.sched_goidle
564 ± 33% +106.4% 1165 ± 7% sched_debug.cpu#1.curr->pid
15 ± 14% +95.2% 30 ± 35% sched_debug.cpu#1.cpu_load[3]
465895 ± 8% -77.2% 106210 ± 24% sched_debug.cpu#1.ttwu_count
15 ± 7% +91.9% 29 ± 37% sched_debug.cpu#1.cpu_load[2]
15 ± 29% +50.8% 23 ± 9% sched_debug.cpu#10.cpu_load[3]
315517 ± 11% -94.0% 18947 ± 11% sched_debug.cpu#10.sched_goidle
336250 ± 12% -94.0% 20238 ± 12% sched_debug.cpu#10.ttwu_count
631593 ± 11% -93.9% 38620 ± 11% sched_debug.cpu#10.sched_count
631561 ± 11% -93.9% 38572 ± 11% sched_debug.cpu#10.nr_switches
556 ± 20% +93.0% 1074 ± 17% sched_debug.cpu#10.curr->pid
14 ± 28% +57.6% 23 ± 8% sched_debug.cpu#10.cpu_load[4]
341057 ± 11% -94.1% 20223 ± 8% sched_debug.cpu#11.ttwu_count
623922 ± 9% -93.7% 39166 ± 9% sched_debug.cpu#11.sched_count
311611 ± 9% -93.9% 19080 ± 9% sched_debug.cpu#11.sched_goidle
623869 ± 9% -93.7% 39116 ± 9% sched_debug.cpu#11.nr_switches
338493 ± 10% -94.2% 19504 ± 7% sched_debug.cpu#12.sched_goidle
55 ± 30% -53.4% 26 ± 16% sched_debug.cpu#12.cpu_load[1]
71 ± 17% -59.4% 29 ± 28% sched_debug.cpu#12.cpu_load[0]
79 ± 23% -75.5% 19 ± 31% sched_debug.cpu#12.load
677539 ± 10% -94.1% 40155 ± 7% sched_debug.cpu#12.nr_switches
280203 ± 33% -92.6% 20836 ± 9% sched_debug.cpu#12.ttwu_count
677589 ± 10% -94.1% 40202 ± 7% sched_debug.cpu#12.sched_count
509 ± 22% +55.6% 793 ± 15% sched_debug.cpu#12.ttwu_local
19 ± 14% +163.6% 50 ± 32% sched_debug.cpu#13.cpu_load[1]
24 ± 16% +176.3% 67 ± 36% sched_debug.cpu#13.cpu_load[0]
17 ± 18% +138.2% 40 ± 24% sched_debug.cpu#13.cpu_load[2]
646767 ± 13% -94.2% 37618 ± 11% sched_debug.cpu#13.sched_count
709 ± 37% +125.3% 1598 ± 23% sched_debug.cpu#13.curr->pid
646735 ± 13% -94.2% 37563 ± 11% sched_debug.cpu#13.nr_switches
15 ± 19% +104.8% 31 ± 23% sched_debug.cpu#13.cpu_load[4]
16 ± 19% +117.2% 34 ± 21% sched_debug.cpu#13.cpu_load[3]
327419 ± 14% -93.9% 20012 ± 9% sched_debug.cpu#13.ttwu_count
323068 ± 13% -94.3% 18422 ± 11% sched_debug.cpu#13.sched_goidle
674567 ± 13% -93.6% 43222 ± 11% sched_debug.cpu#14.sched_count
750 ± 30% +90.8% 1432 ± 15% sched_debug.cpu#14.curr->pid
478 ± 22% +33.9% 640 ± 21% sched_debug.cpu#14.ttwu_local
339046 ± 13% -93.9% 20669 ± 7% sched_debug.cpu#14.ttwu_count
336995 ± 13% -93.7% 21107 ± 11% sched_debug.cpu#14.sched_goidle
674534 ± 13% -93.6% 43164 ± 11% sched_debug.cpu#14.nr_switches
19 ± 12% +45.6% 28 ± 18% sched_debug.cpu#15.cpu_load[1]
299069 ± 13% -93.4% 19783 ± 5% sched_debug.cpu#15.sched_goidle
813 ± 42% +65.5% 1346 ± 6% sched_debug.cpu#15.curr->pid
580735 ± 3% +11.3% 646611 ± 4% sched_debug.cpu#15.avg_idle
331682 ± 14% -93.7% 20957 ± 7% sched_debug.cpu#15.ttwu_count
599051 ± 13% -93.2% 41010 ± 5% sched_debug.cpu#15.sched_count
14 ± 15% +69.5% 25 ± 5% sched_debug.cpu#15.cpu_load[4]
15 ± 12% +71.7% 25 ± 7% sched_debug.cpu#15.cpu_load[3]
16 ± 10% +62.1% 26 ± 11% sched_debug.cpu#15.cpu_load[2]
817 ± 11% +61.3% 1318 ± 17% sched_debug.cpu#15.ttwu_local
599015 ± 13% -93.2% 40958 ± 5% sched_debug.cpu#15.nr_switches
1009949 ± 27% -63.0% 373861 ± 42% sched_debug.cpu#2.sched_count
15 ± 16% +54.0% 24 ± 14% sched_debug.cpu#2.cpu_load[1]
1009909 ± 27% -63.0% 373802 ± 42% sched_debug.cpu#2.nr_switches
14 ± 18% +61.0% 23 ± 10% sched_debug.cpu#2.cpu_load[2]
504627 ± 27% -63.1% 186448 ± 42% sched_debug.cpu#2.sched_goidle
14 ± 20% +63.8% 23 ± 8% sched_debug.cpu#2.cpu_load[3]
534870 ± 25% -64.9% 187727 ± 42% sched_debug.cpu#2.ttwu_count
14 ± 23% +66.7% 23 ± 6% sched_debug.cpu#2.cpu_load[4]
16 ± 24% +62.5% 26 ± 19% sched_debug.cpu#2.cpu_load[0]
462676 ± 24% -76.7% 107823 ± 47% sched_debug.cpu#3.sched_goidle
926031 ± 24% -76.6% 217084 ± 47% sched_debug.cpu#3.sched_count
494298 ± 23% -78.2% 107584 ± 46% sched_debug.cpu#3.ttwu_count
18 ± 6% +18.9% 22 ± 5% sched_debug.cpu#3.cpu_load[2]
925992 ± 24% -76.6% 217032 ± 47% sched_debug.cpu#3.nr_switches
16 ± 13% +37.9% 22 ± 3% sched_debug.cpu#3.cpu_load[3]
15 ± 21% +52.5% 23 ± 3% sched_debug.cpu#3.cpu_load[4]
33 ± 20% -36.8% 21 ± 17% sched_debug.cpu#3.cpu_load[0]
14 ± 18% +56.9% 22 ± 3% sched_debug.cpu#4.cpu_load[4]
15 ± 23% +41.3% 22 ± 4% sched_debug.cpu#4.cpu_load[3]
948 ± 27% +36.5% 1295 ± 11% sched_debug.cpu#4.curr->pid
855 ± 43% +67.1% 1429 ± 16% sched_debug.cpu#5.curr->pid
22 ± 9% +87.8% 42 ± 29% sched_debug.cpu#5.cpu_load[0]
15 ± 13% +114.5% 33 ± 43% sched_debug.cpu#5.cpu_load[3]
15 ± 15% +119.7% 33 ± 47% sched_debug.cpu#5.cpu_load[4]
18 ± 8% +100.0% 37 ± 32% sched_debug.cpu#5.cpu_load[1]
16 ± 10% +110.8% 34 ± 38% sched_debug.cpu#5.cpu_load[2]
415247 ± 12% -80.9% 79279 ± 20% sched_debug.cpu#6.sched_goidle
831074 ± 12% -80.8% 159814 ± 20% sched_debug.cpu#6.nr_switches
831113 ± 12% -80.8% 159869 ± 20% sched_debug.cpu#6.sched_count
811 ± 36% +50.0% 1216 ± 15% sched_debug.cpu#6.curr->pid
430138 ± 18% -82.1% 76868 ± 22% sched_debug.cpu#6.ttwu_count
860988 ± 14% -70.9% 250529 ± 33% sched_debug.cpu#7.nr_switches
430158 ± 14% -71.0% 124543 ± 34% sched_debug.cpu#7.sched_goidle
861025 ± 14% -70.9% 250578 ± 33% sched_debug.cpu#7.sched_count
658 ± 21% +123.8% 1473 ± 27% sched_debug.cpu#7.curr->pid
15 ± 19% +60.0% 24 ± 7% sched_debug.cpu#7.cpu_load[4]
15 ± 14% +62.9% 25 ± 13% sched_debug.cpu#7.cpu_load[2]
440642 ± 11% -71.9% 123684 ± 34% sched_debug.cpu#7.ttwu_count
16 ± 7% +58.2% 26 ± 27% sched_debug.cpu#7.cpu_load[1]
15 ± 16% +65.0% 24 ± 9% sched_debug.cpu#7.cpu_load[3]
823 ± 25% +61.0% 1325 ± 31% sched_debug.cpu#8.curr->pid
638635 ± 13% -93.8% 39544 ± 7% sched_debug.cpu#9.sched_count
638594 ± 13% -93.8% 39497 ± 7% sched_debug.cpu#9.nr_switches
319018 ± 13% -93.9% 19378 ± 7% sched_debug.cpu#9.sched_goidle
328979 ± 10% -93.9% 19948 ± 6% sched_debug.cpu#9.ttwu_count

lituya: Grantley Haswell
Memory: 16G




softirqs.SCHED

300000 ++-----------------------------------------------------------------+
| * |
250000 ++ : * |
| : :.* .*. .* : + .* |
*.*. : * + .* * : : *.*.* + |
200000 ++ * * : * * |
| :+ |
150000 ++ * |
| |
100000 ++ |
| |
| |
50000 O+O O OO O O O O O OO O O O O O O OO O O O O O OO O O O O O OO O O O
| |
0 ++-----------------------------------------------------------------+


cpuidle.C1-HSW.usage

3e+07 ++----------------------------------------------------------------+
| * |
2.5e+07 ++* * .* :+ |
| : :: * : : * |
|: : * : * + :: : |
2e+07 ++ : * : : : * * : |
|: :+ : : : : * |
1.5e+07 ++ * *.*.*.* :: |
* * |
1e+07 ++ |
| |
| |
5e+06 ++ |
O O O OO O O OO O O O O OO O O O O O O OO O O
0 ++-----------O-O----O---O---OO-O-O----------------OO-O-O----------+


unixbench.time.voluntary_context_switches

3.5e+07 ++----------------------------------------------------------------+
| |
3e+07 ++* * .* *. |
| : :* * : : * |
2.5e+07 ++ : * : : + : : : |
|: : * : : : * : : |
2e+07 ++ :+ : : : : * * |
| * *.*.*.* :: |
1.5e+07 *+ * |
| |
1e+07 ++ |
| |
5e+06 ++ |
O O O OO O O OO O O O OO O O O O O O OO O O
0 ++-----------O-O----O-O-O---OO-O-O----------------OO-O-O----------+


unixbench.time.involuntary_context_switches

30000 ++------------------------------------------------------------------+
| |
25000 ++O O |
| O O O O O O |
O O O O O O O O O O OO O O O O O O O O
20000 ++ O O O O O O O O O |
| |
15000 ++ |
| |
10000 *+ *.* *.*.*.* *. |
|: : + : : + *. * .* |
|: : * : .* * + + .* |
5000 ++* * * * |
| |
0 ++------------------------------------------------------------------+


time.voluntary_context_switches

3.5e+07 ++----------------------------------------------------------------+
| |
3e+07 ++* * .* *. |
| : :* * : : * |
2.5e+07 ++ : * : : + : : : |
|: : * : : : * : : |
2e+07 ++ :+ : : : : * * |
| * *.*.*.* :: |
1.5e+07 *+ * |
| |
1e+07 ++ |
| |
5e+06 ++ |
O O O OO O O OO O O O OO O O O O O O OO O O
0 ++-----------O-O----O-O-O---OO-O-O----------------OO-O-O----------+


time.involuntary_context_switches

30000 ++------------------------------------------------------------------+
| |
25000 ++O O |
| O O O O O O |
O O O O O O O O O O OO O O O O O O O O
20000 ++ O O O O O O O O O |
| |
15000 ++ |
| |
10000 *+ *.* *.*.*.* *. |
|: : + : : + *. * .* |
|: : * : .* * + + .* |
5000 ++* * * * |
| |
0 ++------------------------------------------------------------------+


vmstat.system.cs

250000 ++-----------------------------------------------------------------+
| |
| * *. .* *. |
200000 ++: : * * : : * |
|: : * : : + : : : |
|: : * : : : * :: : |
150000 ++ :+ : : :: * * |
| * *.*.*.* :: |
100000 *+ * |
| |
| |
50000 ++ |
| |
O O O OO O O O O O OO O O O O O O OO O O O O O OO O O O O O OO O O O
0 ++-----------------------------------------------------------------+

[*] bisect-good sample
[O] bisect-bad sample

To reproduce:

apt-get install ruby                  # lkp-tests is driven by ruby scripts
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml              # the job file attached in this email
bin/run-local   job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang

---
testcase: unixbench
default-monitors:
  wait: pre-test
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 10
default_watchdogs:
  watch-oom:
  watchdog:
cpufreq_governor: performance
commit: 159e7763d517804c61a673736660a5a35f2ea5f8
model: Grantley Haswell
nr_cpu: 16
memory: 16G
hdd_partitions:
swap_partitions:
rootfs_partition:
unixbench:
  test: fsdisk
testbox: lituya
tbox_group: lituya
kconfig: x86_64-rhel
enqueue_time: 2015-03-13 17:18:20.807960694 +08:00
head_commit: 159e7763d517804c61a673736660a5a35f2ea5f8
base_commit: 9eccca0843205f87c00404b663188b88eb248051
branch: next/master
kernel: "/kernel/x86_64-rhel/159e7763d517804c61a673736660a5a35f2ea5f8/vmlinuz-4.0.0-rc3-next-20150316"
user: lkp
queue: cyclic
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/lituya/unixbench/performance-fsdisk/debian-x86_64-2015-02-07.cgz/x86_64-rhel/159e7763d517804c61a673736660a5a35f2ea5f8/0"
job_file: "/lkp/scheduled/lituya/cyclic_unixbench-performance-fsdisk-x86_64-rhel-HEAD-159e7763d517804c61a673736660a5a35f2ea5f8-0-20150313-22163-1pkno0h.yaml"
dequeue_time: 2015-03-16 17:37:39.210757811 +08:00
max_uptime: 1211.5000000000002
modules_initrd: "/kernel/x86_64-rhel/159e7763d517804c61a673736660a5a35f2ea5f8/modules.cgz"
bm_initrd: "/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/unixbench-debian.cgz,/lkp/benchmarks/unixbench.cgz"
job_state: finished
loadavg: 8.28 4.83 1.96 1/211 5736
start_time: '1426498684'
end_time: '1426498982'
version: "/lkp/lkp/.src-20150316-152133"

echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu10/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu11/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu12/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu13/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu14/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu15/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu5/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu6/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu7/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu8/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu9/cpufreq/scaling_governor
./Run fsdisk
_______________________________________________
LKP mailing list
LKP@xxxxxxxxxxxxxxx