[x86_64,entry] f04e05b81e4: -62.7% time.user_time
by LKP
FYI, we noticed the below changes on
git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/entry
commit f04e05b81e4d9ae88bee379f12176f551a24394a ("x86_64,entry: Use sysret to return to userspace when possible")
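For context: the commit under test lets the 64-bit syscall exit path return to userspace via the fast SYSRET instruction instead of IRET whenever the saved register state permits it. A rough, user-space sketch of the eligibility test follows; it is illustrative only -- the real logic is assembly in arch/x86/kernel/entry_64.S, and every name below is a stand-in.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's saved-register layout and constants;
 * the field and macro names here are assumptions for this sketch. */
struct saved_regs { uint64_t ip, cx, r11, flags, cs, ss; };
#define USER_CS        0x33
#define USER_SS        0x2b
#define TASK_SIZE_MAX  0x00007ffffffff000ULL

/* SYSRET reloads RIP from RCX and RFLAGS from R11, so the fast path is
 * only safe when the saved state already matches those registers and
 * the return address is a canonical user address. */
static bool can_use_sysret(const struct saved_regs *r)
{
        return r->cx == r->ip &&
               r->r11 == r->flags &&
               r->ip < TASK_SIZE_MAX &&
               r->cs == USER_CS && r->ss == USER_SS;
}

int main(void)
{
        struct saved_regs r = { .ip = 0x400123, .cx = 0x400123,
                                .r11 = 0x246, .flags = 0x246,
                                .cs = USER_CS, .ss = USER_SS };
        printf("sysret ok: %d\n", can_use_sysret(&r));
        return 0;
}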
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12 testbox/testcase/testparams
---------------- --------------------------
%stddev %change %stddev
\ | \
24.09 ± 4% -62.7% 8.99 ± 3% lkp-wsx02/aim9/performance-300s-creat-clo
24.09 -62.7% 8.99 GEO-MEAN time.user_time
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
1571 ± 1% -13.5% 1359 ± 0% lkp-wsx02/aim9/performance-300s-creat-clo
1571 -13.5% 1359 GEO-MEAN vmstat.system.cs
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
276 ± 0% +5.5% 291 ± 0% lkp-wsx02/aim9/performance-300s-creat-clo
276 +5.5% 291 GEO-MEAN time.system_time
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- -------------------------- ---------------------------
430 ± 36% -36.0% 275 ± 39% lkp-wsx02/aim9/performance-300s-creat-clo
430 -36.0% 275 GEO-MEAN sched_debug.cpu#38.ttwu_count
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
52761811 ± 29% -54.7% 23885493 ± 11% lkp-wsx02/aim9/performance-300s-creat-clo
52761811 -54.7% 23885493 GEO-MEAN cpuidle.C1E-NHM.time
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
21939 ± 8% +42.6% 31283 ± 23% lkp-wsx02/aim9/performance-300s-creat-clo
21939 +42.6% 31283 GEO-MEAN numa-meminfo.node1.SUnreclaim
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
5484 ± 8% +40.3% 7694 ± 22% lkp-wsx02/aim9/performance-300s-creat-clo
5484 +40.3% 7694 GEO-MEAN numa-vmstat.node1.nr_slab_unreclaimable
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
34004 ± 2% +69.9% 57781 ± 4% lkp-wsx02/aim9/performance-300s-creat-clo
34004 +69.9% 57781 GEO-MEAN slabinfo.kmalloc-256.active_objs
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
1075 ± 2% +68.8% 1815 ± 4% lkp-wsx02/aim9/performance-300s-creat-clo
1074 +68.8% 1814 GEO-MEAN slabinfo.kmalloc-256.active_slabs
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
1075 ± 2% +68.8% 1815 ± 4% lkp-wsx02/aim9/performance-300s-creat-clo
1074 +68.8% 1814 GEO-MEAN slabinfo.kmalloc-256.num_slabs
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
34415 ± 2% +68.8% 58099 ± 4% lkp-wsx02/aim9/performance-300s-creat-clo
34415 +68.8% 58099 GEO-MEAN slabinfo.kmalloc-256.num_objs
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
32500 ± 7% +31.0% 42586 ± 17% lkp-wsx02/aim9/performance-300s-creat-clo
32500 +31.0% 42586 GEO-MEAN numa-meminfo.node1.Slab
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
73657 ± 1% -23.4% 56388 ± 3% lkp-wsx02/aim9/performance-300s-creat-clo
73657 -23.4% 56388 GEO-MEAN softirqs.RCU
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
10427 ± 14% -16.8% 8675 ± 6% lkp-wsx02/aim9/performance-300s-creat-clo
10427 -16.8% 8675 GEO-MEAN sched_debug.cfs_rq[0]:/.exec_clock
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
24242 ± 8% +18.4% 28696 ± 12% lkp-wsx02/aim9/performance-300s-creat-clo
24242 +18.4% 28696 GEO-MEAN numa-meminfo.node3.Active
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
2076 ± 7% +12.1% 2328 ± 2% lkp-wsx02/aim9/performance-300s-creat-clo
2076 +12.1% 2327 GEO-MEAN numa-meminfo.node2.KernelStack
f2ee9bebb99dd4a4 f04e05b81e4d9ae88bee379f12
---------------- --------------------------
1139198 ± 5% -6.6% 1063912 ± 0% lkp-wsx02/aim9/performance-300s-creat-clo
1139198 -6.6% 1063912 GEO-MEAN proc-vmstat.pgfault
lkp-wsx02: Westmere-EX
Memory: 128G
time.user_time
26 ++-----------------*----------------*--------*-------------------------+
| .*..*... .*..*. *..*..*.. .. .*. .*..*.. .*..*..|
24 *+ *. * *. *...*. .. *
22 ++ * |
| |
20 ++ |
18 ++ |
| |
16 ++ |
14 ++ |
| |
12 ++ |
10 ++ |
O O O O O O O O O O |
8 ++-O------O--O---------O--------O---O--O--O--O------------O------------+
time.system_time
294 ++--------------------------------------------------------------------+
292 ++ O O O O O O O |
O O O O O O O O O O O O O |
290 ++ |
288 ++ |
| |
286 ++ |
284 ++ |
282 ++ |
| |
280 ++ |
278 ++ *.. |
*.. ..*.. .*.. *.. .*..*.. .. . .*
276 ++ *..*. *..*..*..*..*...*. .. *...*. *..* *..*. |
274 ++---------------------------------*----------------------------------+
softirqs.RCU
80000 ++------------------------------------------------------------------+
| |
75000 ++ ..*..*..*.. .*.. .*.. *.. |
*..*..*..*.. .*. *..*.. .*..*. *.. .. .. *..*..*
| *. *. * * |
70000 ++ |
| |
65000 ++ |
| |
60000 ++ O |
| |
| O O O O O O O |
55000 O+ O O O O O O O O O O O |
| |
50000 ++------------------------------------------------------------------+
slabinfo.kmalloc-256.active_objs
70000 ++------------------------------------------------------------------+
| |
65000 ++ O O |
60000 ++ O O O O O O O O |
O O O O O O O O |
55000 ++ |
| O O |
50000 ++ |
| |
45000 ++ |
40000 ++ |
| .*.. |
35000 ++.*.. .*. *.. *..*.. .*.. .*..*... .*.. .*..*
*. *. . .. *..*..*. *..*. *. *..*. |
30000 ++-----------------*------------------------------------------------+
slabinfo.kmalloc-256.num_objs
70000 ++------------------------------------------------------------------+
| O |
65000 ++ O |
60000 ++ O O O O O O O O O |
O O O O O O O |
55000 ++ |
| O O |
50000 ++ |
| |
45000 ++ |
40000 ++ |
| .*.. |
35000 *+.*.. .*. *... .*..*.. .*.. .*..*...*..*..*.. .*..*
| *. *. *..*..*. *..*. *. |
30000 ++------------------------------------------------------------------+
slabinfo.kmalloc-256.active_slabs
2200 ++-------------------------------------------------------------------+
| O |
2000 ++ O O O |
| O O O O O O O O O |
1800 O+ O O O O |
| O |
1600 ++ O |
| |
1400 ++ |
| |
1200 ++ *...*.. |
*..*.. .. *.. .*..*.. .*...*..*.. .*..*..*..*...*.. .*..*
1000 ++ * *. *. *..*. *. |
| |
800 ++-------------------------------------------------------------------+
slabinfo.kmalloc-256.num_slabs
2200 ++-------------------------------------------------------------------+
| O |
2000 ++ O O O |
| O O O O O O O O O |
1800 O+ O O O O |
| O |
1600 ++ O |
| |
1400 ++ |
| |
1200 ++ *...*.. |
*..*.. .. *.. .*..*.. .*...*..*.. .*..*..*..*...*.. .*..*
1000 ++ * *. *. *..*. *. |
| |
800 ++-------------------------------------------------------------------+
[*] bisect-good sample
[O] bisect-bad sample
To reproduce:
apt-get install ruby ruby-oj
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
[sunrpc] WARNING: CPU: 1 PID: 1890 at lib/list_debug.c:36 __list_add+0xcb/0xd0()
by lkp@01.org
FYI, we noticed the below warning on
git://git.samba.org/jlayton/linux svc-pool-lock
commit 3a41085d37f2374f6ae6539f5dca3d1d53e50431 ("sunrpc: search for a thread under RCU")
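The warning below is the CONFIG_DEBUG_LIST sanity check in lib/list_debug.c firing: an entry that is already linked is handed to list_add() again, so the new node collides with one of its would-be neighbours (note new == next in the message), which suggests the reworked svc_recv() path re-queues a thread that is still on the pool's list. A minimal user-space sketch of that check, simplified from the kernel's version:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Simplified version of the lib/list_debug.c check: adding a node that
 * is already on the list makes 'new' equal to prev or next. */
static void debug_list_add(struct list_head *new,
                           struct list_head *prev, struct list_head *next)
{
        if (new == prev || new == next) {
                fprintf(stderr, "list_add double add: new=%p, prev=%p, next=%p.\n",
                        (void *)new, (void *)prev, (void *)next);
                return;
        }
        next->prev = new;
        new->next  = next;
        new->prev  = prev;
        prev->next = new;
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head node = { 0 };

        debug_list_add(&node, &head, head.next);   /* first add: fine    */
        debug_list_add(&node, &head, head.next);   /* double add: warns  */
        return 0;
}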
<5>[ 21.635477] Key type id_legacy registered
<4>[ 21.638149] ------------[ cut here ]------------
<4>[ 21.638359] ------------[ cut here ]------------
<4>[ 21.638363] WARNING: CPU: 1 PID: 1890 at lib/list_debug.c:36 __list_add+0xcb/0xd0()
<4>[ 21.638364] list_add double add: new=ffff88007be38000, prev=ffff88007e835088, next=ffff88007be38000.
<4>[ 21.638376] Modules linked in: rpcsec_gss_krb5 nfsv4 dns_resolver nfsd auth_rpcgss ipmi_watchdog ipmi_msghandler sg sr_mod sd_mod cdrom ata_generic pata_acpi mxm_wmi pcspkr pata_marvell serio_raw firewire_ohci firewire_core ahci libahci crc_itu_t i2c_i801 libata parport_pc winbond_cir parport rc_core snd_hda_codec_realtek snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_controller snd_hda_codec snd_hwdep snd_pcm snd_timer snd wmi i7core_edac soundcore edac_core acpi_cpufreq
<4>[ 21.638377] CPU: 1 PID: 1890 Comm: nfsv4.0-svc Tainted: G I 3.18.0-rc3-ga798bc5 #1
<4>[ 21.638378] Hardware name: /DX58SO, BIOS SOX5810J.86A.4196.2009.0715.1958 07/15/2009
<4>[ 21.638380] 0000000000000009 ffff88007bd03d48 ffffffff8188f71f 0000000025a825a8
<4>[ 21.638381] ffff88007bd03d98 ffff88007bd03d88 ffffffff8106b3c1 ffff8800bf7c1b00
<4>[ 21.638381] ffff88007be38000 ffff88007be38000 ffff88007e835088 ffff88007e835098
<4>[ 21.638382] Call Trace:
<4>[ 21.638385] [<ffffffff8188f71f>] dump_stack+0x4e/0x68
<4>[ 21.638388] [<ffffffff8106b3c1>] warn_slowpath_common+0x81/0xa0
<4>[ 21.638389] [<ffffffff8106b426>] warn_slowpath_fmt+0x46/0x50
<4>[ 21.638390] [<ffffffff8141932b>] __list_add+0xcb/0xd0
<4>[ 21.638392] [<ffffffff8186c0da>] svc_recv+0x69a/0xa30
<4>[ 21.638399] [<ffffffffa0270580>] ? nfs_callback_authenticate+0x50/0x50 [nfsv4]
<4>[ 21.638406] [<ffffffffa0270580>] ? nfs_callback_authenticate+0x50/0x50 [nfsv4]
<4>[ 21.638412] [<ffffffffa02705bb>] nfs4_callback_svc+0x3b/0x60 [nfsv4]
<4>[ 21.638414] [<ffffffff8108ac0b>] kthread+0xdb/0x100
<4>[ 21.638415] [<ffffffff8108ab30>] ? kthread_create_on_node+0x180/0x180
<4>[ 21.638417] [<ffffffff81897afc>] ret_from_fork+0x7c/0xb0
<4>[ 21.638418] [<ffffffff8108ab30>] ? kthread_create_on_node+0x180/0x180
<4>[ 21.638419] ---[ end trace a8ecfce57c7c54ab ]---
<4>[ 21.646613] WARNING: CPU: 2 PID: 1876 at lib/list_debug.c:36 __list_add+0xcb/0xd0()
[sunrpc] BUG: sleeping function called from invalid context at /kbuild/srcfreezer.h:56
by lkp@01.org
FYI, we noticed the below changes on
git://git.samba.org/jlayton/linux svc-pool-lock
commit 45a3a815b58f80369091ebba39b2a5f194a0c6b7 ("sunrpc: convert to lockless lookup of queued server threads")
+-----------------------------------------------------------------------------+------------+------------+
| | 127c16003d | 45a3a815b5 |
+-----------------------------------------------------------------------------+------------+------------+
| boot_successes | 13 | 12 |
| early-boot-hang | 1 | |
| boot_failures | 0 | 2 |
| BUG:sleeping_function_called_from_invalid_context_at/kbuild/srcfreezer.h | 0 | 2 |
| BUG:scheduling_while_atomic | 0 | 2 |
| BUG:sleeping_function_called_from_invalid_context_at_mm/filemap.c | 0 | 1 |
| INFO:rcu_sched_detected_stalls_on_CPUs/tasks | 0 | 1 |
| BUG:sleeping_function_called_from_invalid_context_at_kernel/locking/mutex.c | 0 | 1 |
| backtrace:nfsd | 0 | 2 |
| BUG:sleeping_function_called_from_invalid_context_at_mm/slub.c | 0 | 1 |
| backtrace:ext4_wait_block_bitmap | 0 | 1 |
| backtrace:ext4_mb_init_cache | 0 | 1 |
| backtrace:ext4_mb_init_group | 0 | 1 |
+-----------------------------------------------------------------------------+------------+------------+
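The splats below all have the same shape: svc_recv() reaches a blocking call while the preempt count is still elevated (in_atomic(): 1), so __might_sleep() complains and __schedule_bug() follows. Whether the count is left raised by an RCU read-side section or by an unbalanced preempt_disable() in the lockless lookup is not clear from the log alone; the self-contained sketch below only illustrates the generic pattern, not the patch's actual code.

#include <stdio.h>

/* Stand-ins: in the kernel, rcu_read_lock() raises the preempt count on
 * non-preemptible-RCU configs, and __might_sleep() warns whenever a
 * blocking call is attempted while that count is non-zero. */
static int preempt_count;
static void rcu_read_lock(void)   { preempt_count++; }
static void rcu_read_unlock(void) { preempt_count--; }

static void might_sleep(void)
{
        if (preempt_count)
                printf("BUG: sleeping function called from invalid context\n");
}

int main(void)
{
        rcu_read_lock();
        /* ...lockless lookup of an idle server thread would happen here... */
        might_sleep();          /* still inside the read-side section -> splat */
        rcu_read_unlock();
        return 0;
}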
<5>[ 46.599434] Key type id_resolver registered
<5>[ 46.603740] Key type id_legacy registered
<3>[ 46.610950] softirq: huh, entered softirq 3 NET_RX ffffffff8175bbc0 with preempt_count 00000101, exited with 00000102?
<3>[ 46.610986] BUG: sleeping function called from invalid context at /kbuild/srcfreezer.h:56
<3>[ 46.610988] in_atomic(): 1, irqs_disabled(): 0, pid: 2269, name: nfsd
<4>[ 46.610990] CPU: 4 PID: 2269 Comm: nfsd Not tainted 3.18.0-rc3-g6d2960b #1
<4>[ 46.610991] Hardware name: Supermicro X7DW3/X7DWN, BIOS 6.00 09/28/2007
<4>[ 46.610994] 0000000000000038 ffff880229ddfdb8 ffffffff818936e2 000000002d4a2d4a
<4>[ 46.610996] ffffffff81b5ab78 ffff880229ddfdc8 ffffffff810912fa ffff880229ddfdf8
<4>[ 46.610997] ffffffff8109136a ffff880229ddfdf8 ffff88022a274a28 ffff880229cd1d60
<4>[ 46.610997] Call Trace:
<4>[ 46.611005] [<ffffffff818936e2>] dump_stack+0x4e/0x68
<4>[ 46.611021] [<ffffffff810912fa>] ___might_sleep+0xda/0x110
<4>[ 46.611022] [<ffffffff8109136a>] __might_sleep+0x3a/0xd0
<4>[ 46.611026] [<ffffffff8186fa2a>] svc_recv+0x16a/0xaa0
<4>[ 46.611043] [<ffffffffa0192855>] nfsd+0x115/0x1a0 [nfsd]
<4>[ 46.611048] [<ffffffffa0192740>] ? nfsd_destroy+0xa0/0xa0 [nfsd]
<4>[ 46.611056] [<ffffffff8108aecf>] kthread+0xef/0x110
<4>[ 46.611058] [<ffffffff8108ade0>] ? kthread_create_on_node+0x180/0x180
<4>[ 46.611062] [<ffffffff8189bffc>] ret_from_fork+0x7c/0xb0
<4>[ 46.611063] [<ffffffff8108ade0>] ? kthread_create_on_node+0x180/0x180
<3>[ 46.611067] BUG: scheduling while atomic: nfsd/2269/0x00000003
<4>[ 46.611086] Modules linked in: rpcsec_gss_krb5 nfsv4 dns_resolver nfsd auth_rpcgss ipmi_watchdog ipmi_msghandler ses enclosure sg sd_mod ata_generic pata_acpi snd_pcm snd_timer snd floppy parport_pc soundcore mptsas ata_piix parport i2c_i801 libata i5400_edac pcspkr serio_raw mptscsih tpm_infineon edac_core i5k_amb mptbase shpchp scsi_transport_sas
<4>[ 46.611087] CPU: 4 PID: 2269 Comm: nfsd Not tainted 3.18.0-rc3-g6d2960b #1
<4>[ 46.611088] Hardware name: Supermicro X7DW3/X7DWN, BIOS 6.00 09/28/2007
<4>[ 46.611090] 0000000000000004 ffff880229ddfcc8 ffffffff818936e2 000000002de02de0
<4>[ 46.611091] ffff88022fd16f40 ffff880229ddfcd8 ffffffff81090abe ffff880229ddfd48
<4>[ 46.611093] ffffffff8189656a ffff880229cd1d60 0000000000016f40 ffff880229ddffd8
<4>[ 46.611093] Call Trace:
<4>[ 46.611095] [<ffffffff818936e2>] dump_stack+0x4e/0x68
<4>[ 46.611097] [<ffffffff81090abe>] __schedule_bug+0x4e/0x60
<4>[ 46.611099] [<ffffffff8189656a>] __schedule+0x65a/0x920
<4>[ 46.611100] [<ffffffff81896859>] schedule+0x29/0x70
<4>[ 46.611102] [<ffffffff8189a56a>] schedule_timeout+0x13a/0x280
<4>[ 46.611108] [<ffffffff810d65f0>] ? internal_add_timer+0xb0/0xb0
<4>[ 46.611110] [<ffffffff8186ff37>] svc_recv+0x677/0xaa0
<4>[ 46.611114] [<ffffffffa0192855>] nfsd+0x115/0x1a0 [nfsd]
<4>[ 46.611117] [<ffffffffa0192740>] ? nfsd_destroy+0xa0/0xa0 [nfsd]
<4>[ 46.611119] [<ffffffff8108aecf>] kthread+0xef/0x110
<3>[ 46.611122] BUG: scheduling while atomic: nfsd/2268/0x00000002
<4>[ 46.611126] Modules linked in:
<4>[ 46.611127] [<ffffffff8108ade0>] ? kthread_create_on_node+0x180/0x180
<4>[ 46.611128] rpcsec_gss_krb5
<4>[ 46.611132] nfsv4
<4>[ 46.611132] [<ffffffff8189bffc>] ret_from_fork+0x7c/0xb0
<4>[ 46.611133] dns_resolver
<4>[ 46.611136] nfsd
<4>[ 46.611136] [<ffffffff8108ade0>] ? kthread_create_on_node+0x180/0x180
<4>[ 46.611153] auth_rpcgss ipmi_watchdog ipmi_msghandler ses enclosure sg sd_mod ata_generic pata_acpi snd_pcm snd_timer snd floppy parport_pc soundcore mptsas ata_piix parport i2c_i801 libata i5400_edac pcspkr serio_raw mptscsih tpm_infineon edac_core i5k_amb mptbase shpchp scsi_transport_sas
<4>[ 46.611156] CPU: 0 PID: 2268 Comm: nfsd Tainted: G W 3.18.0-rc3-g6d2960b #1
<4>[ 46.611157] Hardware name: Supermicro X7DW3/X7DWN, BIOS 6.00 09/28/2007
<4>[ 46.611160] 0000000000000000 ffff880229cdbcc8 ffffffff818936e2 000000002e822e82
<4>[ 46.611161] ffff88022fc16f40 ffff880229cdbcd8 ffffffff81090abe ffff880229cdbd48
<4>[ 46.611163] ffffffff8189656a ffff880229cd0000 0000000000016f40 ffff880229cdbfd8
<4>[ 46.611163] Call Trace:
<4>[ 46.611172] [<ffffffff818936e2>] dump_stack+0x4e/0x68
<4>[ 46.611177] [<ffffffff81090abe>] __schedule_bug+0x4e/0x60
<4>[ 46.611179] [<ffffffff8189656a>] __schedule+0x65a/0x920
<4>[ 46.611181] [<ffffffff81896859>] schedule+0x29/0x70
<4>[ 46.611183] [<ffffffff8189a56a>] schedule_timeout+0x13a/0x280
<4>[ 46.611186] [<ffffffff810d65f0>] ? internal_add_timer+0xb0/0xb0
<4>[ 46.611189] [<ffffffff8186ff37>] svc_recv+0x677/0xaa0
<4>[ 46.611203] [<ffffffffa0192855>] nfsd+0x115/0x1a0 [nfsd]
<4>[ 46.611207] [<ffffffffa0192740>] ? nfsd_destroy+0xa0/0xa0 [nfsd]
<4>[ 46.611211] [<ffffffff8108aecf>] kthread+0xef/0x110
<4>[ 46.611213] [<ffffffff8108ade0>] ? kthread_create_on_node+0x180/0x180
<4>[ 46.611215] [<ffffffff8189bffc>] ret_from_fork+0x7c/0xb0
<4>[ 46.611217] [<ffffffff8108ade0>] ? kthread_create_on_node+0x180/0x180
<3>[ 46.611369] softirq: huh, entered softirq 3 NET_RX ffffffff8175bbc0 with preempt_count 00000102, exited with 00000103?
<3>[ 46.611393] BUG: scheduling while atomic: nfsd/2268/0x00000002
<4>[ 46.611403] Modules linked in: rpcsec_gss_krb5 nfsv4 dns_resolver nfsd auth_rpcgss ipmi_watchdog ipmi_msghandler ses enclosure sg sd_mod ata_generic pata_acpi snd_pcm snd_timer snd floppy parport_pc soundcore mptsas ata_piix parport i2c_i801 libata i5400_edac pcspkr
<3>[ 46.611403] BUG: scheduling while atomic: nfsd/2269/0x00000004
<4>[ 46.611405] serio_raw
<4>[ 46.611406] Modules linked in:
<4>[ 46.611407] mptscsih
<4>[ 46.611408] rpcsec_gss_krb5
<4>[ 46.611410] tpm_infineon
[x86, irq] BUG: kernel boot hang
by lkp@01.org
FYI, we noticed the below changes on
https://github.com/jiangliu/linux.git irqdomain/p4v1
commit e530b4be849e7c865c95f25008eb52b72ec96341 ("x86, irq: Use cached IOAPIC entry instead of reading from hardware")
+----------------------+------------+------------+
| | ff34368973 | e530b4be84 |
+----------------------+------------+------------+
| boot_successes | 10 | 1 |
| boot_failures | 0 | 14 |
| BUG:kernel_boot_hang | 0 | 14 |
+----------------------+------------+------------+
[ 302.136037] Waiting up to 20 more seconds for network.
[ 312.136042] Waiting up to 10 more seconds for network.
BUG: kernel boot hang
Elapsed time: 310
qemu-system-x86_64 -enable-kvm -cpu Westmere -kernel /kernel/x86_64-lkp/dfc6ac916357d686ca861cb0a996ed85413d5d00/vmlinuz-3.18.0-rc3-00128-gdfc6ac9 -append 'user=lkp job=/lkp/scheduled/vm-kbuild-yocto-ia32-31/rand_boot-1-yocto-minimal-i386.cgz-x86_64-lkp-dfc6ac916357d686ca861cb0a996ed85413d5d00-1.yaml ARCH=x86_64 BOOT_IMAGE=/kernel/x86_64-lkp/dfc6ac916357d686ca861cb0a996ed85413d5d00/vmlinuz-3.18.0-rc3-00128-gdfc6ac9 kconfig=x86_64-lkp commit=dfc6ac916357d686ca861cb0a996ed85413d5d00 branch=linux-devel/devel-roam-lkp-201411081809 root=/dev/ram0 max_uptime=3600 RESULT_ROOT=/result/vm-kbuild-yocto-ia32/boot/1/yocto-minimal-i386.cgz/x86_64-lkp/dfc6ac916357d686ca861cb0a996ed85413d5d00/0 ip=::::vm-kbuild-yocto-ia32-31::dhcp earlyprintk=ttyS0,115200 debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw drbd.minor_count=8' -initrd /fs/sdh1/initrd-vm-kbuild-yocto-ia32-31 -m 320 -smp 1 -net nic,vlan=1,model=e1000 -net user,vlan=1 -boot order=nc -no-reboot -watchdog i6300esb -rtc base=localtime -drive file=/fs/sdh1/disk0-vm-kbuild-yocto-ia32-31,media=disk,if=virtio -pidfile /dev/shm/kboot/pid-vm-kbuild-yocto-ia32-31 -serial file:/dev/shm/kboot/serial-vm-kbuild-yocto-ia32-31 -daemonize -display none -monitor null
Thanks,
Fengguang
[drm/i915] WARNING: CPU: 0 PID: 1814 at drivers/gpu/drm/drm_irq.c:1080 drm_wait_one_vblank+0x180/0x190 [drm]()
by lkp@01.org
FYI, we noticed the below changes on
git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit 51e31d49c89055299e34b8f44d13f70e19aaaad1 ("drm/i915: Use generic vblank wait")
+-------------------------+------------+------------+
| | 07f11d49f1 | 51e31d49c8 |
+-------------------------+------------+------------+
| boot_successes | 10 | 0 |
| early-boot-hang | 1 | |
| boot_failures | 0 | 3 |
| BUG:kernel_test_crashed | 0 | 3 |
+-------------------------+------------+------------+
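The warning itself is drm_wait_one_vblank() refusing to wait because vblank interrupts could not be enabled on crtc 1 (-22 is -EINVAL). Simplified into a stand-alone sketch rather than the exact drm_irq.c source, the helper behaves roughly like this:

#include <stdio.h>

#define EINVAL 22

/* Stand-in for drm_vblank_get(): enable vblank interrupts and take a
 * reference, or fail with -EINVAL when the crtc has no vblank support.
 * crtc 1 is made to fail here to mimic the log. */
static int vblank_get(int crtc)
{
        return (crtc == 1) ? -EINVAL : 0;
}

/* Shape of drm_wait_one_vblank(): if the reference cannot be taken,
 * warn and bail out instead of waiting -- the WARN seen in this report. */
static void wait_one_vblank(int crtc)
{
        int ret = vblank_get(crtc);
        if (ret) {
                printf("vblank not available on crtc %d, ret=%d\n", crtc, ret);
                return;
        }
        /* ...wait for the vblank counter to advance, then drop the ref... */
}

int main(void) { wait_one_vblank(1); return 0; }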
<6>[ 25.092524] ipmi message handler version 39.2
<6>[ 25.106110] IPMI Watchdog: driver initialized
<4>[ 46.024267] ------------[ cut here ]------------
<4>[ 46.030029] WARNING: CPU: 0 PID: 1814 at drivers/gpu/drm/drm_irq.c:1080 drm_wait_one_vblank+0x180/0x190 [drm]()
<4>[ 46.045334] vblank not available on crtc 1, ret=-22
<4>[ 46.050710] Modules linked in: ipmi_watchdog ipmi_msghandler btrfs xor raid6_pq sg sr_mod sd_mod cdrom mxm_wmi pcspkr snd_hda_codec_hdmi snd_hda_codec_conexant snd_hda_codec_generic snd_hda_intel thinkpad_acpi snd_hda_controller i915 ahci snd_hda_codec sdhci_pci libahci snd_hwdep rfkill libata sdhci uvcvideo parport_pc parport videobuf2_vmalloc snd_pcm videobuf2_core firewire_ohci snd_timer videobuf2_memops mmc_core v4l2_common video i2c_i801 intel_ips drm_kms_helper firewire_core videodev snd crc_itu_t drm wmi soundcore acpi_cpufreq
<4>[ 46.104497] CPU: 0 PID: 1814 Comm: kms_setmode Not tainted 3.18.0-rc2-g2d81df3 #1
<4>[ 46.113099] Hardware name: LENOVO 25222AU/25222AU, BIOS 6IET52WW (1.12 ) 02/16/2010
<4>[ 46.121968] 0000000000000009 ffff88006393f998 ffffffff81877998 ffff880066e0ffb8
<4>[ 46.129736] ffff88006393f9e8 ffff88006393f9d8 ffffffff8106a561 ffff88006393f9d8
<4>[ 46.134783] ffff8800755d0000 ffff8800766e3800 0000000000000001 0000000080000804
<4>[ 46.138393] Call Trace:
<4>[ 46.139060] [<ffffffff81877998>] dump_stack+0x4e/0x68
<4>[ 46.141227] [<ffffffff8106a561>] warn_slowpath_common+0x81/0xa0
<4>[ 46.144489] [<ffffffff8106a5c6>] warn_slowpath_fmt+0x46/0x50
<4>[ 46.146830] [<ffffffffa004c7e0>] drm_wait_one_vblank+0x180/0x190 [drm]
<4>[ 46.150249] [<ffffffffa023d10b>] ? gen5_read32+0x5b/0xe0 [i915]
<4>[ 46.152729] [<ffffffffa027ef97>] intel_disable_hdmi+0x1a7/0x1e0 [i915]
<4>[ 46.155166] [<ffffffffa02541aa>] ironlake_crtc_disable+0x15a/0x7a0 [i915]
<4>[ 46.158658] [<ffffffff811bd83c>] ? kmem_cache_alloc_trace+0x4c/0x210
<4>[ 46.161194] [<ffffffffa025617b>] ? __intel_set_mode+0x4b/0xaf0 [i915]
<4>[ 46.164606] [<ffffffffa02563d7>] __intel_set_mode+0x2a7/0xaf0 [i915]
<4>[ 46.167110] [<ffffffffa025bdc6>] intel_set_mode+0x16/0x30 [i915]
<4>[ 46.170410] [<ffffffffa025cf52>] intel_crtc_set_config+0xa92/0xe90 [i915]
<4>[ 46.173103] [<ffffffff810904ca>] ? __might_sleep+0x3a/0xc0
<4>[ 46.175309] [<ffffffffa0054980>] drm_mode_set_config_internal+0x60/0xf0 [drm]
<4>[ 46.178833] [<ffffffffa0059053>] drm_mode_setcrtc+0x283/0x580 [drm]
<4>[ 46.182285] [<ffffffffa004a9d7>] drm_ioctl+0x197/0x680 [drm]
<4>[ 46.184576] [<ffffffff811f0e60>] do_vfs_ioctl+0x2f0/0x4f0
<4>[ 46.186751] [<ffffffff813958f7>] ? file_has_perm+0x87/0xa0
<4>[ 46.189103] [<ffffffff811f10e1>] SyS_ioctl+0x81/0xa0
<4>[ 46.191292] [<ffffffff81880369>] system_call_fastpath+0x12/0x17
<4>[ 46.194522] ---[ end trace 7042ddb87f73ead7 ]---
<3>[ 51.725259] [drm:intel_cpu_fifo_underrun_irq_handler [i915]] *ERROR* CPU pipe A FIFO underrun
[dmi] PANIC: early exception 0e rip 10:ffffffff81899e6b error 9 cr2 ffffffffff240000
by LKP
FYI, we noticed the below changes on
https://git.linaro.org/people/ard.biesheuvel/linux-arm efi-for-3.19
commit aacdce6e880894acb57d71dcb2e3fc61b4ed4e96 ("dmi: add support for SMBIOS 3.0 64-bit entry point")
+-----------------------+------------+------------+
| | 2fa165a26c | aacdce6e88 |
+-----------------------+------------+------------+
| boot_successes | 20 | 10 |
| early-boot-hang | 1 | |
| boot_failures | 0 | 5 |
| PANIC:early_exception | 0 | 5 |
+-----------------------+------------+------------+
[ 0.000000] BIOS-e820: [mem 0x0000000100000000-0x000000036fffffff] usable
[ 0.000000] bootconsole [earlyser0] enabled
[ 0.000000] NX (Execute Disable) protection: active
PANIC: early exception 0e rip 10:ffffffff81899e6b error 9 cr2 ffffffffff240000
[ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 3.18.0-rc2-gc5221e6 #1
[ 0.000000] 0000000000000000 ffffffff82203d30 ffffffff819f0a6e 00000000000003f8
[ 0.000000] ffffffffff240000 ffffffff82203e18 ffffffff823701b0 ffffffff82511401
[ 0.000000] 0000000000000000 0000000000000ba3 0000000000000000 ffffffffff240000
[ 0.000000] Call Trace:
[ 0.000000] [<ffffffff819f0a6e>] dump_stack+0x4e/0x68
[ 0.000000] [<ffffffff823701b0>] early_idt_handler+0x90/0xb7
[ 0.000000] [<ffffffff823c80da>] ? dmi_save_one_device+0x81/0x81
[ 0.000000] [<ffffffff81899e6b>] ? dmi_table+0x3f/0x94
[ 0.000000] [<ffffffff81899e42>] ? dmi_table+0x16/0x94
[ 0.000000] [<ffffffff823c80da>] ? dmi_save_one_device+0x81/0x81
[ 0.000000] [<ffffffff823c80da>] ? dmi_save_one_device+0x81/0x81
[ 0.000000] [<ffffffff823c7eff>] dmi_walk_early+0x44/0x69
[ 0.000000] [<ffffffff823c88a2>] dmi_present+0x180/0x1ff
[ 0.000000] [<ffffffff823c8ab3>] dmi_scan_machine+0x144/0x191
[ 0.000000] [<ffffffff82370702>] ? loglevel+0x31/0x31
[ 0.000000] [<ffffffff82377f52>] setup_arch+0x490/0xc73
[ 0.000000] [<ffffffff819eef73>] ? printk+0x4d/0x4f
[ 0.000000] [<ffffffff82370b90>] start_kernel+0x9c/0x43f
[ 0.000000] [<ffffffff82370120>] ? early_idt_handlers+0x120/0x120
[ 0.000000] [<ffffffff823704a2>] x86_64_start_reservations+0x2a/0x2c
[ 0.000000] [<ffffffff823705df>] x86_64_start_kernel+0x13b/0x14a
[ 0.000000] RIP 0x4
[sched] e8fd90f3cb7: -18.3% unixbench.score
by LKP
FYI, we noticed the below changes on
git://bee.sh.intel.com/git/ydu19/linux for-lkp
commit e8fd90f3cb75c067a77bf9c1f222a0e06529fbc8 ("sched: Rewrite per entity runnable load average tracking")
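The commit under test rewrites the per-entity runnable load-average accounting, which is consistent with the sched_debug fields that drop to 0 below (blocked_load_avg, tg_load_contrib, runnable_load_avg). For reference, the tracked quantity is a geometrically decayed sum of runnable time: each ~1 ms period contributes, and older contributions decay so that they halve every 32 periods. The toy user-space illustration below shows only that decay idea, not the scheduler's fixed-point code.

#include <stdio.h>
#include <math.h>

int main(void)
{
        /* per-period decay factor y, chosen so y^32 == 0.5 */
        const double y = pow(0.5, 1.0 / 32.0);
        double load_avg = 0.0;

        for (int period = 0; period < 128; period++) {
                double runnable = (period < 64) ? 1.0 : 0.0;  /* busy, then idle */
                load_avg = load_avg * y + runnable;
                if (period % 32 == 31)
                        printf("period %3d: load_avg = %.2f\n", period, load_avg);
        }
        return 0;
}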
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2 testbox/testcase/testparams
---------------- -------------------------- ---------------------------
%stddev %change %stddev
\ | \
10296 ± 0% -18.3% 8407 ± 6% nhm-white/unixbench/shell8
10296 -18.3% 8407 GEO-MEAN unixbench.score
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34111 ± 0% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34111 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[3]:/.blocked_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
102 ± 17% -100.0% 0 ± 0% nhm-white/unixbench/shell8
102 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[7]:/.runnable_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34321 ± 2% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34321 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[6]:/.tg_load_contrib
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34100 ± 2% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34100 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[6]:/.blocked_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
106 ± 27% -100.0% 0 ± 0% nhm-white/unixbench/shell8
106 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[6]:/.runnable_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34200 ± 1% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34200 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[5]:/.tg_load_contrib
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
273310 ± 0% -99.8% 647 ± 13% nhm-white/unixbench/shell8
273310 -99.8% 647 GEO-MEAN sched_debug.cfs_rq[1]:/.tg_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
33983 ± 2% -100.0% 0 ± 0% nhm-white/unixbench/shell8
33983 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[5]:/.blocked_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
104 ± 17% -100.0% 0 ± 0% nhm-white/unixbench/shell8
104 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[5]:/.runnable_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34038 ± 2% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34038 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[7]:/.tg_load_contrib
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
33442 ± 1% -100.0% 0 ± 0% nhm-white/unixbench/shell8
33442 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[4]:/.tg_load_contrib
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
68291392 ± 0% -80.5% 13291992 ± 26% nhm-white/unixbench/shell8
68291391 -80.5% 13291992 GEO-MEAN cpuidle.C1E-NHM.time
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
395964 ± 0% -82.7% 68531 ± 34% nhm-white/unixbench/shell8
395964 -82.7% 68531 GEO-MEAN cpuidle.C1E-NHM.usage
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
33230 ± 1% -100.0% 0 ± 0% nhm-white/unixbench/shell8
33230 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[4]:/.blocked_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
112 ± 20% -100.0% 0 ± 0% nhm-white/unixbench/shell8
112 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[4]:/.runnable_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34428 ± 1% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34428 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[3]:/.tg_load_contrib
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
33826 ± 2% -100.0% 0 ± 0% nhm-white/unixbench/shell8
33826 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[7]:/.blocked_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
272919 ± 0% -99.8% 648 ± 13% nhm-white/unixbench/shell8
272919 -99.8% 648 GEO-MEAN sched_debug.cfs_rq[5]:/.tg_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
120 ± 4% -100.0% 0 ± 0% nhm-white/unixbench/shell8
119 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[3]:/.runnable_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34453 ± 1% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34453 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[2]:/.tg_load_contrib
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34203 ± 1% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34202 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[2]:/.blocked_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
118 ± 17% -100.0% 0 ± 0% nhm-white/unixbench/shell8
118 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[2]:/.runnable_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34030 ± 2% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34030 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[1]:/.tg_load_contrib
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
33757 ± 3% -100.0% 0 ± 0% nhm-white/unixbench/shell8
33757 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[1]:/.blocked_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
90 ± 14% -100.0% 0 ± 0% nhm-white/unixbench/shell8
90 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[1]:/.runnable_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
34085 ± 2% -100.0% 0 ± 0% nhm-white/unixbench/shell8
34085 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[0]:/.tg_load_contrib
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
33914 ± 2% -100.0% 0 ± 0% nhm-white/unixbench/shell8
33914 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[0]:/.blocked_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
98 ± 8% -100.0% 0 ± 0% nhm-white/unixbench/shell8
98 -100.0% 0 GEO-MEAN sched_debug.cfs_rq[0]:/.runnable_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
272305 ± 0% -99.8% 649 ± 13% nhm-white/unixbench/shell8
272305 -99.8% 649 GEO-MEAN sched_debug.cfs_rq[7]:/.tg_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
272886 ± 0% -99.8% 647 ± 13% nhm-white/unixbench/shell8
272886 -99.8% 647 GEO-MEAN sched_debug.cfs_rq[4]:/.tg_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
273102 ± 0% -99.8% 648 ± 13% nhm-white/unixbench/shell8
273102 -99.8% 647 GEO-MEAN sched_debug.cfs_rq[2]:/.tg_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
273216 ± 0% -99.8% 647 ± 13% nhm-white/unixbench/shell8
273216 -99.8% 647 GEO-MEAN sched_debug.cfs_rq[3]:/.tg_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
272399 ± 0% -99.8% 648 ± 13% nhm-white/unixbench/shell8
272399 -99.8% 648 GEO-MEAN sched_debug.cfs_rq[6]:/.tg_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
273338 ± 0% -99.8% 646 ± 13% nhm-white/unixbench/shell8
273338 -99.8% 646 GEO-MEAN sched_debug.cfs_rq[0]:/.tg_load_avg
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
192468 ± 0% -71.5% 54783 ± 11% nhm-white/unixbench/shell8
192468 -71.5% 54783 GEO-MEAN sched_debug.cpu#4.ttwu_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
190003 ± 0% -71.1% 54827 ± 12% nhm-white/unixbench/shell8
190003 -71.1% 54827 GEO-MEAN sched_debug.cpu#6.ttwu_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
187822 ± 1% -61.8% 71707 ± 37% nhm-white/unixbench/shell8
187821 -61.8% 71707 GEO-MEAN sched_debug.cpu#7.ttwu_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
187984 ± 0% -71.0% 54584 ± 12% nhm-white/unixbench/shell8
187984 -71.0% 54584 GEO-MEAN sched_debug.cpu#5.ttwu_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
487696 ± 0% -65.7% 167112 ± 12% nhm-white/unixbench/shell8
487696 -65.7% 167112 GEO-MEAN softirqs.SCHED
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
6.49 ± 0% +232.0% 21.56 ± 15% nhm-white/unixbench/shell8
6.49 +232.0% 21.56 GEO-MEAN turbostat.%c3
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
2.615e+08 ± 1% +207.9% 8.052e+08 ± 2% nhm-white/unixbench/shell8
2.615e+08 +207.9% 8.052e+08 GEO-MEAN cpuidle.C6-NHM.time
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1 ± 34% +108.0% 2 ± 18% nhm-white/unixbench/shell8
1 +108.0% 2 GEO-MEAN sched_debug.cfs_rq[5]:/.nr_spread_over
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1508887 ± 0% -66.9% 499872 ± 7% nhm-white/unixbench/shell8
1508887 -66.9% 499872 GEO-MEAN sched_debug.cfs_rq[5]:/.min_vruntime
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1517135 ± 0% -66.7% 505553 ± 5% nhm-white/unixbench/shell8
1517135 -66.7% 505553 GEO-MEAN sched_debug.cfs_rq[6]:/.min_vruntime
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1509639 ± 0% -66.2% 509772 ± 6% nhm-white/unixbench/shell8
1509639 -66.2% 509772 GEO-MEAN sched_debug.cfs_rq[4]:/.min_vruntime
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
179613 ± 0% -66.1% 60863 ± 12% nhm-white/unixbench/shell8
179612 -66.1% 60863 GEO-MEAN sched_debug.cpu#3.ttwu_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
180338 ± 1% -66.2% 60896 ± 13% nhm-white/unixbench/shell8
180338 -66.2% 60896 GEO-MEAN sched_debug.cpu#1.ttwu_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
179286 ± 0% -65.9% 61065 ± 13% nhm-white/unixbench/shell8
179286 -65.9% 61064 GEO-MEAN sched_debug.cpu#2.ttwu_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
125 ± 15% -62.9% 46 ± 40% nhm-white/unixbench/shell8
125 -62.9% 46 GEO-MEAN sched_debug.cpu#3.cpu_load[0]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
178400 ± 0% -64.6% 63074 ± 12% nhm-white/unixbench/shell8
178400 -64.6% 63074 GEO-MEAN sched_debug.cpu#0.ttwu_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
967748 ± 0% -64.0% 348094 ± 10% nhm-white/unixbench/shell8
967748 -64.0% 348094 GEO-MEAN sched_debug.cpu#4.sched_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
966974 ± 0% -64.0% 347881 ± 10% nhm-white/unixbench/shell8
966974 -64.0% 347881 GEO-MEAN sched_debug.cpu#4.nr_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
107 ± 10% -54.4% 48 ± 13% nhm-white/unixbench/shell8
106 -54.4% 48 GEO-MEAN sched_debug.cpu#4.cpu_load[2]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
951681 ± 0% -63.2% 350214 ± 10% nhm-white/unixbench/shell8
951681 -63.2% 350214 GEO-MEAN sched_debug.cpu#5.sched_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
950933 ± 0% -63.2% 349999 ± 10% nhm-white/unixbench/shell8
950932 -63.2% 349999 GEO-MEAN sched_debug.cpu#5.nr_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
957692 ± 0% -63.3% 351224 ± 10% nhm-white/unixbench/shell8
957692 -63.3% 351224 GEO-MEAN sched_debug.cpu#6.sched_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
956945 ± 0% -63.3% 351024 ± 10% nhm-white/unixbench/shell8
956945 -63.3% 351024 GEO-MEAN sched_debug.cpu#6.nr_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
948204 ± 0% -55.9% 418133 ± 23% nhm-white/unixbench/shell8
948204 -55.9% 418133 GEO-MEAN sched_debug.cpu#7.sched_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
947425 ± 0% -55.9% 417965 ± 23% nhm-white/unixbench/shell8
947425 -55.9% 417965 GEO-MEAN sched_debug.cpu#7.nr_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
113 ± 14% -54.6% 51 ± 13% nhm-white/unixbench/shell8
113 -54.6% 51 GEO-MEAN sched_debug.cpu#4.cpu_load[1]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
249126 ± 0% -64.4% 88710 ± 19% nhm-white/unixbench/shell8
249126 -64.4% 88710 GEO-MEAN sched_debug.cpu#4.sched_goidle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
104 ± 6% -54.4% 47 ± 12% nhm-white/unixbench/shell8
103 -54.4% 47 GEO-MEAN sched_debug.cpu#4.cpu_load[3]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
60 ± 20% +210.8% 188 ± 47% nhm-white/unixbench/shell8
60 +210.8% 188 GEO-MEAN sched_debug.cpu#6.nr_uninterruptible
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
243578 ± 0% -63.0% 90032 ± 18% nhm-white/unixbench/shell8
243577 -63.0% 90032 GEO-MEAN sched_debug.cpu#6.sched_goidle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
240253 ± 1% -62.7% 89626 ± 18% nhm-white/unixbench/shell8
240253 -62.7% 89626 GEO-MEAN sched_debug.cpu#5.sched_goidle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
102 ± 3% -54.6% 46 ± 11% nhm-white/unixbench/shell8
102 -54.6% 46 GEO-MEAN sched_debug.cpu#4.cpu_load[4]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
124 ± 16% -55.0% 55 ± 17% nhm-white/unixbench/shell8
124 -55.0% 55 GEO-MEAN sched_debug.cpu#4.cpu_load[0]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
390759 ± 0% -56.3% 170669 ± 0% nhm-white/unixbench/shell8
390759 -56.3% 170669 GEO-MEAN sched_debug.cpu#4.ttwu_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
388660 ± 0% -56.1% 170732 ± 0% nhm-white/unixbench/shell8
388660 -56.1% 170732 GEO-MEAN sched_debug.cpu#6.ttwu_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
385167 ± 1% -54.5% 175140 ± 3% nhm-white/unixbench/shell8
385167 -54.5% 175140 GEO-MEAN sched_debug.cpu#7.ttwu_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
105 ± 9% -50.8% 51 ± 10% nhm-white/unixbench/shell8
105 -50.8% 51 GEO-MEAN sched_debug.cpu#0.cpu_load[1]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
385612 ± 0% -55.8% 170349 ± 1% nhm-white/unixbench/shell8
385612 -55.8% 170349 GEO-MEAN sched_debug.cpu#5.ttwu_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
112 ± 8% -59.4% 45 ± 39% nhm-white/unixbench/shell8
112 -59.4% 45 GEO-MEAN sched_debug.cpu#3.cpu_load[1]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
99 ± 3% -51.7% 47 ± 11% nhm-white/unixbench/shell8
98 -51.7% 47 GEO-MEAN sched_debug.cpu#6.cpu_load[4]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
100 ± 3% -54.1% 46 ± 13% nhm-white/unixbench/shell8
100 -54.1% 46 GEO-MEAN sched_debug.cpu#5.cpu_load[4]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
100 ± 3% -50.8% 49 ± 13% nhm-white/unixbench/shell8
100 -50.8% 49 GEO-MEAN sched_debug.cpu#6.cpu_load[3]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
102 ± 2% -58.9% 42 ± 39% nhm-white/unixbench/shell8
102 -58.9% 42 GEO-MEAN sched_debug.cpu#3.cpu_load[3]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
106 ± 5% -59.1% 43 ± 38% nhm-white/unixbench/shell8
106 -59.1% 43 GEO-MEAN sched_debug.cpu#3.cpu_load[2]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
102 ± 6% -53.4% 47 ± 15% nhm-white/unixbench/shell8
102 -53.4% 47 GEO-MEAN sched_debug.cpu#5.cpu_load[3]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
106 ± 11% -52.6% 50 ± 17% nhm-white/unixbench/shell8
106 -52.6% 50 GEO-MEAN sched_debug.cpu#5.cpu_load[2]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
100 ± 1% -58.9% 41 ± 39% nhm-white/unixbench/shell8
100 -58.9% 41 GEO-MEAN sched_debug.cpu#3.cpu_load[4]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
101 ± 4% -51.1% 49 ± 7% nhm-white/unixbench/shell8
101 -51.1% 49 GEO-MEAN sched_debug.cpu#0.cpu_load[2]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
106 ± 15% -48.0% 55 ± 11% nhm-white/unixbench/shell8
106 -48.0% 55 GEO-MEAN sched_debug.cpu#0.cpu_load[0]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
98 ± 6% -49.4% 50 ± 7% nhm-white/unixbench/shell8
98 -49.4% 49 GEO-MEAN sched_debug.cpu#1.cpu_load[3]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
113 ± 17% -52.4% 54 ± 16% nhm-white/unixbench/shell8
113 -52.4% 54 GEO-MEAN sched_debug.cpu#5.cpu_load[1]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
100 ± 3% -51.0% 49 ± 6% nhm-white/unixbench/shell8
100 -51.0% 48 GEO-MEAN sched_debug.cpu#0.cpu_load[3]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
99 ± 4% -49.5% 50 ± 7% nhm-white/unixbench/shell8
99 -49.5% 50 GEO-MEAN sched_debug.cpu#1.cpu_load[4]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
123 ± 18% -53.4% 57 ± 8% nhm-white/unixbench/shell8
123 -53.4% 57 GEO-MEAN sched_debug.cpu#2.cpu_load[0]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
104 ± 4% -49.2% 52 ± 15% nhm-white/unixbench/shell8
103 -49.2% 52 GEO-MEAN sched_debug.cpu#6.cpu_load[2]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
99 ± 3% -50.1% 49 ± 6% nhm-white/unixbench/shell8
98 -50.1% 49 GEO-MEAN sched_debug.cpu#0.cpu_load[4]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
124 ± 23% -52.4% 59 ± 15% nhm-white/unixbench/shell8
124 -52.4% 59 GEO-MEAN sched_debug.cpu#5.cpu_load[0]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
2 ± 19% +77.8% 4 ± 22% nhm-white/unixbench/shell8
2 +77.8% 4 GEO-MEAN sched_debug.cpu#1.nr_running
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
4 ± 17% +131.6% 11 ± 14% nhm-white/unixbench/shell8
4 +131.6% 11 GEO-MEAN sched_debug.cfs_rq[0]:/.nr_spread_over
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
114 ± 12% -45.3% 62 ± 19% nhm-white/unixbench/shell8
114 -45.3% 62 GEO-MEAN sched_debug.cpu#6.cpu_load[0]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
110 ± 11% -49.6% 55 ± 10% nhm-white/unixbench/shell8
110 -49.6% 55 GEO-MEAN sched_debug.cpu#2.cpu_load[1]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
108 ± 6% -47.0% 57 ± 18% nhm-white/unixbench/shell8
108 -47.0% 57 GEO-MEAN sched_debug.cpu#6.cpu_load[1]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
96 ± 7% -47.2% 51 ± 7% nhm-white/unixbench/shell8
96 -47.2% 51 GEO-MEAN sched_debug.cpu#1.cpu_load[2]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
100 ± 4% -48.0% 52 ± 11% nhm-white/unixbench/shell8
100 -48.0% 52 GEO-MEAN sched_debug.cpu#2.cpu_load[4]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
101 ± 6% -47.7% 53 ± 11% nhm-white/unixbench/shell8
101 -47.7% 53 GEO-MEAN sched_debug.cpu#2.cpu_load[3]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
103 ± 8% -47.6% 54 ± 11% nhm-white/unixbench/shell8
103 -47.6% 54 GEO-MEAN sched_debug.cpu#2.cpu_load[2]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
5.00 ± 0% +77.7% 8.89 ± 8% nhm-white/unixbench/shell8
5.00 +77.7% 8.89 GEO-MEAN turbostat.%c6
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
119445 ± 0% -43.6% 67340 ± 4% nhm-white/unixbench/shell8
119445 -43.6% 67340 GEO-MEAN sched_debug.cfs_rq[4]:/.exec_clock
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
90 ± 9% -41.7% 52 ± 9% nhm-white/unixbench/shell8
90 -41.7% 52 GEO-MEAN sched_debug.cpu#1.cpu_load[1]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
118113 ± 0% -53.2% 55272 ± 37% nhm-white/unixbench/shell8
118113 -53.2% 55272 GEO-MEAN sched_debug.cfs_rq[7]:/.exec_clock
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
118148 ± 0% -43.1% 67258 ± 4% nhm-white/unixbench/shell8
118148 -43.1% 67258 GEO-MEAN sched_debug.cfs_rq[5]:/.exec_clock
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
118604 ± 0% -43.3% 67280 ± 4% nhm-white/unixbench/shell8
118604 -43.3% 67280 GEO-MEAN sched_debug.cfs_rq[6]:/.exec_clock
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
941 ± 0% +91.4% 1802 ± 12% nhm-white/unixbench/shell8
941 +91.4% 1802 GEO-MEAN uptime.idle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1026977 ± 1% -46.3% 551085 ± 10% nhm-white/unixbench/shell8
1026977 -46.3% 551085 GEO-MEAN sched_debug.cpu#1.sched_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1026190 ± 1% -46.3% 550829 ± 10% nhm-white/unixbench/shell8
1026190 -46.3% 550829 GEO-MEAN sched_debug.cpu#1.nr_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1026217 ± 0% -38.1% 635710 ± 17% nhm-white/unixbench/shell8
1026217 -38.1% 635710 GEO-MEAN sched_debug.cpu#3.sched_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1025416 ± 0% -38.0% 635466 ± 17% nhm-white/unixbench/shell8
1025416 -38.0% 635466 GEO-MEAN sched_debug.cpu#3.nr_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1020645 ± 0% -45.6% 555289 ± 9% nhm-white/unixbench/shell8
1020645 -45.6% 555289 GEO-MEAN sched_debug.cpu#2.sched_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1019855 ± 0% -45.6% 555037 ± 9% nhm-white/unixbench/shell8
1019855 -45.6% 555037 GEO-MEAN sched_debug.cpu#2.nr_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1009946 ± 0% -45.6% 549668 ± 13% nhm-white/unixbench/shell8
1009946 -45.6% 549668 GEO-MEAN sched_debug.cpu#0.sched_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1009107 ± 0% -45.6% 549376 ± 13% nhm-white/unixbench/shell8
1009107 -45.6% 549376 GEO-MEAN sched_debug.cpu#0.nr_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
393502 ± 1% -41.9% 228638 ± 2% nhm-white/unixbench/shell8
393502 -41.9% 228638 GEO-MEAN sched_debug.cpu#1.ttwu_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
4.308e+08 ± 0% +77.1% 7.63e+08 ± 8% nhm-white/unixbench/shell8
4.308e+08 +77.1% 7.63e+08 GEO-MEAN cpuidle.C3-NHM.time
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
391521 ± 0% -44.8% 216253 ± 13% nhm-white/unixbench/shell8
391521 -44.8% 216253 GEO-MEAN sched_debug.cpu#3.ttwu_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
106 ± 20% -39.0% 64 ± 49% nhm-white/unixbench/shell8
106 -39.0% 64 GEO-MEAN sched_debug.cpu#7.load
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
390864 ± 0% -41.0% 230683 ± 1% nhm-white/unixbench/shell8
390864 -41.0% 230683 GEO-MEAN sched_debug.cpu#2.ttwu_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
389212 ± 0% -41.6% 227116 ± 5% nhm-white/unixbench/shell8
389212 -41.6% 227116 GEO-MEAN sched_debug.cpu#0.ttwu_count
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
102 ± 14% -26.6% 75 ± 21% nhm-white/unixbench/shell8
102 -26.6% 75 GEO-MEAN sched_debug.cpu#0.load
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
12516 ± 9% -24.2% 9487 ± 19% nhm-white/unixbench/shell8
12516 -24.2% 9487 GEO-MEAN sched_debug.cpu#4.curr->pid
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
170375 ± 0% -36.8% 107654 ± 2% nhm-white/unixbench/shell8
170375 -36.8% 107654 GEO-MEAN sched_debug.cpu#4.nr_load_updates
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
169419 ± 0% -36.0% 108482 ± 1% nhm-white/unixbench/shell8
169419 -36.0% 108482 GEO-MEAN sched_debug.cpu#6.nr_load_updates
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
169661 ± 0% -36.0% 108613 ± 2% nhm-white/unixbench/shell8
169661 -36.0% 108613 GEO-MEAN sched_debug.cpu#5.nr_load_updates
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
112 ± 6% -45.5% 61 ± 45% nhm-white/unixbench/shell8
112 -45.5% 61 GEO-MEAN sched_debug.cpu#3.load
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
13.95 ± 0% +52.3% 21.24 ± 0% nhm-white/unixbench/shell8
13.94 +52.3% 21.24 GEO-MEAN turbostat.%c1
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
80 ± 12% -31.2% 55 ± 10% nhm-white/unixbench/shell8
80 -31.2% 55 GEO-MEAN sched_debug.cpu#1.cpu_load[0]
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
282791 ± 2% -36.9% 178328 ± 13% nhm-white/unixbench/shell8
282791 -36.9% 178328 GEO-MEAN sched_debug.cpu#1.sched_goidle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
462686 ± 5% +59.9% 739766 ± 9% nhm-white/unixbench/shell8
462686 +59.9% 739766 GEO-MEAN sched_debug.cpu#6.avg_idle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
664637 ± 0% -33.5% 442292 ± 0% nhm-white/unixbench/shell8
664637 -33.5% 442291 GEO-MEAN softirqs.RCU
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
462699 ± 4% +41.1% 653020 ± 14% nhm-white/unixbench/shell8
462699 +41.1% 653020 GEO-MEAN sched_debug.cpu#7.avg_idle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
458554 ± 2% +58.5% 726779 ± 10% nhm-white/unixbench/shell8
458554 +58.5% 726779 GEO-MEAN sched_debug.cpu#5.avg_idle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
119722 ± 0% -32.4% 80958 ± 0% nhm-white/unixbench/shell8
119722 -32.4% 80958 GEO-MEAN sched_debug.cfs_rq[1]:/.exec_clock
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
119548 ± 0% -39.1% 72829 ± 22% nhm-white/unixbench/shell8
119548 -39.1% 72829 GEO-MEAN sched_debug.cfs_rq[3]:/.exec_clock
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
279808 ± 0% -35.6% 180276 ± 11% nhm-white/unixbench/shell8
279808 -35.6% 180276 GEO-MEAN sched_debug.cpu#2.sched_goidle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
118839 ± 0% -31.0% 81942 ± 2% nhm-white/unixbench/shell8
118839 -31.0% 81942 GEO-MEAN sched_debug.cfs_rq[2]:/.exec_clock
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1132186 ± 0% -31.4% 776541 ± 0% nhm-white/unixbench/shell8
1132186 -31.4% 776541 GEO-MEAN softirqs.TIMER
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
120897 ± 0% -31.5% 82798 ± 1% nhm-white/unixbench/shell8
120897 -31.5% 82798 GEO-MEAN sched_debug.cfs_rq[0]:/.exec_clock
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
272557 ± 1% -35.3% 176450 ± 16% nhm-white/unixbench/shell8
272557 -35.3% 176449 GEO-MEAN sched_debug.cpu#0.sched_goidle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
489016 ± 2% +50.9% 738092 ± 14% nhm-white/unixbench/shell8
489016 +50.9% 738091 GEO-MEAN sched_debug.cpu#4.avg_idle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
11928 ± 8% -31.3% 8190 ± 43% nhm-white/unixbench/shell8
11928 -31.3% 8190 GEO-MEAN sched_debug.cpu#7.curr->pid
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
12216 ± 11% -24.4% 9238 ± 9% nhm-white/unixbench/shell8
12216 -24.4% 9238 GEO-MEAN sched_debug.cpu#5.curr->pid
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
114 ± 21% -25.4% 85 ± 6% nhm-white/unixbench/shell8
114 -25.4% 85 GEO-MEAN sched_debug.cfs_rq[4]:/.load
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
91391004 ± 0% -26.6% 67036163 ± 5% nhm-white/unixbench/shell8
91391004 -26.6% 67036163 GEO-MEAN proc-vmstat.pgalloc_dma32
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
8324 ± 1% -25.8% 6178 ± 5% nhm-white/unixbench/shell8
8324 -25.8% 6178 GEO-MEAN proc-vmstat.thp_fault_alloc
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
2.126e+08 ± 0% -26.5% 1.562e+08 ± 5% nhm-white/unixbench/shell8
2.126e+08 -26.5% 1.562e+08 GEO-MEAN proc-vmstat.pgfault
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
760899 ± 0% -26.5% 559460 ± 5% nhm-white/unixbench/shell8
760899 -26.5% 559460 GEO-MEAN proc-vmstat.pgactivate
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1.819e+08 ± 0% -26.3% 1.341e+08 ± 5% nhm-white/unixbench/shell8
1.819e+08 -26.3% 1.341e+08 GEO-MEAN proc-vmstat.pgfree
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1.702e+08 ± 0% -26.3% 1.254e+08 ± 5% nhm-white/unixbench/shell8
1.702e+08 -26.3% 1.254e+08 GEO-MEAN proc-vmstat.numa_hit
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1.702e+08 ± 0% -26.3% 1.254e+08 ± 5% nhm-white/unixbench/shell8
1.702e+08 -26.3% 1.254e+08 GEO-MEAN proc-vmstat.numa_local
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1515333 ± 0% -30.4% 1054993 ± 22% nhm-white/unixbench/shell8
1515333 -30.4% 1054993 GEO-MEAN sched_debug.cfs_rq[1]:/.min_vruntime
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1510534 ± 0% -30.1% 1056082 ± 18% nhm-white/unixbench/shell8
1510534 -30.1% 1056082 GEO-MEAN sched_debug.cfs_rq[2]:/.min_vruntime
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
90530917 ± 0% -25.9% 67071067 ± 5% nhm-white/unixbench/shell8
90530917 -25.9% 67071067 GEO-MEAN proc-vmstat.pgalloc_normal
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1495949 ± 0% -30.8% 1034539 ± 25% nhm-white/unixbench/shell8
1495949 -30.8% 1034539 GEO-MEAN sched_debug.cfs_rq[0]:/.min_vruntime
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
611688 ± 2% +24.9% 764015 ± 5% nhm-white/unixbench/shell8
611688 +24.9% 764015 GEO-MEAN cpuidle.C6-NHM.usage
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
11521 ± 6% -13.4% 9982 ± 9% nhm-white/unixbench/shell8
11521 -13.4% 9982 GEO-MEAN sched_debug.cpu#2.curr->pid
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
171735 ± 0% -14.7% 146526 ± 2% nhm-white/unixbench/shell8
171735 -14.7% 146526 GEO-MEAN sched_debug.cpu#1.nr_load_updates
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
171042 ± 0% -12.6% 149465 ± 2% nhm-white/unixbench/shell8
171042 -12.6% 149465 GEO-MEAN sched_debug.cpu#3.nr_load_updates
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
170927 ± 0% -13.5% 147851 ± 1% nhm-white/unixbench/shell8
170927 -13.5% 147851 GEO-MEAN sched_debug.cpu#2.nr_load_updates
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
11701 ± 7% -12.2% 10271 ± 6% nhm-white/unixbench/shell8
11701 -12.2% 10271 GEO-MEAN sched_debug.cpu#0.curr->pid
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
171793 ± 0% -14.3% 147172 ± 4% nhm-white/unixbench/shell8
171793 -14.3% 147172 GEO-MEAN sched_debug.cpu#0.nr_load_updates
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
411252 ± 1% +24.9% 513466 ± 22% nhm-white/unixbench/shell8
411252 +24.9% 513466 GEO-MEAN sched_debug.cpu#1.avg_idle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
427570 ± 5% +17.2% 501275 ± 10% nhm-white/unixbench/shell8
427570 +17.2% 501275 GEO-MEAN sched_debug.cpu#2.avg_idle
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
4645 ± 1% -8.0% 4273 ± 3% nhm-white/unixbench/shell8
4644 -8.0% 4273 GEO-MEAN meminfo.KernelStack
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
4262809 ± 0% -77.9% 940165 ± 13% nhm-white/unixbench/shell8
4262809 -77.9% 940165 GEO-MEAN time.involuntary_context_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
10031 ± 0% -51.6% 4853 ± 0% nhm-white/unixbench/shell8
10031 -51.6% 4853 GEO-MEAN vmstat.system.in
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
43248 ± 0% -46.2% 23276 ± 3% nhm-white/unixbench/shell8
43248 -46.2% 23276 GEO-MEAN vmstat.system.cs
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
853 ± 0% -37.1% 537 ± 5% nhm-white/unixbench/shell8
853 -37.1% 537 GEO-MEAN time.user_time
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
74.57 ± 0% -35.2% 48.31 ± 5% nhm-white/unixbench/shell8
74.57 -35.2% 48.31 GEO-MEAN turbostat.%c0
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
584 ± 0% -35.1% 379 ± 5% nhm-white/unixbench/shell8
584 -35.1% 379 GEO-MEAN time.percent_of_cpu_this_job_got
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
1358 ± 0% -33.8% 899 ± 5% nhm-white/unixbench/shell8
1358 -33.8% 899 GEO-MEAN time.system_time
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
6466723 ± 0% -29.8% 4542766 ± 7% nhm-white/unixbench/shell8
6466723 -29.8% 4542766 GEO-MEAN time.voluntary_context_switches
8b8d967635270c5a e8fd90f3cb75c067a77bf9c1f2
---------------- --------------------------
2.118e+08 ± 0% -26.6% 1.555e+08 ± 5% nhm-white/unixbench/shell8
2.118e+08 -26.6% 1.555e+08 GEO-MEAN time.minor_page_faults
nhm-white: Nehalem
Memory: 6G
To reproduce:
apt-get install ruby ruby-oj
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
_______________________________________________
LKP mailing list
LKP@linux.intel.com
[sched] b4c4e6e94e9: -23.2% ebizzy.throughput.per_thread.stddev_percent
by LKP
FYI, we noticed the below changes on
git://bee.sh.intel.com/git/ydu19/linux for-lkp
commit b4c4e6e94e912968495a31bc14b83966346a0bc6 ("sched: Remove task and group entity load_avg when they are dead")
ebizzy.throughput.per_thread.stddev_percent
0.3 ++-------------------------------------------------*-----------------+
|.* .** .**.**.* *.* **. *. : * |
0.28 *+ * * + * + : :+ * * *.*. : : .* |
0.26 ++ + : **. : * * *.* .* ** : * *.** |
| * * * :: : |
0.24 ++ * : |
| : |
0.22 ++ :*. |
| * **.*
0.2 ++ |
0.18 ++ |
| |
0.16 O+ O OO OO OO O OO OO O OO OO |
| O O |
0.14 ++-------------------------------------------------------------------+
turbostat.%c6
0.6 ++-------------------------------------------------------------------+
| |
0.55 O+ O O OO O O O O O O |
| O O O O O O O O O |
0.5 ++ |
| |
0.45 ++ |
| *
0.4 ++ +|
| **.** |
0.35 ++ * : |
| :: : |
0.3 ++ *.**. *. .* .* :: .* .* *. * : |
*.**. : * **.** *.* : : ** * *.* : *. *.* + *.**.** |
0.25 ++---*--------------------*-*------------*-*----*-----*--------------+
[ dozens of similar ASCII plots follow in the original report, one per
  sched_debug.cpu#N.cpu_load[0..4] metric for CPUs 1-3, 7, 10, 15, 17, 19,
  25, 28, 30-35, 37-40, 43-50 and 52-55; in each of them the bisect-bad (O)
  samples sit well below the bisect-good (*) samples ]
[*] bisect-good sample
[O] bisect-bad sample
To reproduce:
apt-get install ruby ruby-oj
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Fengguang
_______________________________________________
LKP mailing list
LKP@linux.intel.com
[AHCI] genirq: Flags mismatch irq 20. 00002080 (ahci) vs. 00000080 (i801_smbus)
by LKP
FYI, we noticed the below changes on
git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git fixes-3.18-rc1
commit ef78bbdb4dd6ef84f6d4c670f40ec202fe9807f4 ("AHCI: Fix threaded interrupt setup")
2974a1837592b49d ef78bbdb4dd6ef84f6d4c670f4 testbox/testcase/testparams
---------------- -------------------------- ---------------------------
fail:runs %reproduction fail:runs
| | |
:12 100% 5:5 client5/boot/performance-1
:12 100% 5:5 TOTAL dmesg.genirq:Flags_mismatch_irq##(ahci)vs.#(i801_smbus)
client5: cpu: Core, memory: 12G
[ 16.143988] ipmi_si 00:07: Found new BMC (man_id: 0x000157, prod_id: 0x0028, dev_id: 0x20)
[ 16.143995] ipmi_si 00:07: IPMI kcs interface initialized
[ 16.163607] ahci 0000:00:1f.2: flags: 64bit ncq pm led pmp slum part
[ 16.170555] genirq: Flags mismatch irq 20. 00002080 (ahci) vs. 00000080 (i801_smbus)
[ 16.171533] CPU: 1 PID: 369 Comm: modprobe Not tainted 3.18.0-rc2-wl-ga4fb664 #1
[ 16.171533] Hardware name: Intel S5000PAL/S5000PAL0, BIOS S5000.86B.10.00.0094.101320081858 10/13/2008
[ 16.171533] ffff88035d539b00 ffff88035d42b988 ffffffff8187741c 00000000000033c0
[ 16.171533] ffff88032efe5400 ffff88035d42b9e8 ffffffff810cbbaf ffff88035d42b9e8
[ 16.171533] 0000000000000246 ffffffff810cbd22 ffff880344fa5800 ffff88035ca8a0e0
[ 16.171533] Call Trace:
[ 16.171533] [<ffffffff8187741c>] dump_stack+0x4e/0x68
[ 16.171533] [<ffffffff810cbbaf>] __setup_irq+0x57f/0x5d0
[ 16.171533] [<ffffffff810cbd22>] ? request_threaded_irq+0x82/0x190
[ 16.171533] [<ffffffffa008c740>] ? ahci_dev_classify+0x60/0x60 [libahci]
[ 16.171533] [<ffffffff810cbd6c>] request_threaded_irq+0xcc/0x190
[ 16.171533] [<ffffffffa008c740>] ? ahci_dev_classify+0x60/0x60 [libahci]
[ 16.171533] [<ffffffffa008ce50>] ? ahci_port_thread_fn+0x600/0x600 [libahci]
[ 16.171533] [<ffffffff810cdc6f>] devm_request_threaded_irq+0x5f/0xc0
[ 16.171533] [<ffffffffa008d4f0>] ahci_host_activate+0x80/0x220 [libahci]
[ 16.171533] [<ffffffff8143ac1a>] ? pcibios_set_master+0x5a/0x90
[ 16.171533] [<ffffffffa00f5c45>] ahci_init_one+0x8c5/0xb60 [ahci]
[ 16.171533] [<ffffffff8143c365>] local_pci_probe+0x45/0xa0
[ 16.171533] [<ffffffff8143d6d5>] ? pci_match_device+0xe5/0x110
[ 16.171533] [<ffffffff8143d811>] pci_device_probe+0xd1/0x120
[ 16.171533] [<ffffffff81513320>] driver_probe_device+0x90/0x3e0
[ 16.171533] [<ffffffff8151374b>] __driver_attach+0x9b/0xa0
[ 16.171533] [<ffffffff815136b0>] ? __device_attach+0x40/0x40
[ 16.171533] [<ffffffff8151112b>] bus_for_each_dev+0x6b/0xb0
[ 16.171533] [<ffffffff81512d9e>] driver_attach+0x1e/0x20
[ 16.171533] [<ffffffff81512980>] bus_add_driver+0x180/0x250
[ 16.171533] [<ffffffffa002c000>] ? 0xffffffffa002c000
[ 16.171533] [<ffffffff81513f54>] driver_register+0x64/0xf0
[ 16.171533] [<ffffffff8143bbbc>] __pci_register_driver+0x4c/0x50
[ 16.171533] [<ffffffffa002c01e>] ahci_pci_driver_init+0x1e/0x1000 [ahci]
[ 16.171533] [<ffffffff81002130>] do_one_initcall+0xc0/0x1f0
[ 16.171533] [<ffffffff811a44d2>] ? __vunmap+0xa2/0x100
[ 16.171533] [<ffffffff810f6491>] load_module+0x15c1/0x1a60
[ 16.171533] [<ffffffff810f1e20>] ? store_uevent+0x40/0x40
[ 16.171533] [<ffffffff810f6ad6>] SyS_finit_module+0x86/0xb0
[ 16.171533] [<ffffffff8187fb29>] system_call_fastpath+0x12/0x17
[ 16.403261] ahci: probe of 0000:00:1f.2 failed with error -16
[ 16.409519] EDAC MC0: Giving out device to module i5000_edac.c controller I5000: DEV 0000:00:10.0 (POLLED)
[ 16.419702] EDAC PCI0: Giving out device to module i5000_edac controller EDAC PCI controller: DEV 0000:00:10.0 (POLLED)
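The mismatch in the log is purely about the request flags on the shared line:
ahci asked for IRQF_SHARED | IRQF_ONESHOT (0x00002080) while i801_smbus
already held IRQ 20 with plain IRQF_SHARED (0x00000080), and genirq refuses
to mix ONESHOT and non-ONESHOT handlers on one line, so the second request
fails with -EBUSY (-16) and the ahci probe aborts. Below is a minimal
kernel-module sketch of how two such requests collide; the demo_* names, the
dummy cookies and the hard-coded IRQ number are made up for illustration,
and this is not the actual ahci or i801 code:
----------------------------------------------------------------------------
#include <linux/module.h>
#include <linux/interrupt.h>

static int dev_a, dev_b;               /* dummy per-handler cookies */

/* Shared-line handler of the first driver; nothing to do in this sketch. */
static irqreturn_t demo_smbus_irq(int irq, void *dev)
{
	return IRQ_NONE;
}

/* Primary handler of the second driver, deferring work to a thread. */
static irqreturn_t demo_ahci_irq(int irq, void *dev)
{
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_ahci_thread(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	int err;

	/* First user of the line: plain shared handler, flags 0x00000080. */
	err = request_irq(20, demo_smbus_irq, IRQF_SHARED,
			  "i801_smbus-demo", &dev_a);
	if (err)
		return err;

	/*
	 * Second user: threaded handler asking for IRQF_SHARED | IRQF_ONESHOT
	 * (0x00002080). The ONESHOT bit differs from the handler already
	 * installed on IRQ 20, so __setup_irq() prints "Flags mismatch irq 20"
	 * and this call returns -EBUSY.
	 */
	err = request_threaded_irq(20, demo_ahci_irq, demo_ahci_thread,
				   IRQF_SHARED | IRQF_ONESHOT,
				   "ahci-demo", &dev_b);
	if (err)
		free_irq(20, &dev_a);
	return err;
}

static void __exit demo_exit(void)
{
	free_irq(20, &dev_b);
	free_irq(20, &dev_a);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
----------------------------------------------------------------------------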
_______________________________________________
LKP mailing list
LKP@linux.intel.com
[tracer branch] kernel BUG at kernel/sched/core.c:2697!
by Fengguang Wu
Hi Aaron,
FYI, your patch triggered a BUG() on an existing old bug.
Let's hope it provides more info to debug the problem.
commit 0d9e26329b0c9263d4d9e0422d80a0e73268c52f
Author: Aaron Tomlin <atomlin@redhat.com>
AuthorDate: Fri Sep 12 14:16:19 2014 +0100
Commit: Ingo Molnar <mingo@kernel.org>
CommitDate: Fri Sep 19 12:35:24 2014 +0200
sched: Add default-disabled option to BUG() when stack end location is overwritten
Currently, in the event of a stack overrun, a call to schedule()
does not check for this type of corruption. This corruption is
often silent and can go unnoticed. However, once the corrupted
region is examined at a later stage, the outcome is undefined and
often results in a sporadic page fault which cannot be handled.
This patch checks for a stack overrun and takes appropriate
action; since the damage is already done, there is no point
in continuing.
Signed-off-by: Aaron Tomlin <atomlin@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: aneesh.kumar@linux.vnet.ibm.com
Cc: dzickus@redhat.com
Cc: bmr@redhat.com
Cc: jcastillo@redhat.com
Cc: oleg@redhat.com
Cc: riel@redhat.com
Cc: prarit@redhat.com
Cc: jgh@redhat.com
Cc: minchan@kernel.org
Cc: mpe@ellerman.id.au
Cc: tglx@linutronix.de
Cc: rostedt@goodmis.org
Cc: hannes@cmpxchg.org
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lubomir Rintel <lkundrak@v3.sk>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1410527779-8133-4-git-send-email-atomlin@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
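For context, the new check is just a stack-end canary test: the kernel writes
STACK_END_MAGIC at the far end of each task's stack when the task is set up,
and with CONFIG_SCHED_STACK_END_CHECK enabled schedule() now BUG()s if that
word has been overwritten. Below is a minimal user-space sketch of the same
idea, for illustration only; the fake_task struct, the buffer size and the
helper bodies are stand-ins, not the kernel's real definitions:
----------------------------------------------------------------------------
/* Illustrative user-space model of the stack-end canary; not the kernel diff. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STACK_END_MAGIC 0x57AC6E9DUL   /* same magic value the kernel uses */
#define STACK_SIZE      8192           /* stand-in for THREAD_SIZE */

struct fake_task {
	unsigned long *stack;          /* lowest address of the stack area */
};

/* The canary lives at the lowest address, where a growing stack would hit it. */
static unsigned long *end_of_stack(struct fake_task *t)
{
	return t->stack;
}

static void set_task_stack_end_magic(struct fake_task *t)
{
	*end_of_stack(t) = STACK_END_MAGIC;
}

static int task_stack_end_corrupted(struct fake_task *t)
{
	return *end_of_stack(t) != STACK_END_MAGIC;
}

int main(void)
{
	struct fake_task t = { .stack = calloc(STACK_SIZE, 1) };

	set_task_stack_end_magic(&t);

	/* Simulate a stack overrun scribbling over the end of the stack. */
	memset(t.stack, 0xaa, 64);

	if (task_stack_end_corrupted(&t)) {
		/* This is where the kernel's check calls BUG(). */
		fprintf(stderr, "corrupted stack end detected\n");
		return 1;
	}
	return 0;
}
----------------------------------------------------------------------------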
===================================================
PARENT COMMIT NOT CLEAN. LOOK OUT FOR WRONG BISECT!
===================================================
Attached dmesg for the parent commit, too, to help confirm whether it is a noise error.
+---------------------------------------------------+------------+------------+-----------+
| | a70857e46d | 0d9e26329b | v3.18-rc2 |
+---------------------------------------------------+------------+------------+-----------+
| boot_successes | 0 | 0 | 0 |
| boot_failures | 312 | 78 | 42 |
| BUG:kernel_boot_hang | 85 | 0 | 8 |
| BUG:kernel_boot_crashed | 168 | 0 | 7 |
| kernel_BUG_at_arch/x86/mm/physaddr.c | 3 | | |
| invalid_opcode | 3 | 78 | 27 |
| EIP_is_at__phys_addr | 3 | | |
| Kernel_panic-not_syncing:Fatal_exception | 57 | 78 | 27 |
| BUG:unable_to_handle_kernel | 54 | | |
| Oops | 54 | | |
| EIP_is_at_dequeue_task_fair | 2 | | |
| backtrace:schedule | 53 | | |
| BUG:spinlock_bad_magic_on_CPU | 3 | | |
| WARNING:at_kernel/trace/trace.c:register_tracer() | 3 | | |
| backtrace:register_tracer | 2 | 78 | 27 |
| backtrace:init_branch_tracer | 2 | 78 | 27 |
| backtrace:kernel_init_freeable | 2 | 78 | 27 |
| backtrace:kobject_create_and_add | 1 | | |
| backtrace:debugfs_init | 1 | | |
| backtrace:securityfs_init | 1 | | |
| backtrace:bus_register | 1 | | |
| backtrace:virtio_init | 1 | | |
| backtrace:panic | 1 | | |
| EIP_is_at_parameqn | 1 | | |
| backtrace:parse_args | 1 | | |
| EIP_is_at_put_prev_task_fair | 51 | | |
| kernel_BUG_at_kernel/sched/core.c | 0 | 78 | 27 |
| EIP_is_at__schedule | 0 | 78 | 27 |
+---------------------------------------------------+------------+------------+-----------+
[ 0.537047] Testing ftrace regs(no arch support): PASSED
[ 0.740057] Testing tracer branch:
[ 0.830265] ------------[ cut here ]------------
[ 0.830889] kernel BUG at kernel/sched/core.c:2697!
[ 0.831656] invalid opcode: 0000 [#1] PREEMPT SMP
[ 0.832314] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.17.0-rc4-00046-g0d9e263 #2
[ 0.833195] task: 52024430 ti: 52034000 task.ti: 52034000
[ 0.833842] EIP: 0060:[<4cfaa8b6>] EFLAGS: 00010202 CPU: 0
[ 0.834500] EIP is at __schedule+0x8e/0x126d
[ 0.835019] EAX: 00000001 EBX: 00000001 ECX: 00000206 EDX: 52024430
[ 0.835766] ESI: 00000001 EDI: 00000000 EBP: 52035e80 ESP: 52035e0c
[ 0.836510] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
[ 0.837147] CR0: 8005003b CR2: ffffffff CR3: 0df58000 CR4: 000006d0
[ 0.837890] Stack:
[ 0.838143] 4df4c940 4df4c940 4e6fa080 4da171d4 52035e30 4bf1b8fc 4e6fa080 52501940
[ 0.839290] 52024430 52035e44 4bf1b8fc 4e6fa080 4da18b24 00000000 52035e78 4bebb4a4
[ 0.840000] 52035e78 00000206 00000000 00000000 52502308 52042d98 00000206 00000000
[ 0.840000] Call Trace:
[ 0.840000] [<4bf1b8fc>] ? trace_buffer_lock_reserve+0xf/0x31
[ 0.840000] [<4bf1b8fc>] ? trace_buffer_lock_reserve+0xf/0x31
[ 0.840000] [<4bebb4a4>] ? trace_hardirqs_on+0xb/0xd
[ 0.840000] [<4cfabb36>] schedule+0xa1/0xa4
[ 0.840000] [<4cfb39ca>] schedule_timeout+0x34f/0x37e
[ 0.840000] [<4bee1f73>] ? migrate_timer_list+0x247/0x247
[ 0.840000] [<4cfb3a4b>] schedule_timeout_uninterruptible+0x1a/0x1c
[ 0.840000] [<4bee4673>] msleep+0x17/0x1b
[ 0.840000] [<4bf1e321>] trace_selftest_startup_branch+0x34/0x72
[ 0.840000] [<4bf1e69e>] register_tracer+0x113/0x204
[ 0.840000] [<4dea2668>] ? stack_trace_init+0x77/0x77
[ 0.840000] [<4dea2695>] init_branch_tracer+0x2d/0x2f
[ 0.840000] [<4de7f00c>] do_one_initcall+0x12a/0x27b
[ 0.840000] [<4c415512>] ? strlen+0x9/0x1c
[ 0.840000] [<4be92117>] ? parse_args+0x36a/0x467
[ 0.840000] [<4de7f245>] kernel_init_freeable+0xe8/0x1aa
[ 0.840000] [<4cf87336>] kernel_init+0xe/0x13c
[ 0.840000] [<4cfb59a1>] ret_from_kernel_thread+0x21/0x30
[ 0.840000] [<4cf87328>] ? rest_init+0x12e/0x12e
[ 0.840000] Code: 0f b6 f3 89 f2 e8 d1 78 f7 fe 31 c9 b8 84 71 a1 4d 89 f2 e8 c3 78 f7 fe 8b 04 b5 10 99 a9 4d 40 84 db 89 04 b5 10 99 a9 4d 74 02 <0f> 0b 64 a1 b0 66 f4 4d 25 ff ff df 7f 31 db 48 74 0d 8b 45 ac
[ 0.840000] EIP: [<4cfaa8b6>] __schedule+0x8e/0x126d SS:ESP 0068:52035e0c
[ 0.840025] ---[ end trace 0d216f9877d1d8ba ]---
[ 0.840581] Kernel panic - not syncing: Fatal exception
git bisect start cac7f2429872d3733dc3f9915857b1691da2eb2f v3.17 --
git bisect bad bf10fa857f0604865006d9705e63415b9d4e0d62 # 00:42 0- 103 Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect good b528392669415dc1e53a047215e5ad6c2de879fc # 00:54 78+ 78 Merge tag 'pm+acpi-3.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
git bisect good 052db7ec86dff26f734031c3ef5c2c03a94af0af # 01:02 78+ 78 Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
git bisect good 77c688ac87183537ed0fb84ec2cb8fa8ec97c458 # 01:10 78+ 78 Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
git bisect good ebf546cc5391b9a8a17c1196b05b4357ef0138a2 # 01:23 78+ 78 Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect bad 197fe6b0e6843b6859c6a1436ff19e3c444c0502 # 01:28 0- 1 Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect good 13ead805c5a14b0e7ecd34f61404a5bfba655895 # 01:49 78+ 78 Merge branch 'perf-watchdog-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect bad faafcba3b5e15999cf75d5c5a513ac8e47e2545f # 01:54 0- 1 Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect good 9c368b5b6eccce1cbd7f68142106b3b4ddb1c5b5 # 02:12 78+ 78 sched, time: Fix lock inversion in thread_group_cputime()
git bisect bad a5e7be3b28a235108c59561bea55eea1072b23b0 # 02:16 12- 68 sched/deadline: Clear dl_entity params when setscheduling to different class
git bisect bad 0d9e26329b0c9263d4d9e0422d80a0e73268c52f # 02:21 0- 78 sched: Add default-disabled option to BUG() when stack end location is overwritten
git bisect good f3f1768f89d601ad29f4701deef91caaa82b9f57 # 02:30 78+ 78 sched/rt: Remove useless if from cleanup pick_next_task_rt()
git bisect good a15b12ac36ad4e7b856a4ae54937ae26a51aebad # 02:38 78+ 78 sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
git bisect good a70857e46dd13e87ae06bf0e64cb6a2d4f436265 # 02:48 78+ 78 sched: Add helper for task stack page overrun checking
# first bad commit: [0d9e26329b0c9263d4d9e0422d80a0e73268c52f] sched: Add default-disabled option to BUG() when stack end location is overwritten
git bisect good a70857e46dd13e87ae06bf0e64cb6a2d4f436265 # 03:06 234+ 312 sched: Add helper for task stack page overrun checking
git bisect bad 4fbe40970dc154aaeeda0584aab8913fc073127b # 03:08 190- 194 Add linux-next specific files for 20141031
git bisect bad 12d7aacab56e9ef185c3a5512e867bfd3a9504e4 # 03:14 0- 109 Merge tag 'staging-3.18-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
git bisect bad 4fbe40970dc154aaeeda0584aab8913fc073127b # 03:14 0- 236 Add linux-next specific files for 20141031
This script may reproduce the error.
----------------------------------------------------------------------------
#!/bin/bash
# Boot the given kernel image under QEMU/KVM with the options the LKP robot
# used, so the branch-tracer selftest BUG above can be reproduced.
kernel=$1

# QEMU invocation: 2 vCPUs, 320M RAM, serial console on stdio, no display.
kvm=(
	qemu-system-x86_64
	-cpu kvm64
	-enable-kvm
	-kernel $kernel
	-m 320
	-smp 2
	-net nic,vlan=1,model=e1000
	-net user,vlan=1
	-boot order=nc
	-no-reboot
	-watchdog i6300esb
	-rtc base=localtime
	-serial stdio
	-display none
	-monitor null
)

# Kernel command line: panic on hangs, oopses and softlockups so a failing
# boot terminates instead of wedging the VM.
append=(
	hung_task_panic=1
	earlyprintk=ttyS0,115200
	debug
	apic=debug
	sysrq_always_enabled
	rcupdate.rcu_cpu_stall_timeout=100
	panic=-1
	softlockup_panic=1
	nmi_watchdog=panic
	oops=panic
	load_ramdisk=2
	prompt_ramdisk=0
	console=ttyS0,115200
	console=tty0
	vga=normal
	root=/dev/ram0
	rw
	drbd.minor_count=8
)

"${kvm[@]}" --append "${append[*]}"
----------------------------------------------------------------------------
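The script takes the kernel image to boot as its only argument (for example
the bzImage built from the first bad commit); all QEMU options and the kernel
command line are fixed in the two arrays above.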
Thanks,
Fengguang
_______________________________________________
LKP mailing list
LKP@linux.intel.com