[fork] fa0c96ab2d: BUG: unable to handle kernel NULL pointer dereference at 0000000000000008
by kernel test robot
FYI, we noticed the following commit:
https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/vmap_stack
commit fa0c96ab2d9c97a1ac47b4f7ed7199f1eb143322 ("fork: Cache two thread stacks per cpu if CONFIG_VMAP_STACK is set")
on test machine: 1 thread qemu-system-x86_64 -enable-kvm -cpu Westmere with 320M memory
caused the changes below:
+------------------------------------------+------------+------------+
| | a550c94136 | fa0c96ab2d |
+------------------------------------------+------------+------------+
| boot_successes | 0 | 0 |
| boot_failures | 56 | 54 |
| BUG:unable_to_handle_kernel | 50 | 53 |
| Oops | 27 | 21 |
| RIP:__schedule | 10 | |
| RIP:number | 5 | 1 |
| backtrace:do_wait | 9 | |
| backtrace:SyS_wait4 | 9 | |
| PANIC:double_fault | 28 | 32 |
| RIP:symbol_string | 11 | 28 |
| Kernel_panic-not_syncing:Machine_halted | 28 | 32 |
| WARNING:at_mm/vmalloc.c:#__vunmap | 2 | |
| RIP:io_serial_out | 8 | |
| Kernel_panic-not_syncing:Fatal_exception | 24 | 20 |
| RIP:queued_spin_lock_slowpath | 1 | |
| RIP:io_serial_in | 4 | 1 |
| RIP:vmalloc_fault | 2 | 1 |
| backtrace:smpboot_thread_fn | 2 | |
| RIP:__lock_acquire | 13 | 12 |
| backtrace:async_run_entry_fn | 14 | 13 |
| INFO:trying_to_register_non-static_key | 1 | |
| RIP:do_raw_spin_trylock | 1 | |
| BUG:kernel_test_hang | 1 | |
| backtrace:compat_SyS_wait4 | 2 | |
| RIP:cont_add | 1 | |
| backtrace:core_sys_select | 1 | |
| backtrace:SyS_select | 1 | |
| backtrace:vfs_read | 1 | |
| backtrace:SyS_read | 1 | |
| RIP:no_context | 1 | |
| invoked_oom-killer:gfp_mask=0x | 1 | |
| Mem-Info | 1 | |
| Out_of_memory:Kill_process | 1 | |
| BUG:Bad_page_map_in_process | 1 | |
| backtrace:oom_reaper | 1 | |
| RIP:format_decode | 0 | 1 |
| RIP:vsnprintf | 0 | 1 |
| BUG:kernel_boot_hang | 0 | 1 |
| RIP:ptep_set_access_flags | 0 | 1 |
| RIP:do_raw_spin_lock | 0 | 1 |
| backtrace:schedule_timeout | 0 | 1 |
+------------------------------------------+------------+------------+
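For reference, the commit under test adds a small per-CPU cache of vmalloc'ed thread stacks, so an exiting task can hand its stack straight to the next fork() instead of round-tripping through vfree()/vmalloc(). A condensed sketch of the idea, following the shape of the code that later landed upstream in kernel/fork.c (NR_CACHED_STACKS and the this_cpu_xchg()/this_cpu_cmpxchg() pattern match that upstream version; the exact patch tested here may differ):

#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	void *stack;
	int i;

	/* Fast path: reuse a stack parked on this CPU by a dead task. */
	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;
		tsk->stack_vm_area = s;
		return s->addr;
	}

	/* Slow path: allocate a fresh vmapped stack with guard pages. */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP, PAGE_KERNEL,
				     0, node, __builtin_return_address(0));
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);
	return stack;
}

static void free_thread_stack(struct task_struct *tsk)
{
	int i;

	/* Park the stack in an empty per-cpu slot rather than vfree() it. */
	for (i = 0; i < NR_CACHED_STACKS; i++) {
		if (this_cpu_cmpxchg(cached_stacks[i], NULL,
				     tsk->stack_vm_area) == NULL)
			return;
	}

	vfree(tsk->stack);
}

Recycling stacks this way means a dead task's stack may be unmapped or reused well before the final put_task_struct(), which matters for anything that touches another task's stack after it exits.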
[ 14.488344] Write protecting the kernel read-only data: 20480k
[ 14.490449] Freeing unused kernel memory: 948K (ffff880001d13000 - ffff880001e00000)
[ 14.509877] Freeing unused kernel memory: 384K (ffff8800023a0000 - ffff880002400000)
[ 14.566870] BUG: unable to handle kernel NULL pointer dereference at 0000000000000008
[ 14.589161] IP: [<ffffffff8107c809>] do_exit+0x8a9/0x958
[ 14.589906] PGD f29f067 PUD f2aa067 PMD 0
[ 14.590531] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.591621] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.592395] PGD f29f067 PUD f2aa067 PMD 0
[ 14.593028] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.594134] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.595079] PGD f29f067 PUD f2aa067 PMD 0
[ 14.595835] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.597169] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.598111] PGD f29f067 PUD f2aa067 PMD 0
[ 14.598866] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.600137] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.619127] PGD f29f067 PUD f2aa067 PMD 0
[ 14.619856] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.621195] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.622145] PGD f29f067 PUD f2aa067 PMD 0
[ 14.622845] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.624059] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.624914] PGD f29f067 PUD f2aa067 PMD 0
[ 14.625607] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.626905] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.627791] PGD f29f067 PUD f2aa067 PMD 0
[ 14.646553] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.647925] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.648805] PGD f29f067 PUD f2aa067 PMD 0
[ 14.649429] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.650620] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.651477] PGD f29f067 PUD f2aa067 PMD 0
[ 14.652168] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.653374] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.654227] PGD f29f067 PUD f2aa067 PMD 0
[ 14.654906] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.674170] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.675147] PGD f29f067 PUD f2aa067 PMD 0
[ 14.675912] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.677243] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.678192] PGD f29f067 PUD f2aa067 PMD 0
[ 14.678905] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.680120] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.680971] PGD f29f067 PUD f2aa067 PMD 0
[ 14.681657] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.682861] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.701842] PGD f29f067 PUD f2aa067 PMD 0
[ 14.702613] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.703952] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.704907] PGD f29f067 PUD f2aa067 PMD 0
[ 14.705667] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.706912] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.707773] PGD f29f067 PUD f2aa067 PMD 0
[ 14.708460] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.709662] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.710552] PGD f29f067 PUD f2aa067 PMD 0
[ 14.741364] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.742708] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.743653] PGD f29f067 PUD f2aa067 PMD 0
[ 14.744409] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.745743] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.746696] PGD f29f067 PUD f2aa067 PMD 0
[ 14.747462] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.771833] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.772792] PGD f29f067 PUD f2aa067 PMD 0
[ 14.773565] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.774912] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.775873] PGD f29f067 PUD f2aa067 PMD 0
[ 14.776646] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.777986] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.799988] PGD f29f067 PUD f2aa067 PMD 0
[ 14.800757] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.802105] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.803060] PGD f29f067 PUD f2aa067 PMD 0
[ 14.803814] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.805163] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.826139] PGD f29f067 PUD f2aa067 PMD 0
[ 14.826825] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.828031] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.828972] PGD f29f067 PUD f2aa067 PMD 0
[ 14.829734] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.831070] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.832022] PGD f29f067 PUD f2aa067 PMD 0
[ 14.832779] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.855339] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.859358] PGD f29f067 PUD f2aa067 PMD 0
[ 14.860010] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.866515] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.870436] PGD f29f067 PUD f2aa067 PMD 0
[ 14.874316] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.877654] IP:
[ 14.877893] PANIC: double fault, error_code: 0x0
[ 14.880793] CPU: 0 PID: 149 Comm: mount Not tainted 4.7.0-rc4-00260-gfa0c96a #2
[ 14.882944] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014
[ 14.891703] task: ffff88000f1e5b00 ti: ffff88000f1e5b00 task.ti: ffff88000f1e5b00
[ 14.894869] RIP: 0010:[<ffffffff814003ee>] [<ffffffff814003ee>] symbol_string+0x39/0x68
[ 14.898367] RSP: 0000:ffffc900001d3fd0 EFLAGS: 00010083
[ 14.900249] RAX: 0000000000000053 RBX: ffffffff832c6196 RCX: ffff0a00ffffff05
[ 14.902452] RDX: ffffffff8103d7fd RSI: ffffffff8103d7fd RDI: ffffc900001d3fd1
[ 14.904593] RBP: ffffc900001d40d8 R08: ffffffff82217370 R09: 0000000000000020
[ 14.906701] R10: 00000000001d4200 R11: ffffffff832c6183 R12: ffffffff832c6560
[ 14.914898] R13: ffff0a00ffffff05 R14: 00000000ffff0a00 R15: 00000000000003e0
[ 14.917045] FS: 0000000000000000(0000) GS:ffff880013800000(0000) knlGS:0000000000000000
[ 14.920560] CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
[ 14.922558] CR2: ffffc900001d3fc8 CR3: 000000000f2b3000 CR4: 00000000000006f0
[ 14.929938] Stack:
[ 14.931428]
[ 14.931634] Call Trace:
[ 14.931957] <UNK>
[ 14.932338] Code: fb 48 89 d6 49 89 cd 48 8d bd f9 fe ff ff 48 81 ec f0 00 00 00 41 8a 00 3c 42 75 07 e8 3b 53 cd ff eb 14 3c 66 74 0b 3c 73 74 07 <e8> 0b 53 cd ff eb 05 e8 16 53 cd ff 48 8d 95 f9 fe ff ff 4c 89
[ 14.940956] Kernel panic - not syncing: Machine halted.
[ 14.941876] Kernel Offset: disabled
Elapsed time: 30
FYI, raw QEMU command line is:
qemu-system-x86_64 -enable-kvm -cpu Westmere -kernel /pkg/linux/x86_64-acpi-redef/gcc-6/fa0c96ab2d9c97a1ac47b4f7ed7199f1eb143322/vmlinuz-4.7.0-rc4-00260-gfa0c96a -append 'root=/dev/ram0 user=lkp job=/lkp/scheduled/vm-kbuild-yocto-ia32-9/rand_boot-1-yocto-minimal-i386.cgz-x86_64-acpi-redef-fa0c96ab2d9c97a1ac47b4f7ed7199f1eb143322-20160626-39885-prwrwn-1.yaml ARCH=x86_64 kconfig=x86_64-acpi-redef branch=linux-devel/devel-catchup-201606260900 commit=fa0c96ab2d9c97a1ac47b4f7ed7199f1eb143322 BOOT_IMAGE=/pkg/linux/x86_64-acpi-redef/gcc-6/fa0c96ab2d9c97a1ac47b4f7ed7199f1eb143322/vmlinuz-4.7.0-rc4-00260-gfa0c96a max_uptime=600 RESULT_ROOT=/result/boot/1/vm-kbuild-yocto-ia32/yocto-minimal-i386.cgz/x86_64-acpi-redef/gcc-6/fa0c96ab2d9c97a1ac47b4f7ed7199f1eb143322/0 LKP_SERVER=inn earlyprintk=ttyS0,115200 systemd.log_level=err debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw ip=::::vm-kbuild-yocto-ia32-9::dhcp drbd.minor_count=8' -initrd /fs/sda1/initrd-vm-kbuild-yocto-ia32-9 -m 320 -smp 1 -device e1000,netdev=net0 -netdev user,id=net0 -boot order=nc -no-reboot -watchdog i6300esb -rtc base=localtime -drive file=/fs/sda1/disk0-vm-kbuild-yocto-ia32-9,media=disk,if=virtio -pidfile /dev/shm/kboot/pid-vm-kbuild-yocto-ia32-9 -serial file:/dev/shm/kboot/serial-vm-kbuild-yocto-ia32-9 -daemonize -display none -monitor null
Thanks,
Xiaolong
[[DEBUG] force] 2642458962: BUG: unable to handle kernel paging request at ffffc90000997f18
by kernel test robot
FYI, we noticed the following commit:
https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/vmap_stack
commit 26424589626d7f82d09d4e7c0569f9487b2e810a ("[DEBUG] force-enable CONFIG_VMAP_STACK")
on test machine: vm-intel12-yocto-x86_64: 2 threads qemu-system-x86_64 -enable-kvm -cpu Nehalem with 320M memory
caused the changes below:
+------------------------------------------+------------+------------+
| | 03e46fd441 | 2642458962 |
+------------------------------------------+------------+------------+
| boot_successes | 18 | 21 |
| boot_failures | 4 | 9 |
| BUG:kernel_boot_hang | 2 | |
| WARNING:at_kernel/fork.c:#free_task | 2 | |
| backtrace:_do_fork | 2 | |
| backtrace:SyS_clone | 2 | |
| BUG:unable_to_handle_kernel | 0 | 9 |
| Oops | 0 | 9 |
| RIP:_raw_spin_lock_irq | 0 | 9 |
| Kernel_panic-not_syncing:Fatal_exception | 0 | 9 |
| backtrace:do_mount | 0 | 8 |
| backtrace:SyS_mount | 0 | 8 |
+------------------------------------------+------------+------------+
[ 4.310442] Freeing unused kernel memory: 1980K (ffff880002011000 - ffff880002200000)
[ 4.420716] UDF-fs: warning (device vdb): udf_fill_super: No partition found (2)
[ 4.422544] UDF-fs: warning (device vda): udf_fill_super: No partition found (2)
[ 4.425052] BUG: unable to handle kernel paging request at ffffc90000997f18
[ 4.426645] IP: [<ffffffff81a9ace0>] _raw_spin_lock_irq+0x2c/0x3d
[ 4.427869] PGD 1249e067 PUD 1249f067 PMD 11e4e067 PTE 0
[ 4.429245] Oops: 0002 [#1] SMP
[ 4.430086] Modules linked in:
[ 4.430992] CPU: 0 PID: 1741 Comm: mount Not tainted 4.7.0-rc4-00258-g26424589 #1
[ 4.432727] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014
[ 4.434646] task: ffff88000d950c80 ti: ffff88000d950c80 task.ti: ffff88000d950c80
[ 4.436406] RIP: 0010:[<ffffffff81a9ace0>] [<ffffffff81a9ace0>] _raw_spin_lock_irq+0x2c/0x3d
[ 4.438341] RSP: 0018:ffffc90000957c80 EFLAGS: 00010046
[ 4.439438] RAX: 0000000000000000 RBX: 7fffffffffffffff RCX: 0000000000000a66
[ 4.440735] RDX: 0000000000000001 RSI: ffff880013619bc0 RDI: ffffc90000997f18
[ 4.442035] RBP: ffffc90000957c88 R08: 0000000000019bc0 R09: ffffffff81200748
[ 4.443323] R10: ffffea0000474900 R11: 000000000001a2a0 R12: ffffc90000997f10
[ 4.444614] R13: 0000000000000002 R14: ffffc90000997f18 R15: 00000000ffffffea
[ 4.445896] FS: 00007f9ca6a32700(0000) GS:ffff880013600000(0000) knlGS:0000000000000000
[ 4.447690] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 4.448819] CR2: ffffc90000997f18 CR3: 000000000d87c000 CR4: 00000000000006f0
[ 4.450102] Stack:
[ 4.450810] ffffc90000997f18 ffffc90000957d00 ffffffff81a982eb 0000000000000246
[ 4.452827] 0000000000000000 ffffc90000957d00 ffffffff8112584b 0000000000000000
[ 4.454838] 0000000000000246 ffff88000e27f6bc 0000000000000000 ffff88000e27f080
[ 4.456845] Call Trace:
[ 4.457616] [<ffffffff81a982eb>] wait_for_common+0x44/0x197
[ 4.458719] [<ffffffff8112584b>] ? try_to_wake_up+0x2dd/0x2ef
[ 4.459877] [<ffffffff81a9845b>] wait_for_completion+0x1d/0x1f
[ 4.461027] [<ffffffff8111db10>] kthread_stop+0x82/0x10a
[ 4.462125] [<ffffffff81117f08>] destroy_workqueue+0x10d/0x1cd
[ 4.463347] [<ffffffff81445236>] xfs_destroy_mount_workqueues+0x49/0x64
[ 4.464620] [<ffffffff81445c03>] xfs_fs_fill_super+0x2c0/0x49c
[ 4.465807] [<ffffffff8123547a>] mount_bdev+0x143/0x195
[ 4.466937] [<ffffffff81445943>] ? xfs_test_remount_options+0x5b/0x5b
[ 4.468727] [<ffffffff81444568>] xfs_fs_mount+0x15/0x17
[ 4.469838] [<ffffffff8123614a>] mount_fs+0x15/0x8c
[ 4.470882] [<ffffffff8124cfc4>] vfs_kern_mount+0x6a/0xfe
[ 4.472005] [<ffffffff8124fc2f>] do_mount+0x985/0xa9a
[ 4.473078] [<ffffffff811e0846>] ? strndup_user+0x3a/0x6a
[ 4.474193] [<ffffffff8124ff6a>] SyS_mount+0x77/0x9f
[ 4.475255] [<ffffffff81a9b081>] entry_SYSCALL_64_fastpath+0x1f/0xbd
[ 4.476463] Code: 66 66 66 90 55 48 89 e5 50 48 89 7d f8 fa 66 66 90 66 66 90 e8 2d 0a 70 ff 65 ff 05 73 18 57 7e 31 c0 ba 01 00 00 00 48 8b 7d f8 <f0> 0f b1 17 85 c0 74 07 89 c6 e8 3e 20 6a ff c9 c3 66 66 66 66
[ 4.484413] RIP [<ffffffff81a9ace0>] _raw_spin_lock_irq+0x2c/0x3d
[ 4.485639] RSP <ffffc90000957c80>
[ 4.486509] CR2: ffffc90000997f18
[ 4.487366] ---[ end trace 79763b41869f2580 ]---
[ 4.488367] Kernel panic - not syncing: Fatal exception
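The trace is consistent with a hazard this vmap_stack branch exposed and that was later addressed upstream (first by pinning the stack with try_get_task_stack() in to_live_kthread(), eventually by moving struct kthread off the stack entirely): in 4.7-era kernel/kthread.c, struct kthread — including the 'exited' completion — lived on the kthread's own stack, and kthread_stop() took the completion's wait-queue lock through that stack. Once stacks are vmapped and freed or recycled early, that lock can sit in an unmapped vmalloc page; note CR2 == RDI == ffffc90000997f18, a vmalloc address, with PTE 0 in the oops above. A condensed sketch of the 4.7-era pattern, with the hazard annotated:

/* Condensed from 4.7-era kernel/kthread.c; not the patch under test. */
struct kthread {
	unsigned long flags;
	void *data;
	struct completion parked;
	struct completion exited;	/* embedded completion */
};

static int kthread(void *_create)
{
	struct kthread self;	/* on the kthread's own (vmapped) stack */

	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* run the thread function, then exit; do_exit() signals
	 * self.exited through the vfork_done completion. */
	do_exit(0);
}

int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	get_task_struct(k);
	kthread = to_live_kthread(k);	/* derived from k->vfork_done */
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		wake_up_process(k);
		/*
		 * Takes spin_lock_irq(&kthread->exited.wait.lock). That
		 * lock lives on k's stack, a vmapped address such as
		 * ffffc90000997f18. If the stack page is gone by now,
		 * this is the write fault in _raw_spin_lock_irq seen in
		 * the oops above.
		 */
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);
	return ret;
}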
FYI, raw QEMU command line is:
qemu-system-x86_64 -enable-kvm -cpu Nehalem -kernel /pkg/linux/x86_64-lkp/gcc-4.9/26424589626d7f82d09d4e7c0569f9487b2e810a/vmlinuz-4.7.0-rc4-00258-g26424589 -append 'root=/dev/ram0 user=lkp job=/lkp/scheduled/vm-intel12-yocto-x86_64-10/validate_boot-1-yocto-minimal-x86_64.cgz-x86_64-lkp-26424589626d7f82d09d4e7c0569f9487b2e810a-20160627-40546-2cgqlo-31.yaml ARCH=x86_64 kconfig=x86_64-lkp branch=linux-devel/devel-hourly-2016062700 commit=26424589626d7f82d09d4e7c0569f9487b2e810a BOOT_IMAGE=/pkg/linux/x86_64-lkp/gcc-4.9/26424589626d7f82d09d4e7c0569f9487b2e810a/vmlinuz-4.7.0-rc4-00258-g26424589 max_uptime=600 RESULT_ROOT=/result/boot/1/vm-intel12-yocto-x86_64/yocto-minimal-x86_64.cgz/x86_64-lkp/gcc-4.9/26424589626d7f82d09d4e7c0569f9487b2e810a/31 LKP_SERVER=inn earlyprintk=ttyS0,115200 systemd.log_level=err debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw ip=::::vm-intel12-yocto-x86_64-10::dhcp drbd.minor_count=8' -initrd /fs/KVM/initrd-vm-intel12-yocto-x86_64-10 -m 320 -smp 2 -device e1000,netdev=net0 -netdev user,id=net0 -boot order=nc -no-reboot -watchdog i6300esb -rtc base=localtime -drive file=/fs/KVM/disk0-vm-intel12-yocto-x86_64-10,media=disk,if=virtio -drive file=/fs/KVM/disk1-vm-intel12-yocto-x86_64-10,media=disk,if=virtio -pidfile /dev/shm/kboot/pid-vm-intel12-yocto-x86_64-10 -serial file:/dev/shm/kboot/serial-vm-intel12-yocto-x86_64-10 -daemonize -display none -monitor null
Thanks,
Kernel Test Robot
[x86] ddf4847e6f: BUG: unable to handle kernel paging request at ffffffff03862040
by kernel test robot
FYI, we noticed the following commit:
https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/vmap_stack
commit ddf4847e6f114c522fefb24c16fc7a1d75138f9f ("x86: Move thread_info into task_struct")
on test machine: vm-kbuild-yocto-x86_64: 1 thread qemu-system-x86_64 -enable-kvm -cpu SandyBridge with 320M memory
caused the changes below:
+------------------------------------------+------------+------------+
| | aeea9c1c41 | ddf4847e6f |
+------------------------------------------+------------+------------+
| boot_successes | 6 | 0 |
| boot_failures | 0 | 8 |
| BUG:unable_to_handle_kernel | 0 | 8 |
| Oops:#[##] | 0 | 8 |
| RIP:entry_SYSCALL_64_after_swapgs | 0 | 8 |
| Kernel_panic-not_syncing:Fatal_exception | 0 | 8 |
+------------------------------------------+------------+------------+
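The failing RIP sits in the syscall entry path right after SWAPGS, where this commit changes how thread_info flags are reached: instead of masking the stack pointer to find thread_info at the base of the stack, the entry code loads the current task from per-cpu data and dereferences the thread_info now embedded at the start of task_struct. The Code: bytes in the oops below decode to exactly that sequence (mov %gs:current_task,%r11; testl $mask,(%r11)), and the fault address is consistent with the gs-relative load: CR2 (ffffffff03862040) equals the GS base shown (ffffffff81c2f000) plus the sign-extended displacement in the mov (0xffffffff81c33040), i.e. the per-cpu symbol address was added on top of a non-zero GS base. A condensed C-side sketch of the layout change, matching what later became CONFIG_THREAD_INFO_IN_TASK upstream (the branch under test may differ in detail):

struct task_struct {
	/*
	 * Must be the first member: both current_thread_info() and the
	 * asm entry code reach thread_info by treating a task_struct
	 * pointer as a thread_info pointer.
	 */
	struct thread_info	thread_info;
	volatile long		state;
	/* remainder of task_struct unchanged */
};

static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}

/*
 * thread_info is no longer found by masking %rsp, so the entry path must
 * first load current from PER_CPU_VAR(current_task); a bad GS base or
 * per-cpu setup therefore faults here rather than later.
 */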
[ 10.105818] Freeing unused kernel memory: 1956K (ffff880001617000 - ffff880001800000)
[ 10.113305] Freeing unused kernel memory: 836K (ffff880001b2f000 - ffff880001c00000)
[ 10.132456] x86/mm: Checked W+X mappings: passed, no W+X pages found.
[ 10.134895] BUG: unable to handle kernel paging request at ffffffff03862040
[ 10.136663] IP: [<ffffffff81610249>] entry_SYSCALL_64_after_swapgs+0x36/0x4c
[ 10.138365] PGD 1c0d067 PUD 0
[ 10.139426] Oops: 0000 [#1]
[ 10.140235] Modules linked in:
[ 10.141200] CPU: 0 PID: 1 Comm: init Not tainted 4.7.0-rc4-00254-gddf4847 #1
[ 10.142752] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014
[ 10.147089] task: ffff88001205c040 ti: ffff88001205c040 task.ti: ffff88001205c040
[ 10.148945] RIP: 0010:[<ffffffff81610249>] [<ffffffff81610249>] entry_SYSCALL_64_after_swapgs+0x36/0x4c
[ 10.151240] RSP: 0018:ffff880012063f58 EFLAGS: 00010082
[ 10.152485] RAX: 000000000000000c RBX: 0000000000000008 RCX: 00007f9b93195c9a
[ 10.154058] RDX: 0000000000000000 RSI: 00007f9b931994a0 RDI: 0000000000000000
[ 10.155635] RBP: 00000000078bfbff R08: 00007ffe3cb83479 R09: 00007ffe3cbef000
[ 10.157205] R10: 000000000000037f R11: 0000000000000246 R12: 00007ffe3cb83489
[ 10.158778] R13: 0000000000000000 R14: 00000000004028c4 R15: 0000000000001000
[ 10.160481] FS: 0000000000000000(0000) GS:ffffffff81c2f000(0000) knlGS:0000000000000000
[ 10.163887] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 10.165221] CR2: ffffffff03862040 CR3: 0000000013928000 CR4: 00000000000406b0
[ 10.166802] Stack:
[ 10.167469] 00007f9b93181a18 00007f9b93181320 00007f9b93181a18 0000000000000046
[ 10.169734] ffff880012063f98 ffffffff810a5c6a 0000000000000246 000000000000037f
[ 10.171982] 00007ffe3cbef000 00007ffe3cb83479 ffffffffffffffda 00007f9b93195c9a
[ 10.174223] Call Trace:
[ 10.174990] [<ffffffff810a5c6a>] ? trace_hardirqs_off_caller+0x36/0xa4
[ 10.176457] Code: 25 84 a3 c2 81 e8 f4 0d 9f ff 6a 2b ff 34 25 00 80 c2 81 41 53 6a 33 51 50 57 56 52 51 6a da 41 50 41 51 41 52 41 53 48 83 ec 30 <65> 4c 8b 1c 25 40 30 c3 81 41 f7 03 ff ff 08 10 0f 85 c2 00 00
[ 10.194036] RIP [<ffffffff81610249>] entry_SYSCALL_64_after_swapgs+0x36/0x4c
[ 10.195727] RSP <ffff880012063f58>
[ 10.196655] CR2: ffffffff03862040
[ 10.197564] ---[ end trace bedce588960ef6c2 ]---
[ 10.198696] Kernel panic - not syncing: Fatal exception
FYI, raw QEMU command line is:
qemu-system-x86_64 -enable-kvm -cpu SandyBridge -kernel /pkg/linux/x86_64-randconfig-h0-06262018/gcc-6/ddf4847e6f114c522fefb24c16fc7a1d75138f9f/vmlinuz-4.7.0-rc4-00254-gddf4847 -append 'root=/dev/ram0 user=lkp job=/lkp/scheduled/vm-kbuild-yocto-x86_64-51/bisect_boot-1-yocto-minimal-x86_64.cgz-x86_64-randconfig-h0-06262018-ddf4847e6f114c522fefb24c16fc7a1d75138f9f-20160627-110660-dxqka-0.yaml ARCH=x86_64 kconfig=x86_64-randconfig-h0-06262018 branch=linux-devel/devel-catchup-201606270129 commit=ddf4847e6f114c522fefb24c16fc7a1d75138f9f BOOT_IMAGE=/pkg/linux/x86_64-randconfig-h0-06262018/gcc-6/ddf4847e6f114c522fefb24c16fc7a1d75138f9f/vmlinuz-4.7.0-rc4-00254-gddf4847 max_uptime=600 RESULT_ROOT=/result/boot/1/vm-kbuild-yocto-x86_64/yocto-minimal-x86_64.cgz/x86_64-randconfig-h0-06262018/gcc-6/ddf4847e6f114c522fefb24c16fc7a1d75138f9f/0 LKP_SERVER=inn earlyprintk=ttyS0,115200 systemd.log_level=err debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw ip=::::vm-kbuild-yocto-x86_64-51::dhcp drbd.minor_count=8' -initrd /fs/sdf1/initrd-vm-kbuild-yocto-x86_64-51 -m 320 -smp 1 -device e1000,netdev=net0 -netdev user,id=net0 -boot order=nc -no-reboot -watchdog i6300esb -rtc base=localtime -drive file=/fs/sdf1/disk0-vm-kbuild-yocto-x86_64-51,media=disk,if=virtio -pidfile /dev/shm/kboot/pid-vm-kbuild-yocto-x86_64-51 -serial file:/dev/shm/kboot/serial-vm-kbuild-yocto-x86_64-51 -daemonize -display none -monitor null
Thanks,
Kernel Test Robot
[lkp] [sched/fair] 0cef819730: netperf.Throughput_Mbps -23.7% regression
by kernel test robot
FYI, we noticed a -23.7% regression of netperf.Throughput_Mbps due to commit:
commit 0cef819730f7f67f8d2bec642e1ac1f59febb4eb ("sched/fair: Fix calc_cfs_shares()")
https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/urgent
in testcase: netperf
on test machine: lkp-hsw-d01: 8 threads Haswell with 8G memory
with the following parameters: cluster=cs-localhost/cpufreq_governor=performance/ip=ipv4/nr_threads=200%/runtime=300s/send_size=10K/test=SCTP_STREAM_MANY
In addition, the commit also has a significant impact on the following tests:
+------------------+----------------------------------------------------------------------------------------------------+
| testcase: change | ebizzy: ebizzy.throughput.per_thread.max -39.6% improvement |
| test machine | ivb42: 48 threads 2 sockets Xeon E5 (Ivytown Ivy Bridge-EP) with 64G memory |
| test parameters | cpufreq_governor=performance |
| | duration=10s |
| | iterations=100x |
| | nr_threads=200% |
+------------------+----------------------------------------------------------------------------------------------------+
| testcase: change | plzip: plzip.throughput 2.5% improvement |
| test machine | lkp-hsx04: 144 threads Brickland Haswell-EX with 512G memory |
| test parameters | cpufreq_governor=performance |
| | nr_threads=100% |
+------------------+----------------------------------------------------------------------------------------------------+
| testcase: change | netperf: netperf.Throughput_Mbps -22.4% regression |
| test machine | lkp-bdw-de1: 16 threads Broadwell-DE with 8G memory |
| test parameters | cluster=cs-localhost |
| | cpufreq_governor=performance |
| | ip=ipv4 |
| | nr_threads=200% |
| | runtime=300s |
| | send_size=10K |
| | test=SCTP_STREAM_MANY |
+------------------+----------------------------------------------------------------------------------------------------+
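The patch under test modifies calc_cfs_shares(), which computes each CPU's slice of a task group's shares from the ratio of locally queued weight to the group's total weight. For reference, the v4.7 function it changes (condensed from kernel/sched/fair.c; the patch itself is not reproduced here):

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long tg_weight, load, shares;

	/*
	 * Approximate the group's total weight: take the PELT group load
	 * average, but substitute this cfs_rq's instantaneous weight for
	 * its (possibly stale) contribution.
	 */
	tg_weight  = atomic_long_read(&tg->load_avg);
	tg_weight -= cfs_rq->tg_load_avg_contrib;
	tg_weight += cfs_rq->load.weight;

	load = cfs_rq->load.weight;

	/* This CPU's proportional slice of the group's shares. */
	shares = (tg->shares * load);
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}

The load and tg_weight terms feeding this ratio are exactly the quantities whose scaling shifts in the sched_debug.cfs_rq and cpu_load figures further below.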
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Details are as follows:
-------------------------------------------------------------------------------------------------->
To reproduce:
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
=========================================================================================
cluster/compiler/cpufreq_governor/ip/kconfig/nr_threads/rootfs/runtime/send_size/tbox_group/test/testcase:
cs-localhost/gcc-4.9/performance/ipv4/x86_64-rhel/200%/debian-x86_64-2015-02-07.cgz/300s/10K/lkp-hsw-d01/SCTP_STREAM_MANY/netperf
commit:
0329dacce8 ("sched/fair: Fix effective_load()")
0cef819730 ("sched/fair: Fix calc_cfs_shares()")
0329dacce87d6741 0cef819730f7f67f8d2bec642e
---------------- --------------------------
fail:runs %reproduction fail:runs
| | |
:4 25% 1:4 kmsg.DHCP/BOOTP:Reply_not_for_us,op[#]xid[a99a731]
%stddev %change %stddev
\ | \
5238 ± 0% -23.7% 3995 ± 2% netperf.Throughput_Mbps
39356248 ± 0% -100.0% 13112 ± 7% netperf.time.involuntary_context_switches
395.00 ± 0% +32.8% 524.50 ± 1% netperf.time.percent_of_cpu_this_job_got
1159 ± 0% +32.9% 1541 ± 1% netperf.time.system_time
30.44 ± 1% +44.1% 43.86 ± 2% netperf.time.user_time
1856009 ± 11% +641.3% 13757673 ± 2% netperf.time.voluntary_context_switches
7674 ± 7% -12.1% 6745 ± 4% slabinfo.anon_vma_chain.active_objs
7705 ± 8% -12.4% 6746 ± 4% slabinfo.anon_vma_chain.num_objs
43984820 ± 0% -21.7% 34430029 ± 2% softirqs.NET_RX
136049 ± 3% -54.4% 62074 ± 14% softirqs.RCU
19.25 ± 2% +9.1% 21.00 ± 0% vmstat.procs.r
281069 ± 0% -64.5% 99885 ± 2% vmstat.system.cs
3.949e+08 ± 0% -23.2% 3.033e+08 ± 2% proc-vmstat.numa_hit
3.949e+08 ± 0% -23.2% 3.033e+08 ± 2% proc-vmstat.numa_local
1.013e+09 ± 1% -28.8% 7.213e+08 ± 2% proc-vmstat.pgalloc_dma32
1.247e+09 ± 0% -29.7% 8.766e+08 ± 2% proc-vmstat.pgalloc_normal
2.26e+09 ± 0% -29.3% 1.598e+09 ± 2% proc-vmstat.pgfree
0.05 ± 0% +45.0% 0.07 ± 11% turbostat.CPU%c1
0.65 ± 0% +32.0% 0.85 ± 1% turbostat.CPU%c7
58.58 ± 0% -8.9% 53.34 ± 1% turbostat.CorWatt
0.58 ± 5% +35.1% 0.78 ± 4% turbostat.Pkg%pc2
66.81 ± 0% -6.6% 62.43 ± 0% turbostat.PkgWatt
17187 ± 12% +60.8% 27643 ± 23% cpuidle.C1E-HSW.time
127946 ± 7% +20.3% 153920 ± 11% cpuidle.C3-HSW.time
377.00 ± 6% +26.1% 475.50 ± 7% cpuidle.C3-HSW.usage
218.00 ± 7% +12.5% 245.25 ± 3% cpuidle.C6-HSW.usage
16143034 ± 0% +33.2% 21498649 ± 1% cpuidle.C7s-HSW.time
17605 ± 0% +32.6% 23344 ± 1% cpuidle.C7s-HSW.usage
4472 ±101% +1802.2% 85081 ± 97% cpuidle.POLL.time
4.586e+11 ± 1% -23.7% 3.499e+11 ± 3% perf-stat.L1-dcache-load-misses
1.394e+12 ± 0% -23.2% 1.071e+12 ± 2% perf-stat.L1-dcache-loads
1.101e+12 ± 0% -23.3% 8.446e+11 ± 2% perf-stat.L1-dcache-stores
7.379e+10 ± 0% -23.5% 5.646e+10 ± 13% perf-stat.L1-icache-load-misses
8.397e+08 ± 25% +1022.0% 9.421e+09 ± 11% perf-stat.LLC-load-misses
8.37e+10 ± 0% -13.4% 7.252e+10 ± 2% perf-stat.LLC-loads
7.722e+08 ± 22% +789.9% 6.872e+09 ± 5% perf-stat.LLC-store-misses
3.638e+10 ± 0% -22.3% 2.827e+10 ± 2% perf-stat.LLC-stores
7.208e+11 ± 0% -23.6% 5.504e+11 ± 2% perf-stat.branch-instructions
8.614e+09 ± 1% -27.1% 6.279e+09 ± 1% perf-stat.branch-load-misses
7.213e+11 ± 0% -23.7% 5.505e+11 ± 2% perf-stat.branch-loads
8.595e+09 ± 1% -26.8% 6.294e+09 ± 1% perf-stat.branch-misses
1.672e+09 ± 21% +879.5% 1.638e+10 ± 4% perf-stat.cache-misses
1.752e+11 ± 0% -19.7% 1.407e+11 ± 2% perf-stat.cache-references
85431681 ± 0% -64.5% 30358026 ± 2% perf-stat.context-switches
1.487e+09 ± 4% -38.2% 9.194e+08 ± 3% perf-stat.dTLB-load-misses
1.395e+12 ± 0% -23.3% 1.071e+12 ± 2% perf-stat.dTLB-loads
1.092e+09 ± 12% -38.8% 6.685e+08 ± 26% perf-stat.dTLB-store-misses
1.098e+12 ± 0% -23.1% 8.444e+11 ± 2% perf-stat.dTLB-stores
7.807e+08 ± 7% -39.8% 4.702e+08 ± 10% perf-stat.iTLB-load-misses
3.924e+12 ± 0% -23.2% 3.011e+12 ± 2% perf-stat.instructions
321108 ± 0% +4.4% 335205 ± 0% perf-stat.minor-faults
8.198e+08 ± 24% +1057.7% 9.491e+09 ± 11% perf-stat.node-loads
7.678e+08 ± 22% +792.1% 6.849e+09 ± 5% perf-stat.node-stores
321076 ± 0% +4.4% 335171 ± 0% perf-stat.page-faults
34249 ± 19% +2035.3% 731325 ± 4% sched_debug.cfs_rq:/.MIN_vruntime.avg
71932 ± 12% +1049.4% 826757 ± 1% sched_debug.cfs_rq:/.MIN_vruntime.max
34055 ± 14% +494.7% 202535 ± 29% sched_debug.cfs_rq:/.MIN_vruntime.stddev
1409000 ± 3% -84.4% 219938 ± 3% sched_debug.cfs_rq:/.load.avg
1911874 ± 6% -85.7% 273218 ± 9% sched_debug.cfs_rq:/.load.max
950054 ± 9% -83.6% 156098 ± 5% sched_debug.cfs_rq:/.load.min
436947 ± 9% -91.0% 39520 ± 15% sched_debug.cfs_rq:/.load.stddev
978.81 ± 0% -77.7% 218.13 ± 1% sched_debug.cfs_rq:/.load_avg.avg
1133 ± 5% -68.4% 357.96 ± 19% sched_debug.cfs_rq:/.load_avg.max
873.67 ± 0% -81.8% 158.67 ± 3% sched_debug.cfs_rq:/.load_avg.min
34249 ± 19% +2035.3% 731325 ± 4% sched_debug.cfs_rq:/.max_vruntime.avg
71932 ± 12% +1049.3% 826757 ± 1% sched_debug.cfs_rq:/.max_vruntime.max
34055 ± 14% +494.7% 202535 ± 29% sched_debug.cfs_rq:/.max_vruntime.stddev
76109 ± 0% +975.0% 818186 ± 1% sched_debug.cfs_rq:/.min_vruntime.avg
78283 ± 0% +956.7% 827244 ± 1% sched_debug.cfs_rq:/.min_vruntime.max
74946 ± 0% +980.7% 809948 ± 1% sched_debug.cfs_rq:/.min_vruntime.min
1152 ± 15% +431.4% 6124 ± 51% sched_debug.cfs_rq:/.min_vruntime.stddev
1.36 ± 3% +25.7% 1.71 ± 3% sched_debug.cfs_rq:/.nr_running.avg
0.42 ± 9% -37.5% 0.26 ± 35% sched_debug.cfs_rq:/.nr_running.stddev
885.45 ± 0% -82.8% 152.63 ± 4% sched_debug.cfs_rq:/.runnable_load_avg.avg
939.21 ± 0% -81.3% 175.71 ± 3% sched_debug.cfs_rq:/.runnable_load_avg.max
817.71 ± 2% -87.5% 102.08 ± 28% sched_debug.cfs_rq:/.runnable_load_avg.min
834.75 ±107% +758.4% 7165 ± 49% sched_debug.cfs_rq:/.spread0.max
-2503 ±-26% +304.8% -10132 ±-83% sched_debug.cfs_rq:/.spread0.min
1153 ± 15% +431.2% 6125 ± 51% sched_debug.cfs_rq:/.spread0.stddev
35.61 ± 8% -15.3% 30.16 ± 9% sched_debug.cfs_rq:/.util_avg.stddev
640322 ± 15% -48.5% 329643 ± 26% sched_debug.cpu.avg_idle.avg
936653 ± 2% -19.0% 758535 ± 19% sched_debug.cpu.avg_idle.max
303015 ± 69% -92.6% 22535 ±109% sched_debug.cpu.avg_idle.min
876.64 ± 1% -82.4% 154.60 ± 1% sched_debug.cpu.cpu_load[0].avg
940.50 ± 0% -81.4% 174.96 ± 3% sched_debug.cpu.cpu_load[0].max
781.58 ± 8% -86.6% 104.75 ± 11% sched_debug.cpu.cpu_load[0].min
55.31 ± 36% -58.3% 23.05 ± 20% sched_debug.cpu.cpu_load[0].stddev
885.32 ± 0% -82.4% 155.97 ± 1% sched_debug.cpu.cpu_load[1].avg
929.00 ± 0% -81.3% 173.33 ± 3% sched_debug.cpu.cpu_load[1].max
810.17 ± 3% -84.8% 123.25 ± 5% sched_debug.cpu.cpu_load[1].min
37.05 ± 29% -56.9% 15.98 ± 23% sched_debug.cpu.cpu_load[1].stddev
883.74 ± 0% -82.3% 156.01 ± 0% sched_debug.cpu.cpu_load[2].avg
918.62 ± 0% -81.3% 171.71 ± 4% sched_debug.cpu.cpu_load[2].max
815.62 ± 2% -83.6% 133.75 ± 3% sched_debug.cpu.cpu_load[2].min
31.38 ± 21% -61.6% 12.05 ± 28% sched_debug.cpu.cpu_load[2].stddev
879.80 ± 0% -82.2% 156.20 ± 0% sched_debug.cpu.cpu_load[3].avg
909.04 ± 0% -81.3% 170.00 ± 4% sched_debug.cpu.cpu_load[3].max
819.21 ± 1% -82.9% 140.08 ± 3% sched_debug.cpu.cpu_load[3].min
27.59 ± 20% -66.0% 9.39 ± 33% sched_debug.cpu.cpu_load[3].stddev
876.12 ± 0% -82.1% 156.95 ± 0% sched_debug.cpu.cpu_load[4].avg
902.17 ± 0% -81.3% 168.67 ± 3% sched_debug.cpu.cpu_load[4].max
819.79 ± 1% -82.4% 144.42 ± 3% sched_debug.cpu.cpu_load[4].min
25.51 ± 20% -69.7% 7.72 ± 41% sched_debug.cpu.cpu_load[4].stddev
1479789 ± 4% -84.7% 226291 ± 3% sched_debug.cpu.load.avg
1955568 ± 3% -84.0% 313014 ± 21% sched_debug.cpu.load.max
908371 ± 8% -82.9% 155654 ± 8% sched_debug.cpu.load.min
462669 ± 7% -88.6% 52549 ± 41% sched_debug.cpu.load.stddev
5283149 ± 0% -64.5% 1876874 ± 2% sched_debug.cpu.nr_switches.avg
6042769 ± 3% -64.7% 2132662 ± 3% sched_debug.cpu.nr_switches.max
5074839 ± 0% -66.0% 1723693 ± 1% sched_debug.cpu.nr_switches.min
315533 ± 16% -60.2% 125660 ± 15% sched_debug.cpu.nr_switches.stddev
18.91 ± 1% +70.5% 32.25 ± 3% perf-profile.cycles-pp.___sys_recvmsg.__sys_recvmsg.sys_recvmsg.entry_SYSCALL_64_fastpath
77.21 ± 0% -16.8% 64.27 ± 1% perf-profile.cycles-pp.___sys_sendmsg.__sys_sendmsg.sys_sendmsg.entry_SYSCALL_64_fastpath
5.63 ± 5% -33.2% 3.76 ± 7% perf-profile.cycles-pp.__alloc_pages_nodemask.alloc_kmem_pages_node.kmalloc_large_node.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34
5.76 ± 4% -20.1% 4.60 ± 2% perf-profile.cycles-pp.__alloc_skb._sctp_make_chunk.sctp_make_datafrag_empty.sctp_datamsg_from_user.sctp_sendmsg
1.89 ± 2% -41.3% 1.11 ± 14% perf-profile.cycles-pp.__alloc_skb.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24
12.31 ± 1% +42.2% 17.51 ± 2% perf-profile.cycles-pp.__do_softirq.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip.ip_finish_output2
4.45 ± 3% -27.8% 3.21 ± 3% perf-profile.cycles-pp.__do_softirq.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip.sctp_inq_push
3.42 ± 3% -23.9% 2.60 ± 5% perf-profile.cycles-pp.__free_kmem_pages.kfree.skb_free_head.skb_release_data.skb_release_all
3.37 ± 3% -23.6% 2.58 ± 5% perf-profile.cycles-pp.__free_pages.__free_kmem_pages.kfree.skb_free_head.skb_release_data
3.35 ± 3% -23.9% 2.55 ± 5% perf-profile.cycles-pp.__free_pages_ok.__free_pages.__free_kmem_pages.kfree.skb_free_head
4.57 ± 7% -21.6% 3.58 ± 2% perf-profile.cycles-pp.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb._sctp_make_chunk.sctp_make_datafrag_empty
1.59 ± 4% -62.5% 0.60 ± 12% perf-profile.cycles-pp.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb.sctp_packet_transmit.sctp_outq_flush
4.66 ± 7% -21.6% 3.65 ± 2% perf-profile.cycles-pp.__kmalloc_reserve.isra.34.__alloc_skb._sctp_make_chunk.sctp_make_datafrag_empty.sctp_datamsg_from_user
1.62 ± 3% -54.9% 0.73 ± 30% perf-profile.cycles-pp.__kmalloc_reserve.isra.34.__alloc_skb.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork
12.44 ± 1% +41.7% 17.63 ± 2% perf-profile.cycles-pp.__local_bh_enable_ip.ip_finish_output2.ip_finish_output.ip_output.ip_local_out
4.57 ± 4% -27.8% 3.30 ± 3% perf-profile.cycles-pp.__local_bh_enable_ip.sctp_inq_push.sctp_backlog_rcv.__release_sock.release_sock
16.03 ± 2% +24.8% 20.01 ± 2% perf-profile.cycles-pp.__netif_receive_skb.process_backlog.net_rx_action.__do_softirq.do_softirq_own_stack
15.98 ± 2% +24.9% 19.96 ± 2% perf-profile.cycles-pp.__netif_receive_skb_core.__netif_receive_skb.process_backlog.net_rx_action.__do_softirq
0.00 ± -1% +Inf% 1.56 ± 4% perf-profile.cycles-pp.__release_sock.release_sock.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
24.48 ± 3% -16.7% 20.40 ± 1% perf-profile.cycles-pp.__release_sock.release_sock.sctp_sendmsg.inet_sendmsg.sock_sendmsg
1.13 ± 2% -100.0% 0.00 ± -1% perf-profile.cycles-pp.__schedule.preempt_schedule_common._cond_resched.__release_sock.release_sock
1.62 ± 5% -51.2% 0.79 ± 4% perf-profile.cycles-pp.__schedule.schedule.schedule_timeout.sctp_skb_recv_datagram.sctp_recvmsg
19.51 ± 1% +68.0% 32.78 ± 3% perf-profile.cycles-pp.__sys_recvmsg.sys_recvmsg.entry_SYSCALL_64_fastpath
78.31 ± 0% -16.7% 65.22 ± 1% perf-profile.cycles-pp.__sys_sendmsg.sys_sendmsg.entry_SYSCALL_64_fastpath
1.95 ± 4% -63.3% 0.72 ± 12% perf-profile.cycles-pp.__wake_up_common.__wake_up_sync_key.sctp_data_ready.sctp_ulpq_tail_event.sctp_ulpq_tail_data
2.06 ± 8% -60.0% 0.82 ± 19% perf-profile.cycles-pp.__wake_up_sync_key.sctp_data_ready.sctp_ulpq_tail_event.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24
1.22 ± 2% -100.0% 0.00 ± -1% perf-profile.cycles-pp._cond_resched.__release_sock.release_sock.sctp_sendmsg.inet_sendmsg
1.09 ± 5% -25.9% 0.81 ± 14% perf-profile.cycles-pp._raw_spin_lock_irqsave.get_page_from_freelist.__alloc_pages_nodemask.alloc_kmem_pages_node.kmalloc_large_node
7.25 ± 3% -16.7% 6.04 ± 2% perf-profile.cycles-pp._sctp_make_chunk.sctp_make_datafrag_empty.sctp_datamsg_from_user.sctp_sendmsg.inet_sendmsg
5.92 ± 6% -33.6% 3.92 ± 6% perf-profile.cycles-pp.alloc_kmem_pages_node.kmalloc_large_node.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb
1.91 ± 4% -63.1% 0.70 ± 11% perf-profile.cycles-pp.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.sctp_data_ready.sctp_ulpq_tail_event
1.33 ± 4% -27.2% 0.97 ± 3% perf-profile.cycles-pp.copy_msghdr_from_user.___sys_sendmsg.__sys_sendmsg.sys_sendmsg.entry_SYSCALL_64_fastpath
10.64 ± 2% +16.7% 12.43 ± 8% perf-profile.cycles-pp.copy_user_enhanced_fast_string.sctp_user_addto_chunk.sctp_datamsg_from_user.sctp_sendmsg.inet_sendmsg
9.43 ± 1% +22.1% 11.51 ± 5% perf-profile.cycles-pp.copy_user_enhanced_fast_string.skb_copy_datagram_iter.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
1.87 ± 3% -62.6% 0.70 ± 11% perf-profile.cycles-pp.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.sctp_data_ready
0.71 ± 33% -100.0% 0.00 ± -1% perf-profile.cycles-pp.dev_queue_xmit.ip_finish_output2.ip_finish_output.ip_output.ip_local_out
12.40 ± 1% +41.8% 17.59 ± 2% perf-profile.cycles-pp.do_softirq.part.13.__local_bh_enable_ip.ip_finish_output2.ip_finish_output.ip_output
4.52 ± 3% -28.1% 3.25 ± 3% perf-profile.cycles-pp.do_softirq.part.13.__local_bh_enable_ip.sctp_inq_push.sctp_backlog_rcv.__release_sock
12.35 ± 1% +42.1% 17.55 ± 2% perf-profile.cycles-pp.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip.ip_finish_output2.ip_finish_output
4.48 ± 3% -27.9% 3.23 ± 3% perf-profile.cycles-pp.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip.sctp_inq_push.sctp_backlog_rcv
1.71 ± 14% -38.5% 1.05 ± 7% perf-profile.cycles-pp.free_one_page.__free_pages_ok.__free_pages.__free_kmem_pages.kfree
5.15 ± 5% -42.8% 2.95 ± 2% perf-profile.cycles-pp.get_page_from_freelist.__alloc_pages_nodemask.alloc_kmem_pages_node.kmalloc_large_node.__kmalloc_node_track_caller
73.23 ± 0% -16.2% 61.40 ± 1% perf-profile.cycles-pp.inet_sendmsg.sock_sendmsg.___sys_sendmsg.__sys_sendmsg.sys_sendmsg
13.52 ± 3% +33.2% 18.01 ± 2% perf-profile.cycles-pp.ip_finish_output.ip_output.ip_local_out.ip_queue_xmit.sctp_v4_xmit
13.06 ± 3% +37.7% 17.98 ± 2% perf-profile.cycles-pp.ip_finish_output2.ip_finish_output.ip_output.ip_local_out.ip_queue_xmit
15.14 ± 2% +27.0% 19.24 ± 1% perf-profile.cycles-pp.ip_local_deliver.ip_rcv_finish.ip_rcv.__netif_receive_skb_core.__netif_receive_skb
15.04 ± 2% +27.2% 19.13 ± 1% perf-profile.cycles-pp.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv.__netif_receive_skb_core
14.75 ± 1% +28.2% 18.91 ± 1% perf-profile.cycles-pp.ip_local_out.ip_queue_xmit.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush
14.53 ± 1% +28.6% 18.68 ± 2% perf-profile.cycles-pp.ip_output.ip_local_out.ip_queue_xmit.sctp_v4_xmit.sctp_packet_transmit
0.84 ± 16% +1251.5% 11.42 ± 3% perf-profile.cycles-pp.ip_queue_xmit.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush.sctp_outq_tail
14.24 ± 1% -44.3% 7.93 ± 4% perf-profile.cycles-pp.ip_queue_xmit.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork
15.56 ± 2% +25.8% 19.58 ± 2% perf-profile.cycles-pp.ip_rcv.__netif_receive_skb_core.__netif_receive_skb.process_backlog.net_rx_action
15.27 ± 2% +26.8% 19.36 ± 2% perf-profile.cycles-pp.ip_rcv_finish.ip_rcv.__netif_receive_skb_core.__netif_receive_skb.process_backlog
3.06 ± 4% -22.9% 2.36 ± 6% perf-profile.cycles-pp.kfree.skb_free_head.skb_release_data.skb_release_all.consume_skb
1.15 ± 3% -17.9% 0.94 ± 7% perf-profile.cycles-pp.kfree.skb_free_head.skb_release_data.skb_release_all.kfree_skb
4.43 ± 7% -20.9% 3.50 ± 2% perf-profile.cycles-pp.kmalloc_large_node.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb._sctp_make_chunk
1.56 ± 4% -70.6% 0.46 ± 59% perf-profile.cycles-pp.kmalloc_large_node.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb.sctp_packet_transmit
0.00 ± -1% +Inf% 6.27 ± 4% perf-profile.cycles-pp.memcpy_erms.sctp_packet_transmit_chunk.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24
16.47 ± 2% +24.3% 20.48 ± 1% perf-profile.cycles-pp.net_rx_action.__do_softirq.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip
1.20 ± 1% -100.0% 0.00 ± -1% perf-profile.cycles-pp.preempt_schedule_common._cond_resched.__release_sock.release_sock.sctp_sendmsg
16.20 ± 2% +24.5% 20.17 ± 2% perf-profile.cycles-pp.process_backlog.net_rx_action.__do_softirq.do_softirq_own_stack.do_softirq.part.13
0.00 ± -1% +Inf% 1.69 ± 5% perf-profile.cycles-pp.release_sock.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg
24.70 ± 3% -16.7% 20.59 ± 1% perf-profile.cycles-pp.release_sock.sctp_sendmsg.inet_sendmsg.sock_sendmsg.___sys_sendmsg
1.70 ± 5% -49.9% 0.85 ± 5% perf-profile.cycles-pp.schedule.schedule_timeout.sctp_skb_recv_datagram.sctp_recvmsg.sock_common_recvmsg
0.00 ± -1% +Inf% 0.97 ± 2% perf-profile.cycles-pp.schedule_timeout.sctp_sendmsg.inet_sendmsg.sock_sendmsg.___sys_sendmsg
1.75 ± 6% -50.6% 0.86 ± 5% perf-profile.cycles-pp.schedule_timeout.sctp_skb_recv_datagram.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
11.53 ± 3% +40.3% 16.17 ± 1% perf-profile.cycles-pp.sctp_assoc_bh_rcv.sctp_inq_push.sctp_rcv.ip_local_deliver_finish.ip_local_deliver
1.24 ± 12% +931.0% 12.81 ± 3% perf-profile.cycles-pp.sctp_assoc_rwnd_increase.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
0.00 ± -1% +Inf% 1.52 ± 5% perf-profile.cycles-pp.sctp_backlog_rcv.__release_sock.release_sock.sctp_recvmsg.sock_common_recvmsg
23.12 ± 3% -12.2% 20.30 ± 1% perf-profile.cycles-pp.sctp_backlog_rcv.__release_sock.release_sock.sctp_sendmsg.inet_sendmsg
9.11 ± 5% +59.4% 14.53 ± 2% perf-profile.cycles-pp.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push.sctp_rcv
16.23 ± 0% -30.0% 11.36 ± 2% perf-profile.cycles-pp.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_primitive_SEND.sctp_sendmsg.inet_sendmsg
2.22 ± 7% -59.5% 0.90 ± 18% perf-profile.cycles-pp.sctp_data_ready.sctp_ulpq_tail_event.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24.sctp_do_sm
10.26 ± 4% +48.9% 15.28 ± 2% perf-profile.cycles-pp.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push.sctp_rcv.ip_local_deliver_finish
15.93 ± 0% -28.8% 11.35 ± 2% perf-profile.cycles-pp.sctp_do_sm.sctp_primitive_SEND.sctp_sendmsg.inet_sendmsg.sock_sendmsg
0.00 ± -1% +Inf% 1.52 ± 4% perf-profile.cycles-pp.sctp_inq_push.sctp_backlog_rcv.__release_sock.release_sock.sctp_recvmsg
20.12 ± 2% -9.7% 18.17 ± 2% perf-profile.cycles-pp.sctp_inq_push.sctp_backlog_rcv.__release_sock.release_sock.sctp_sendmsg
11.75 ± 3% +39.3% 16.37 ± 1% perf-profile.cycles-pp.sctp_inq_push.sctp_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish
8.20 ± 5% -17.9% 6.74 ± 2% perf-profile.cycles-pp.sctp_make_datafrag_empty.sctp_datamsg_from_user.sctp_sendmsg.inet_sendmsg.sock_sendmsg
0.14 ±173% +2767.9% 4.02 ± 3% perf-profile.cycles-pp.sctp_outq_flush.sctp_outq_tail.sctp_assoc_rwnd_increase.sctp_ulpevent_free.sctp_recvmsg
12.67 ± 3% +57.5% 19.95 ± 2% perf-profile.cycles-pp.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv
19.85 ± 1% -35.5% 12.79 ± 2% perf-profile.cycles-pp.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_primitive_SEND
0.95 ± 14% +1192.3% 12.21 ± 3% perf-profile.cycles-pp.sctp_outq_tail.sctp_assoc_rwnd_increase.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg
12.98 ± 3% +55.0% 20.12 ± 2% perf-profile.cycles-pp.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push
20.10 ± 1% -35.6% 12.95 ± 2% perf-profile.cycles-pp.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_primitive_SEND.sctp_sendmsg
0.82 ± 15% +1240.4% 10.96 ± 3% perf-profile.cycles-pp.sctp_packet_transmit.sctp_outq_flush.sctp_outq_tail.sctp_assoc_rwnd_increase.sctp_ulpevent_free
17.19 ± 1% -41.0% 10.14 ± 2% perf-profile.cycles-pp.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm
0.00 ± -1% +Inf% 1.07 ± 5% perf-profile.cycles-pp.sctp_packet_transmit.sctp_packet_transmit_chunk.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24
0.78 ± 26% +955.1% 8.23 ± 4% perf-profile.cycles-pp.sctp_packet_transmit_chunk.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm
23.37 ± 0% -34.9% 15.23 ± 2% perf-profile.cycles-pp.sctp_primitive_SEND.sctp_sendmsg.inet_sendmsg.sock_sendmsg.___sys_sendmsg
14.40 ± 2% +28.6% 18.51 ± 1% perf-profile.cycles-pp.sctp_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv
17.04 ± 1% +80.3% 30.72 ± 3% perf-profile.cycles-pp.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg.__sys_recvmsg
72.76 ± 0% -16.1% 61.03 ± 1% perf-profile.cycles-pp.sctp_sendmsg.inet_sendmsg.sock_sendmsg.___sys_sendmsg.__sys_sendmsg
2.27 ± 5% -49.9% 1.14 ± 8% perf-profile.cycles-pp.sctp_skb_recv_datagram.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg
3.39 ± 5% +337.7% 14.85 ± 2% perf-profile.cycles-pp.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg
1.77 ± 3% -42.5% 1.02 ± 25% perf-profile.cycles-pp.sctp_ulpevent_make_rcvmsg.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv
4.75 ± 4% -33.4% 3.16 ± 5% perf-profile.cycles-pp.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push
2.34 ± 7% -58.3% 0.98 ± 15% perf-profile.cycles-pp.sctp_ulpq_tail_event.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv
11.39 ± 2% +15.6% 13.17 ± 8% perf-profile.cycles-pp.sctp_user_addto_chunk.sctp_datamsg_from_user.sctp_sendmsg.inet_sendmsg.sock_sendmsg
0.84 ± 16% +1251.2% 11.42 ± 3% perf-profile.cycles-pp.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush.sctp_outq_tail.sctp_assoc_rwnd_increase
14.31 ± 1% -43.8% 8.04 ± 4% perf-profile.cycles-pp.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24
0.75 ± 3% +107.7% 1.56 ± 4% perf-profile.cycles-pp.sctp_wfree.skb_release_head_state.skb_release_all.consume_skb.sctp_chunk_put
9.90 ± 2% +21.4% 12.02 ± 6% perf-profile.cycles-pp.skb_copy_datagram_iter.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg
3.08 ± 4% -22.7% 2.38 ± 5% perf-profile.cycles-pp.skb_free_head.skb_release_data.skb_release_all.consume_skb.sctp_chunk_put
1.15 ± 3% -18.2% 0.95 ± 7% perf-profile.cycles-pp.skb_free_head.skb_release_data.skb_release_all.kfree_skb.sctp_ulpevent_free
4.11 ± 4% +6.9% 4.39 ± 3% perf-profile.cycles-pp.skb_release_all.consume_skb.sctp_chunk_put.sctp_chunk_free.sctp_outq_sack
1.70 ± 3% -13.4% 1.47 ± 5% perf-profile.cycles-pp.skb_release_all.kfree_skb.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg
3.15 ± 4% -23.0% 2.42 ± 5% perf-profile.cycles-pp.skb_release_data.skb_release_all.consume_skb.sctp_chunk_put.sctp_chunk_free
1.33 ± 3% -13.4% 1.15 ± 7% perf-profile.cycles-pp.skb_release_data.skb_release_all.kfree_skb.sctp_ulpevent_free.sctp_recvmsg
0.91 ± 6% +92.3% 1.76 ± 3% perf-profile.cycles-pp.skb_release_head_state.skb_release_all.consume_skb.sctp_chunk_put.sctp_chunk_free
17.25 ± 1% +79.2% 30.92 ± 3% perf-profile.cycles-pp.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg.__sys_recvmsg.sys_recvmsg
17.81 ± 1% +76.0% 31.35 ± 3% perf-profile.cycles-pp.sock_recvmsg.___sys_recvmsg.__sys_recvmsg.sys_recvmsg.entry_SYSCALL_64_fastpath
73.92 ± 0% -16.2% 61.91 ± 1% perf-profile.cycles-pp.sock_sendmsg.___sys_sendmsg.__sys_sendmsg.sys_sendmsg.entry_SYSCALL_64_fastpath
19.54 ± 1% +67.8% 32.79 ± 3% perf-profile.cycles-pp.sys_recvmsg.entry_SYSCALL_64_fastpath
78.34 ± 0% -16.7% 65.25 ± 1% perf-profile.cycles-pp.sys_sendmsg.entry_SYSCALL_64_fastpath
1.82 ± 2% -62.4% 0.68 ± 11% perf-profile.cycles-pp.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key
1.15 ± 5% -100.0% 0.00 ± -1% perf-profile.cycles-pp.ttwu_do_activate.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common
netperf.Throughput_Mbps
5400 ++-------------------------------------------------------------------+
**. **.***.**. * .** .***.***.***.**.** .***.***.***. **. *. **.** .**
5200 ++ * * * * * * * * * |
5000 ++ |
| |
4800 ++ |
| |
4600 ++ |
| |
4400 ++ |
4200 ++ |
| O O |
4000 ++ O O OO O O O O OO |
OO O O O O O O O |
3800 ++-------------------------------------------------------------------+
[*] bisect-good sample
[O] bisect-bad sample
***************************************************************************************************
ivb42: 48 threads 2 sockets Xeon E5 (Ivytown Ivy Bridge-EP) with 64G memory
=========================================================================================
compiler/cpufreq_governor/duration/iterations/kconfig/nr_threads/rootfs/tbox_group/testcase:
gcc-4.9/performance/10s/100x/x86_64-rhel/200%/debian-x86_64-2015-02-07.cgz/ivb42/ebizzy
commit:
0329dacce8 ("sched/fair: Fix effective_load()")
0cef819730 ("sched/fair: Fix calc_cfs_shares()")
0329dacce87d6741 0cef819730f7f67f8d2bec642e
---------------- --------------------------
fail:runs %reproduction fail:runs
| | |
:4 50% 2:4 kmsg.Spurious_LAPIC_timer_interrupt_on_cpu
%stddev %change %stddev
\ | \
1359 ± 0% -39.6% 821.75 ± 0% ebizzy.throughput.per_thread.max
190.50 ± 1% +99.7% 380.50 ± 0% ebizzy.throughput.per_thread.min
3227377 ± 0% +18.4% 3822284 ± 0% ebizzy.time.involuntary_context_switches
38.30 ± 0% -1.0% 37.93 ± 0% ebizzy.time.user
3836 ± 0% -1.0% 3798 ± 0% ebizzy.time.user_time
7422 ± 0% +1.1% 7500 ± 0% vmstat.system.cs
2747604 ± 2% +23.1% 3381516 ± 1% softirqs.RCU
257256 ± 5% +22.0% 313808 ± 4% softirqs.SCHED
8941 ± 19% +40.0% 12516 ± 10% numa-vmstat.node0.nr_active_anon
2283 ± 68% +153.0% 5776 ± 23% numa-vmstat.node0.nr_shmem
4031 ± 27% -69.1% 1243 ± 81% numa-vmstat.node1.nr_shmem
43859 ± 15% +32.5% 58111 ± 8% numa-meminfo.node0.Active
35763 ± 19% +40.0% 50075 ± 10% numa-meminfo.node0.Active(anon)
9134 ± 68% +153.0% 23107 ± 23% numa-meminfo.node0.Shmem
16120 ± 27% -69.1% 4977 ± 81% numa-meminfo.node1.Shmem
7864 ± 3% +13.9% 8959 ± 3% slabinfo.anon_vma.active_objs
7864 ± 3% +13.9% 8959 ± 3% slabinfo.anon_vma.num_objs
20622 ± 1% +21.6% 25083 ± 3% slabinfo.anon_vma_chain.active_objs
20650 ± 1% +21.6% 25121 ± 3% slabinfo.anon_vma_chain.num_objs
16762 ± 2% +22.0% 20454 ± 4% slabinfo.vm_area_struct.active_objs
16800 ± 2% +22.0% 20495 ± 4% slabinfo.vm_area_struct.num_objs
1.766e+11 ± 0% +0.7% 1.778e+11 ± 0% perf-stat.L1-dcache-prefetch-misses
5.765e+11 ± 0% -0.8% 5.721e+11 ± 0% perf-stat.L1-dcache-store-misses
3.326e+10 ± 0% +11.6% 3.71e+10 ± 0% perf-stat.L1-icache-load-misses
7440021 ± 0% +1.1% 7518907 ± 0% perf-stat.context-switches
131030 ± 0% +120.8% 289258 ± 0% perf-stat.cpu-migrations
9.909e+09 ± 0% +4.4% 1.035e+10 ± 0% perf-stat.node-store-misses
1.466e+10 ± 2% +5.3% 1.543e+10 ± 2% perf-stat.node-stores
945986 ± 0% -97.8% 21259 ± 0% sched_debug.cfs_rq:/.load.avg
1047060 ± 2% -96.0% 41384 ± 14% sched_debug.cfs_rq:/.load.max
902269 ± 0% -98.9% 10100 ± 0% sched_debug.cfs_rq:/.load.min
51815 ± 6% -86.2% 7145 ± 9% sched_debug.cfs_rq:/.load.stddev
905.05 ± 0% -97.4% 23.63 ± 10% sched_debug.cfs_rq:/.load_avg.avg
1011 ± 2% -90.7% 94.35 ± 18% sched_debug.cfs_rq:/.load_avg.max
863.59 ± 0% -98.8% 10.38 ± 0% sched_debug.cfs_rq:/.load_avg.min
37.56 ± 9% -52.1% 17.99 ± 22% sched_debug.cfs_rq:/.load_avg.stddev
508232 ± 0% +5202.4% 26948505 ± 0% sched_debug.cfs_rq:/.min_vruntime.avg
518809 ± 0% +5251.8% 27765584 ± 0% sched_debug.cfs_rq:/.min_vruntime.max
502866 ± 0% +5087.3% 26085013 ± 0% sched_debug.cfs_rq:/.min_vruntime.min
2704 ± 4% +17547.4% 477333 ± 17% sched_debug.cfs_rq:/.min_vruntime.stddev
0.03 ± 19% +3208.3% 1.10 ± 6% sched_debug.cfs_rq:/.nr_spread_over.avg
0.84 ± 21% +2882.5% 25.00 ± 14% sched_debug.cfs_rq:/.nr_spread_over.max
0.16 ± 20% +2354.4% 4.01 ± 13% sched_debug.cfs_rq:/.nr_spread_over.stddev
900.16 ± 0% -97.9% 19.11 ± 0% sched_debug.cfs_rq:/.runnable_load_avg.avg
950.12 ± 0% -96.9% 29.13 ± 2% sched_debug.cfs_rq:/.runnable_load_avg.max
863.34 ± 0% -98.8% 10.18 ± 1% sched_debug.cfs_rq:/.runnable_load_avg.min
26.66 ± 0% -83.8% 4.33 ± 2% sched_debug.cfs_rq:/.runnable_load_avg.stddev
3007 ±164% +19711.6% 595751 ± 38% sched_debug.cfs_rq:/.spread0.max
-12941 ±-36% +8281.9% -1084720 ±-40% sched_debug.cfs_rq:/.spread0.min
2703 ± 4% +17557.3% 477353 ± 17% sched_debug.cfs_rq:/.spread0.stddev
271484 ± 3% +14.0% 309560 ± 3% sched_debug.cpu.avg_idle.avg
840053 ± 6% +15.5% 970603 ± 2% sched_debug.cpu.avg_idle.max
122164 ± 10% +29.8% 158613 ± 10% sched_debug.cpu.avg_idle.stddev
900.06 ± 0% -97.9% 19.10 ± 0% sched_debug.cpu.cpu_load[0].avg
950.15 ± 0% -96.9% 29.18 ± 2% sched_debug.cpu.cpu_load[0].max
863.35 ± 0% -98.8% 10.19 ± 1% sched_debug.cpu.cpu_load[0].min
26.60 ± 0% -83.6% 4.35 ± 2% sched_debug.cpu.cpu_load[0].stddev
900.19 ± 0% -97.8% 19.47 ± 1% sched_debug.cpu.cpu_load[1].avg
947.69 ± 0% -95.4% 43.91 ± 31% sched_debug.cpu.cpu_load[1].max
863.35 ± 0% -98.8% 10.19 ± 1% sched_debug.cpu.cpu_load[1].min
26.23 ± 1% -75.8% 6.35 ± 31% sched_debug.cpu.cpu_load[1].stddev
899.05 ± 0% -97.8% 19.34 ± 1% sched_debug.cpu.cpu_load[2].avg
944.72 ± 0% -96.1% 36.68 ± 16% sched_debug.cpu.cpu_load[2].max
863.35 ± 0% -98.8% 10.19 ± 1% sched_debug.cpu.cpu_load[2].min
25.72 ± 1% -79.2% 5.34 ± 17% sched_debug.cpu.cpu_load[2].stddev
897.66 ± 0% -97.9% 19.27 ± 1% sched_debug.cpu.cpu_load[3].avg
941.85 ± 0% -96.5% 32.90 ± 7% sched_debug.cpu.cpu_load[3].max
863.32 ± 0% -98.8% 10.19 ± 1% sched_debug.cpu.cpu_load[3].min
25.12 ± 1% -80.5% 4.90 ± 11% sched_debug.cpu.cpu_load[3].stddev
896.24 ± 0% -97.9% 19.19 ± 0% sched_debug.cpu.cpu_load[4].avg
938.69 ± 0% -96.7% 31.38 ± 7% sched_debug.cpu.cpu_load[4].max
863.28 ± 0% -98.8% 10.19 ± 1% sched_debug.cpu.cpu_load[4].min
24.61 ± 1% -81.3% 4.60 ± 9% sched_debug.cpu.cpu_load[4].stddev
16425 ± 4% -23.7% 12532 ± 25% sched_debug.cpu.curr->pid.min
946912 ± 0% -97.8% 21258 ± 0% sched_debug.cpu.load.avg
1047060 ± 2% -96.0% 41802 ± 13% sched_debug.cpu.load.max
902268 ± 0% -98.9% 10100 ± 0% sched_debug.cpu.load.min
51966 ± 5% -86.2% 7157 ± 8% sched_debug.cpu.load.stddev
5.74 ± 6% -44.1% 3.21 ± 4% sched_debug.cpu.nr_running.max
1.13 ± 4% -50.2% 0.56 ± 2% sched_debug.cpu.nr_running.stddev
567072 ± 20% -73.0% 153117 ± 4% sched_debug.cpu.nr_switches.max
33254 ± 3% +31.9% 43856 ± 1% sched_debug.cpu.nr_switches.min
101510 ± 16% -73.4% 26976 ± 6% sched_debug.cpu.nr_switches.stddev
-93.43 ± -8% +30.1% -121.59 ±-13% sched_debug.cpu.nr_uninterruptible.min
566139 ± 20% -70.4% 167497 ± 9% sched_debug.cpu.sched_count.max
33050 ± 3% +39.0% 45943 ± 0% sched_debug.cpu.sched_count.min
101830 ± 16% -71.2% 29289 ± 4% sched_debug.cpu.sched_count.stddev
1857 ± 4% -11.3% 1647 ± 4% sched_debug.cpu.sched_goidle.avg
273980 ± 22% -78.3% 59590 ± 6% sched_debug.cpu.ttwu_count.max
4600 ± 6% +21.4% 5586 ± 12% sched_debug.cpu.ttwu_count.min
51498 ± 16% -74.5% 13152 ± 5% sched_debug.cpu.ttwu_count.stddev
20747 ± 0% -17.9% 17027 ± 2% sched_debug.cpu.ttwu_local.avg
265675 ± 22% -80.0% 53126 ± 5% sched_debug.cpu.ttwu_local.max
50152 ± 16% -75.3% 12409 ± 4% sched_debug.cpu.ttwu_local.stddev
***************************************************************************************************
lkp-hsx04: 144 threads Brickland Haswell-EX with 512G memory
=========================================================================================
compiler/cpufreq_governor/kconfig/nr_threads/rootfs/tbox_group/testcase:
gcc-4.9/performance/x86_64-rhel/100%/debian-x86_64-2015-02-07.cgz/lkp-hsx04/plzip
commit:
0329dacce8 ("sched/fair: Fix effective_load()")
0cef819730 ("sched/fair: Fix calc_cfs_shares()")
0329dacce87d6741 0cef819730f7f67f8d2bec642e
---------------- --------------------------
fail:runs %reproduction fail:runs
| | |
:4 75% 3:4 kmsg.DHCP/BOOTP:Reply_not_for_us,op[#]xid[#]
:4 25% 1:4 kmsg.DHCP/BOOTP:Reply_not_for_us,op[#]xid[#b98713]
%stddev %change %stddev
\ | \
2.198e+08 ± 0% +2.5% 2.253e+08 ± 0% plzip.throughput
13115 ± 0% +2.3% 13418 ± 0% plzip.time.percent_of_cpu_this_job_got
46198 ± 0% +3.2% 47659 ± 0% plzip.time.user_time
501228 ± 1% -17.2% 415017 ± 2% plzip.time.voluntary_context_switches
30291 ± 3% +8.6% 32896 ± 4% slabinfo.vm_area_struct.num_objs
62366 ± 82% -57.8% 26290 ± 10% latency_stats.max.call_rwsem_down_write_failed_killable.SyS_munmap.entry_SYSCALL_64_fastpath
2325979 ± 6% -69.5% 709462 ± 16% latency_stats.sum.call_rwsem_down_read_failed.SyS_madvise.entry_SYSCALL_64_fastpath
6995 ± 1% -8.0% 6437 ± 3% vmstat.system.cs
449401 ± 1% -3.3% 434526 ± 1% vmstat.system.in
13566509 ± 1% -10.5% 12147331 ± 3% proc-vmstat.numa_pages_migrated
307456 ± 4% -13.9% 264576 ± 8% proc-vmstat.pgmigrate_fail
13566509 ± 1% -10.5% 12147331 ± 3% proc-vmstat.pgmigrate_success
43940096 ± 3% -16.1% 36883878 ± 4% cpuidle.C3-HSW.time
138953 ± 3% -16.8% 115586 ± 4% cpuidle.C3-HSW.usage
4.377e+09 ± 2% -22.6% 3.39e+09 ± 4% cpuidle.C6-HSW.time
4794982 ± 2% -23.0% 3693173 ± 4% cpuidle.C6-HSW.usage
14563665 ± 36% -64.8% 5126454 ± 47% numa-numastat.node1.local_node
14563667 ± 36% -64.8% 5126456 ± 47% numa-numastat.node1.numa_hit
3979068 ± 16% +286.6% 15382273 ± 13% numa-numastat.node2.local_node
3979070 ± 16% +286.6% 15382276 ± 13% numa-numastat.node2.numa_hit
91.16 ± 0% +2.2% 93.20 ± 0% turbostat.%Busy
2636 ± 0% +2.2% 2695 ± 0% turbostat.Avg_MHz
6.24 ± 2% -29.4% 4.40 ± 3% turbostat.CPU%c1
2.59 ± 4% -7.9% 2.38 ± 5% turbostat.CPU%c6
694.45 ± 0% +1.1% 702.04 ± 0% turbostat.RAMWatt
8779 ± 8% -19.6% 7057 ± 4% numa-meminfo.node1.PageTables
6706 ± 50% -72.4% 1848 ±115% numa-meminfo.node1.Shmem
2617044 ± 3% +12.0% 2930380 ± 3% numa-meminfo.node2.Active
2613057 ± 3% +12.0% 2926476 ± 3% numa-meminfo.node2.Active(anon)
2703252 ± 3% +10.9% 2999010 ± 3% numa-meminfo.node2.AnonHugePages
2887787 ± 3% +11.6% 3223759 ± 3% numa-meminfo.node2.AnonPages
4665 ± 6% +92.5% 8981 ± 28% numa-meminfo.node2.Mapped
3369566 ± 2% +10.2% 3711830 ± 3% numa-meminfo.node2.MemUsed
2193 ± 8% -19.5% 1766 ± 5% numa-vmstat.node1.nr_page_table_pages
1676 ± 50% -72.5% 461.75 ±115% numa-vmstat.node1.nr_shmem
7453615 ± 26% -60.9% 2912511 ± 37% numa-vmstat.node1.numa_hit
7453614 ± 26% -60.9% 2912510 ± 37% numa-vmstat.node1.numa_local
653170 ± 3% +12.0% 731594 ± 3% numa-vmstat.node2.nr_active_anon
721847 ± 3% +11.7% 806051 ± 3% numa-vmstat.node2.nr_anon_pages
1317 ± 3% +11.1% 1464 ± 3% numa-vmstat.node2.nr_anon_transparent_hugepages
1158 ± 6% +93.9% 2245 ± 28% numa-vmstat.node2.nr_mapped
2185024 ± 31% +247.1% 7583311 ± 21% numa-vmstat.node2.numa_hit
2185023 ± 31% +247.1% 7583309 ± 21% numa-vmstat.node2.numa_local
5.836e+11 ± 0% +3.0% 6.01e+11 ± 0% perf-stat.L1-dcache-load-misses
1.507e+13 ± 0% +2.9% 1.55e+13 ± 0% perf-stat.L1-dcache-loads
3.93e+12 ± 0% +3.8% 4.078e+12 ± 0% perf-stat.L1-dcache-stores
3.031e+11 ± 0% +3.7% 3.143e+11 ± 0% perf-stat.LLC-load-misses
3.848e+11 ± 0% +3.2% 3.97e+11 ± 0% perf-stat.LLC-loads
3.665e+10 ± 0% +4.6% 3.834e+10 ± 0% perf-stat.LLC-store-misses
1.022e+11 ± 0% +4.1% 1.063e+11 ± 0% perf-stat.LLC-stores
1.112e+13 ± 0% +3.6% 1.151e+13 ± 1% perf-stat.branch-instructions
2.394e+11 ± 0% +4.0% 2.489e+11 ± 0% perf-stat.branch-load-misses
1.113e+13 ± 0% +2.9% 1.146e+13 ± 1% perf-stat.branch-loads
2.406e+11 ± 1% +3.9% 2.499e+11 ± 0% perf-stat.branch-misses
4.606e+12 ± 0% +3.2% 4.753e+12 ± 0% perf-stat.bus-cycles
3.393e+11 ± 0% +4.0% 3.529e+11 ± 0% perf-stat.cache-misses
4.964e+11 ± 0% +3.1% 5.116e+11 ± 0% perf-stat.cache-references
2488007 ± 1% -7.3% 2306257 ± 3% perf-stat.context-switches
1.332e+14 ± 0% +3.1% 1.374e+14 ± 0% perf-stat.cpu-cycles
152269 ± 1% -32.4% 102949 ± 5% perf-stat.cpu-migrations
1.505e+13 ± 0% +3.1% 1.552e+13 ± 0% perf-stat.dTLB-loads
3.94e+12 ± 0% +3.4% 4.072e+12 ± 0% perf-stat.dTLB-stores
1.754e+08 ± 21% -28.4% 1.256e+08 ± 11% perf-stat.iTLB-load-misses
5.696e+13 ± 0% +3.6% 5.899e+13 ± 0% perf-stat.instructions
4.732e+10 ± 2% -11.6% 4.184e+10 ± 4% perf-stat.node-load-misses
2.552e+11 ± 1% +6.7% 2.723e+11 ± 0% perf-stat.node-loads
7.049e+09 ± 2% -8.1% 6.476e+09 ± 4% perf-stat.node-store-misses
2.97e+10 ± 0% +7.3% 3.186e+10 ± 1% perf-stat.node-stores
1.149e+14 ± 0% +3.0% 1.184e+14 ± 0% perf-stat.ref-cycles
759797 ± 1% -99.2% 6423 ± 2% sched_debug.cfs_rq:/.load.avg
991391 ± 0% -97.8% 21997 ± 26% sched_debug.cfs_rq:/.load.max
344247 ± 36% -99.5% 1707 ± 31% sched_debug.cfs_rq:/.load.min
114087 ± 29% -97.7% 2587 ± 17% sched_debug.cfs_rq:/.load.stddev
736.09 ± 1% -98.0% 14.97 ± 10% sched_debug.cfs_rq:/.load_avg.avg
929.42 ± 1% -77.0% 213.42 ± 33% sched_debug.cfs_rq:/.load_avg.max
319.50 ± 10% -99.1% 2.78 ± 17% sched_debug.cfs_rq:/.load_avg.min
83.59 ± 10% -57.0% 35.97 ± 4% sched_debug.cfs_rq:/.load_avg.stddev
154838 ± 0% +13451.9% 20983617 ± 8% sched_debug.cfs_rq:/.min_vruntime.avg
177334 ± 1% +11995.3% 21449100 ± 8% sched_debug.cfs_rq:/.min_vruntime.max
147845 ± 0% +13477.1% 20073176 ± 8% sched_debug.cfs_rq:/.min_vruntime.min
3666 ± 10% +7085.9% 263446 ± 11% sched_debug.cfs_rq:/.min_vruntime.stddev
0.03 ± 24% +796.1% 0.24 ± 17% sched_debug.cfs_rq:/.nr_spread_over.avg
0.83 ± 14% +620.0% 6.00 ± 39% sched_debug.cfs_rq:/.nr_spread_over.max
0.14 ± 15% +506.4% 0.83 ± 24% sched_debug.cfs_rq:/.nr_spread_over.stddev
715.46 ± 2% -99.3% 5.26 ± 0% sched_debug.cfs_rq:/.runnable_load_avg.avg
821.04 ± 1% -98.4% 13.11 ± 4% sched_debug.cfs_rq:/.runnable_load_avg.max
216.38 ± 41% -99.4% 1.23 ± 19% sched_debug.cfs_rq:/.runnable_load_avg.min
90.41 ± 30% -98.2% 1.64 ± 22% sched_debug.cfs_rq:/.runnable_load_avg.stddev
-22528 ±-14% -836.6% 165943 ± 61% sched_debug.cfs_rq:/.spread0.avg
-29534 ±-13% +2423.1% -745164 ±-14% sched_debug.cfs_rq:/.spread0.min
3672 ± 10% +7042.8% 262350 ± 11% sched_debug.cfs_rq:/.spread0.stddev
98890 ± 14% +31.9% 130412 ± 5% sched_debug.cpu.avg_idle.stddev
715.02 ± 2% -99.3% 5.29 ± 0% sched_debug.cpu.cpu_load[0].avg
820.88 ± 1% -98.3% 13.83 ± 6% sched_debug.cpu.cpu_load[0].max
216.67 ± 41% -99.4% 1.23 ± 19% sched_debug.cpu.cpu_load[0].min
91.02 ± 31% -98.1% 1.77 ± 23% sched_debug.cpu.cpu_load[0].stddev
726.15 ± 1% -98.9% 7.77 ± 19% sched_debug.cpu.cpu_load[1].avg
886.12 ± 3% -87.0% 115.02 ± 41% sched_debug.cpu.cpu_load[1].max
261.54 ± 17% -99.0% 2.52 ± 13% sched_debug.cpu.cpu_load[1].min
78.89 ± 17% -81.1% 14.88 ± 51% sched_debug.cpu.cpu_load[1].stddev
723.86 ± 1% -99.0% 7.07 ± 15% sched_debug.cpu.cpu_load[2].avg
843.88 ± 2% -90.4% 81.31 ± 36% sched_debug.cpu.cpu_load[2].max
246.75 ± 19% -99.0% 2.52 ± 14% sched_debug.cpu.cpu_load[2].min
73.42 ± 18% -85.7% 10.52 ± 48% sched_debug.cpu.cpu_load[2].stddev
722.00 ± 1% -99.1% 6.51 ± 10% sched_debug.cpu.cpu_load[3].avg
816.79 ± 1% -93.0% 57.06 ± 28% sched_debug.cpu.cpu_load[3].max
239.75 ± 18% -98.9% 2.60 ± 14% sched_debug.cpu.cpu_load[3].min
69.62 ± 19% -89.5% 7.32 ± 42% sched_debug.cpu.cpu_load[3].stddev
720.26 ± 1% -99.2% 6.05 ± 7% sched_debug.cpu.cpu_load[4].avg
798.08 ± 0% -95.0% 39.58 ± 23% sched_debug.cpu.cpu_load[4].max
246.58 ± 12% -98.9% 2.64 ± 14% sched_debug.cpu.cpu_load[4].min
66.15 ± 18% -92.5% 4.95 ± 40% sched_debug.cpu.cpu_load[4].stddev
760349 ± 1% -99.2% 6410 ± 2% sched_debug.cpu.load.avg
991391 ± 0% -97.8% 21997 ± 26% sched_debug.cpu.load.max
344247 ± 36% -99.5% 1707 ± 31% sched_debug.cpu.load.min
115044 ± 29% -97.8% 2566 ± 18% sched_debug.cpu.load.stddev
4076 ± 3% -35.1% 2647 ± 3% sched_debug.cpu.nr_load_updates.stddev
2636 ± 2% -30.2% 1840 ± 4% sched_debug.cpu.nr_switches.min
81384 ± 23% +30.0% 105830 ± 10% sched_debug.cpu.sched_count.max
2386 ± 3% -32.1% 1621 ± 4% sched_debug.cpu.sched_count.min
1204 ± 3% -22.9% 929.22 ± 7% sched_debug.cpu.sched_goidle.avg
554.29 ± 4% -32.6% 373.48 ± 2% sched_debug.cpu.sched_goidle.min
823.67 ± 2% -17.6% 678.97 ± 6% sched_debug.cpu.ttwu_count.min
394.42 ± 16% -28.3% 282.88 ± 7% sched_debug.cpu.ttwu_local.min
0.72 ± 10% +84.0% 1.32 ± 17% perf-profile.cycles-pp.__default_send_IPI_dest_field.default_send_IPI_mask_sequence_phys.native_send_call_func_ipi.smp_call_function_many.native_flush_tlb_others
20.80 ± 20% +40.0% 29.12 ± 6% perf-profile.cycles-pp.__do_page_fault.do_page_fault.page_fault
14.03 ± 22% +32.2% 18.54 ± 6% perf-profile.cycles-pp.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
5.27 ± 7% +52.3% 8.02 ± 3% perf-profile.cycles-pp.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
1.66 ± 13% +63.2% 2.71 ± 15% perf-profile.cycles-pp.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
0.63 ± 6% +63.3% 1.03 ± 7% perf-profile.cycles-pp.anon_pipe_buf_release.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
21.05 ± 23% +34.5% 28.31 ± 5% perf-profile.cycles-pp.apic_timer_interrupt
40.80 ± 27% -56.5% 17.74 ± 21% perf-profile.cycles-pp.call_cpuidle.cpu_startup_entry.start_secondary
4.43 ± 15% +52.7% 6.77 ± 16% perf-profile.cycles-pp.call_function_interrupt
0.59 ± 63% +87.8% 1.11 ± 5% perf-profile.cycles-pp.clear_huge_page.do_huge_pmd_anonymous_page.handle_mm_fault.__do_page_fault.do_page_fault
12.05 ± 25% +44.6% 17.43 ± 4% perf-profile.cycles-pp.clear_page_c_e.do_huge_pmd_anonymous_page.handle_mm_fault.__do_page_fault.do_page_fault
1.17 ± 11% +67.3% 1.96 ± 10% perf-profile.cycles-pp.copy_page_from_iter.pipe_write.__vfs_write.vfs_write.sys_write
4.22 ± 7% +55.2% 6.54 ± 2% perf-profile.cycles-pp.copy_page_to_iter.pipe_read.__vfs_read.vfs_read.sys_read
0.96 ± 6% +77.1% 1.70 ± 10% perf-profile.cycles-pp.copy_user_enhanced_fast_string.copy_page_from_iter.pipe_write.__vfs_write.vfs_write
3.92 ± 8% +55.8% 6.11 ± 5% perf-profile.cycles-pp.copy_user_enhanced_fast_string.copy_page_to_iter.pipe_read.__vfs_read.vfs_read
41.00 ± 27% -56.5% 17.84 ± 21% perf-profile.cycles-pp.cpu_startup_entry.start_secondary
40.80 ± 27% -56.5% 17.74 ± 22% perf-profile.cycles-pp.cpuidle_enter.call_cpuidle.cpu_startup_entry.start_secondary
40.68 ± 27% -56.5% 17.69 ± 22% perf-profile.cycles-pp.cpuidle_enter_state.cpuidle_enter.call_cpuidle.cpu_startup_entry.start_secondary
0.85 ± 11% +78.3% 1.52 ± 18% perf-profile.cycles-pp.default_send_IPI_mask_sequence_phys.native_send_call_func_ipi.smp_call_function_many.native_flush_tlb_others.flush_tlb_page
13.47 ± 25% +44.0% 19.40 ± 4% perf-profile.cycles-pp.do_huge_pmd_anonymous_page.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
20.83 ± 20% +40.0% 29.16 ± 6% perf-profile.cycles-pp.do_page_fault.page_fault
7.77 ± 8% +53.7% 11.94 ± 5% perf-profile.cycles-pp.entry_SYSCALL_64_fastpath
3.89 ± 15% +50.0% 5.84 ± 16% perf-profile.cycles-pp.flush_smp_call_function_queue.generic_smp_call_function_single_interrupt.smp_call_function_interrupt.call_function_interrupt
0.92 ± 16% +48.6% 1.36 ± 19% perf-profile.cycles-pp.flush_tlb_func.flush_smp_call_function_queue.generic_smp_call_function_single_interrupt.smp_call_function_interrupt.call_function_interrupt
2.44 ± 8% +60.1% 3.90 ± 15% perf-profile.cycles-pp.flush_tlb_page.ptep_clear_flush.try_to_unmap_one.rmap_walk_anon.rmap_walk
3.91 ± 15% +51.2% 5.92 ± 17% perf-profile.cycles-pp.generic_smp_call_function_single_interrupt.smp_call_function_interrupt.call_function_interrupt
19.86 ± 21% +42.0% 28.20 ± 6% perf-profile.cycles-pp.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
4.39 ± 11% +50.3% 6.59 ± 13% perf-profile.cycles-pp.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
17.51 ± 24% +33.6% 23.40 ± 7% perf-profile.cycles-pp.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
40.57 ± 27% -56.1% 17.80 ± 21% perf-profile.cycles-pp.intel_idle.cpuidle_enter_state.cpuidle_enter.call_cpuidle.cpu_startup_entry
2.38 ± 23% +31.6% 3.14 ± 13% perf-profile.cycles-pp.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
0.64 ± 7% +42.4% 0.91 ± 22% perf-profile.cycles-pp.llist_add_batch.smp_call_function_many.native_flush_tlb_others.flush_tlb_page.ptep_clear_flush
1.44 ± 13% +50.0% 2.16 ± 15% perf-profile.cycles-pp.llist_reverse_order.flush_smp_call_function_queue.generic_smp_call_function_single_interrupt.smp_call_function_interrupt.call_function_interrupt
17.83 ± 24% +33.7% 23.83 ± 7% perf-profile.cycles-pp.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
3.36 ± 9% +53.2% 5.15 ± 15% perf-profile.cycles-pp.migrate_misplaced_page.handle_pte_fault.handle_mm_fault.__do_page_fault.do_page_fault
2.74 ± 8% +57.5% 4.32 ± 15% perf-profile.cycles-pp.migrate_pages.migrate_misplaced_page.handle_pte_fault.handle_mm_fault.__do_page_fault
2.43 ± 8% +59.9% 3.88 ± 16% perf-profile.cycles-pp.native_flush_tlb_others.flush_tlb_page.ptep_clear_flush.try_to_unmap_one.rmap_walk_anon
1.67 ± 14% +32.3% 2.21 ± 13% perf-profile.cycles-pp.native_irq_return_iret
1.23 ± 8% +67.0% 2.06 ± 17% perf-profile.cycles-pp.native_send_call_func_ipi.smp_call_function_many.native_flush_tlb_others.flush_tlb_page.ptep_clear_flush
20.83 ± 20% +40.0% 29.16 ± 6% perf-profile.cycles-pp.page_fault
1.38 ± 22% +46.3% 2.02 ± 10% perf-profile.cycles-pp.perf_mux_hrtimer_handler.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt
4.62 ± 7% +50.9% 6.98 ± 2% perf-profile.cycles-pp.pipe_read.__vfs_read.vfs_read.sys_read.entry_SYSCALL_64_fastpath
1.65 ± 13% +70.5% 2.81 ± 11% perf-profile.cycles-pp.pipe_write.__vfs_write.vfs_write.sys_write.entry_SYSCALL_64_fastpath
2.44 ± 8% +60.1% 3.92 ± 16% perf-profile.cycles-pp.ptep_clear_flush.try_to_unmap_one.rmap_walk_anon.rmap_walk.try_to_unmap
1.06 ± 24% +43.8% 1.53 ± 4% perf-profile.cycles-pp.rcu_check_callbacks.update_process_times.tick_sched_handle.isra.17.tick_sched_timer.__hrtimer_run_queues
1.00 ± 19% +38.3% 1.38 ± 26% perf-profile.cycles-pp.rcu_process_callbacks.__do_softirq.irq_exit.smp_apic_timer_interrupt.apic_timer_interrupt
2.52 ± 9% +58.1% 3.98 ± 15% perf-profile.cycles-pp.rmap_walk.try_to_unmap.migrate_pages.migrate_misplaced_page.handle_pte_fault
2.51 ± 8% +58.3% 3.97 ± 15% perf-profile.cycles-pp.rmap_walk_anon.rmap_walk.try_to_unmap.migrate_pages.migrate_misplaced_page
20.80 ± 23% +34.0% 27.87 ± 5% perf-profile.cycles-pp.smp_apic_timer_interrupt.apic_timer_interrupt
4.25 ± 14% +52.1% 6.47 ± 17% perf-profile.cycles-pp.smp_call_function_interrupt.call_function_interrupt
2.41 ± 8% +60.4% 3.86 ± 16% perf-profile.cycles-pp.smp_call_function_many.native_flush_tlb_others.flush_tlb_page.ptep_clear_flush.try_to_unmap_one
41.00 ± 27% -56.5% 17.85 ± 21% perf-profile.cycles-pp.start_secondary
5.27 ± 7% +52.4% 8.02 ± 3% perf-profile.cycles-pp.sys_read.entry_SYSCALL_64_fastpath
1.67 ± 12% +63.9% 2.74 ± 16% perf-profile.cycles-pp.sys_write.entry_SYSCALL_64_fastpath
1.20 ± 31% +40.8% 1.68 ± 8% perf-profile.cycles-pp.tick_program_event.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt.apic_timer_interrupt
10.67 ± 23% +27.5% 13.61 ± 6% perf-profile.cycles-pp.tick_sched_handle.isra.17.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt
11.11 ± 23% +29.1% 14.35 ± 6% perf-profile.cycles-pp.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt.local_apic_timer_interrupt.smp_apic_timer_interrupt
2.52 ± 8% +57.6% 3.98 ± 15% perf-profile.cycles-pp.try_to_unmap.migrate_pages.migrate_misplaced_page.handle_pte_fault.handle_mm_fault
2.50 ± 8% +59.0% 3.97 ± 15% perf-profile.cycles-pp.try_to_unmap_one.rmap_walk_anon.rmap_walk.try_to_unmap.migrate_pages
10.46 ± 23% +27.4% 13.32 ± 5% perf-profile.cycles-pp.update_process_times.tick_sched_handle.isra.17.tick_sched_timer.__hrtimer_run_queues.hrtimer_interrupt
5.27 ± 7% +52.4% 8.02 ± 3% perf-profile.cycles-pp.vfs_read.sys_read.entry_SYSCALL_64_fastpath
1.67 ± 12% +63.6% 2.73 ± 16% perf-profile.cycles-pp.vfs_write.sys_write.entry_SYSCALL_64_fastpath
***************************************************************************************************
lkp-bdw-de1: 16 threads Broadwell-DE with 8G memory
=========================================================================================
cluster/compiler/cpufreq_governor/ip/kconfig/nr_threads/rootfs/runtime/send_size/tbox_group/test/testcase:
cs-localhost/gcc-4.9/performance/ipv4/x86_64-rhel/200%/debian-x86_64-2015-02-07.cgz/300s/10K/lkp-bdw-de1/SCTP_STREAM_MANY/netperf
commit:
0329dacce8 ("sched/fair: Fix effective_load()")
0cef819730 ("sched/fair: Fix calc_cfs_shares()")
0329dacce87d6741 0cef819730f7f67f8d2bec642e
---------------- --------------------------
%stddev %change %stddev
\ | \
2981 ± 0% -22.4% 2312 ± 0% netperf.Throughput_Mbps
48622550 ± 0% -100.0% 14709 ± 1% netperf.time.involuntary_context_switches
787.50 ± 0% +35.0% 1063 ± 0% netperf.time.percent_of_cpu_this_job_got
2327 ± 0% +34.7% 3135 ± 0% netperf.time.system_time
47.40 ± 1% +65.4% 78.38 ± 2% netperf.time.user_time
535968 ± 7% +2872.6% 15932329 ± 0% netperf.time.voluntary_context_switches
8626 ± 2% +12.4% 9694 ± 1% slabinfo.vm_area_struct.active_objs
8626 ± 2% +12.7% 9718 ± 1% slabinfo.vm_area_struct.num_objs
49962634 ± 0% -20.2% 39865448 ± 0% softirqs.NET_RX
277592 ± 12% -62.6% 103735 ± 2% softirqs.RCU
0.44 ± 1% +31.4% 0.57 ± 0% turbostat.CPU%c6
12.88 ± 0% +84.6% 23.78 ± 0% turbostat.RAMWatt
39.00 ± 0% +10.3% 43.00 ± 0% vmstat.procs.r
336813 ± 0% -65.4% 116656 ± 0% vmstat.system.cs
39652 ± 11% -31.7% 27080 ± 19% cpuidle.C1-BDW.usage
32415902 ± 1% +28.7% 41731000 ± 0% cpuidle.C6-BDW.time
35922 ± 1% +27.4% 45773 ± 1% cpuidle.C6-BDW.usage
123.50 ± 10% -38.9% 75.50 ± 24% cpuidle.POLL.usage
4.491e+08 ± 0% -21.8% 3.511e+08 ± 0% proc-vmstat.numa_hit
4.491e+08 ± 0% -21.8% 3.511e+08 ± 0% proc-vmstat.numa_local
6.407e+08 ± 0% -27.0% 4.68e+08 ± 0% proc-vmstat.pgalloc_dma32
1.947e+09 ± 0% -29.0% 1.382e+09 ± 0% proc-vmstat.pgalloc_normal
2.588e+09 ± 0% -28.5% 1.85e+09 ± 0% proc-vmstat.pgfree
5.271e+11 ± 0% -23.1% 4.055e+11 ± 0% perf-stat.L1-dcache-load-misses
1.773e+12 ± 0% -23.3% 1.36e+12 ± 0% perf-stat.L1-dcache-loads
1.249e+12 ± 0% -21.9% 9.761e+11 ± 0% perf-stat.L1-dcache-stores
6.895e+10 ± 1% -12.0% 6.067e+10 ± 0% perf-stat.L1-icache-load-misses
7.983e+10 ± 0% -9.8% 7.199e+10 ± 0% perf-stat.LLC-loads
1.609e+11 ± 0% -27.7% 1.163e+11 ± 0% perf-stat.LLC-stores
8.92e+11 ± 0% -24.6% 6.73e+11 ± 0% perf-stat.branch-instructions
4.957e+09 ± 0% -20.9% 3.923e+09 ± 1% perf-stat.branch-load-misses
8.903e+11 ± 0% -24.5% 6.725e+11 ± 0% perf-stat.branch-loads
4.963e+09 ± 0% -20.9% 3.925e+09 ± 1% perf-stat.branch-misses
4.416e+11 ± 0% -21.0% 3.488e+11 ± 0% perf-stat.cache-misses
4.417e+11 ± 0% -21.0% 3.489e+11 ± 0% perf-stat.cache-references
1.024e+08 ± 0% -65.4% 35461500 ± 0% perf-stat.context-switches
1.153e+13 ± 0% -1.3% 1.139e+13 ± 0% perf-stat.cpu-cycles
40497 ± 10% -48.3% 20955 ± 2% perf-stat.cpu-migrations
1.356e+09 ± 3% -38.7% 8.308e+08 ± 3% perf-stat.dTLB-load-misses
1.773e+12 ± 0% -23.3% 1.361e+12 ± 0% perf-stat.dTLB-loads
1.122e+09 ± 17% -37.3% 7.033e+08 ± 22% perf-stat.dTLB-store-misses
1.251e+12 ± 0% -21.8% 9.777e+11 ± 0% perf-stat.dTLB-stores
1.122e+09 ± 4% -38.0% 6.955e+08 ± 10% perf-stat.iTLB-load-misses
4.756e+12 ± 0% -23.7% 3.629e+12 ± 0% perf-stat.instructions
330533 ± 0% +4.5% 345307 ± 0% perf-stat.minor-faults
330530 ± 0% +4.5% 345272 ± 0% perf-stat.page-faults
40195 ± 15% +3697.4% 1526416 ± 3% sched_debug.cfs_rq:/.MIN_vruntime.avg
79275 ± 1% +1998.4% 1663557 ± 0% sched_debug.cfs_rq:/.MIN_vruntime.max
36697 ± 1% +903.9% 368405 ± 41% sched_debug.cfs_rq:/.MIN_vruntime.stddev
1418335 ± 6% -91.7% 117992 ± 5% sched_debug.cfs_rq:/.load.avg
1907632 ± 0% -90.6% 178528 ± 30% sched_debug.cfs_rq:/.load.max
848623 ± 0% -90.6% 79889 ± 4% sched_debug.cfs_rq:/.load.min
495671 ± 3% -93.8% 30899 ± 63% sched_debug.cfs_rq:/.load.stddev
912.30 ± 1% -86.5% 122.71 ± 9% sched_debug.cfs_rq:/.load_avg.avg
1120 ± 5% -70.9% 325.71 ± 24% sched_debug.cfs_rq:/.load_avg.max
847.83 ± 0% -90.7% 79.17 ± 3% sched_debug.cfs_rq:/.load_avg.min
40195 ± 15% +3697.4% 1526416 ± 3% sched_debug.cfs_rq:/.max_vruntime.avg
79275 ± 1% +1998.4% 1663557 ± 0% sched_debug.cfs_rq:/.max_vruntime.max
36697 ± 1% +903.9% 368405 ± 41% sched_debug.cfs_rq:/.max_vruntime.stddev
76340 ± 0% +2071.9% 1658025 ± 0% sched_debug.cfs_rq:/.min_vruntime.avg
82383 ± 0% +1921.5% 1665398 ± 0% sched_debug.cfs_rq:/.min_vruntime.max
75027 ± 0% +2099.4% 1650121 ± 0% sched_debug.cfs_rq:/.min_vruntime.min
1817 ± 4% +121.6% 4026 ± 4% sched_debug.cfs_rq:/.min_vruntime.stddev
1.37 ± 6% +23.6% 1.69 ± 1% sched_debug.cfs_rq:/.nr_running.avg
0.48 ± 3% -42.2% 0.28 ± 19% sched_debug.cfs_rq:/.nr_running.stddev
850.46 ± 0% -90.5% 80.57 ± 1% sched_debug.cfs_rq:/.runnable_load_avg.avg
905.92 ± 0% -88.7% 102.62 ± 8% sched_debug.cfs_rq:/.runnable_load_avg.max
807.00 ± 0% -94.6% 43.92 ± 25% sched_debug.cfs_rq:/.runnable_load_avg.min
31.69 ± 9% -52.9% 14.94 ± 26% sched_debug.cfs_rq:/.runnable_load_avg.stddev
1818 ± 4% +121.5% 4027 ± 4% sched_debug.cfs_rq:/.spread0.stddev
723347 ± 3% -8.8% 659993 ± 6% sched_debug.cpu.avg_idle.avg
5.42 ± 22% -61.2% 2.10 ± 23% sched_debug.cpu.clock.stddev
5.42 ± 22% -61.2% 2.10 ± 23% sched_debug.cpu.clock_task.stddev
849.63 ± 0% -90.7% 78.95 ± 3% sched_debug.cpu.cpu_load[0].avg
905.50 ± 0% -88.9% 100.83 ± 9% sched_debug.cpu.cpu_load[0].max
801.58 ± 0% -94.8% 41.58 ± 27% sched_debug.cpu.cpu_load[0].min
32.09 ± 10% -49.5% 16.19 ± 22% sched_debug.cpu.cpu_load[0].stddev
857.22 ± 0% -90.4% 82.45 ± 4% sched_debug.cpu.cpu_load[1].avg
909.88 ± 3% -87.0% 118.62 ± 28% sched_debug.cpu.cpu_load[1].max
804.17 ± 1% -92.8% 57.75 ± 7% sched_debug.cpu.cpu_load[1].min
854.43 ± 0% -90.2% 83.40 ± 5% sched_debug.cpu.cpu_load[2].avg
905.12 ± 3% -86.7% 120.12 ± 33% sched_debug.cpu.cpu_load[2].max
806.42 ± 1% -92.0% 64.79 ± 1% sched_debug.cpu.cpu_load[2].min
850.18 ± 0% -90.2% 83.12 ± 4% sched_debug.cpu.cpu_load[3].avg
892.46 ± 2% -87.4% 112.29 ± 27% sched_debug.cpu.cpu_load[3].max
810.62 ± 1% -91.5% 69.29 ± 2% sched_debug.cpu.cpu_load[3].min
845.87 ± 0% -90.1% 83.49 ± 2% sched_debug.cpu.cpu_load[4].avg
880.92 ± 1% -88.2% 104.21 ± 18% sched_debug.cpu.cpu_load[4].max
812.58 ± 0% -91.0% 73.50 ± 2% sched_debug.cpu.cpu_load[4].min
18.63 ± 19% -58.3% 7.77 ± 76% sched_debug.cpu.cpu_load[4].stddev
908.25 ± 1% -13.8% 783.00 ± 7% sched_debug.cpu.curr->pid.min
1377580 ± 7% -91.5% 117102 ± 7% sched_debug.cpu.load.avg
1907627 ± 0% -90.8% 174984 ± 32% sched_debug.cpu.load.max
847899 ± 0% -91.0% 76223 ± 0% sched_debug.cpu.load.min
490876 ± 0% -93.6% 31482 ± 59% sched_debug.cpu.load.stddev
4.29 ± 10% -28.2% 3.08 ± 8% sched_debug.cpu.nr_running.max
0.88 ± 8% +90.5% 1.67 ± 0% sched_debug.cpu.nr_running.min
0.93 ± 12% -51.1% 0.46 ± 12% sched_debug.cpu.nr_running.stddev
3166259 ± 0% -65.4% 1094903 ± 0% sched_debug.cpu.nr_switches.avg
4656563 ± 0% -72.5% 1281469 ± 1% sched_debug.cpu.nr_switches.max
3020925 ± 0% -67.4% 986125 ± 0% sched_debug.cpu.nr_switches.min
385862 ± 2% -76.3% 91611 ± 5% sched_debug.cpu.nr_switches.stddev
17.99 ± 0% +71.6% 30.88 ± 0% perf-profile.cycles-pp.___sys_recvmsg.__sys_recvmsg.sys_recvmsg.entry_SYSCALL_64_fastpath
78.39 ± 0% -16.2% 65.66 ± 0% perf-profile.cycles-pp.___sys_sendmsg.__sys_sendmsg.sys_sendmsg.entry_SYSCALL_64_fastpath
15.21 ± 0% -35.5% 9.81 ± 4% perf-profile.cycles-pp.__alloc_pages_nodemask.alloc_kmem_pages_node.kmalloc_large_node.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34
12.78 ± 0% -31.6% 8.73 ± 2% perf-profile.cycles-pp.__alloc_skb._sctp_make_chunk.sctp_make_datafrag_empty.sctp_datamsg_from_user.sctp_sendmsg
4.03 ± 2% -46.1% 2.17 ± 2% perf-profile.cycles-pp.__alloc_skb.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24
7.88 ± 1% +90.6% 15.01 ± 0% perf-profile.cycles-pp.__do_softirq.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip.ip_finish_output2
3.50 ± 2% -34.4% 2.29 ± 3% perf-profile.cycles-pp.__do_softirq.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip.sctp_inq_push
11.38 ± 2% -41.1% 6.71 ± 2% perf-profile.cycles-pp.__free_kmem_pages.kfree.skb_free_head.skb_release_data.skb_release_all
11.35 ± 2% -41.1% 6.68 ± 2% perf-profile.cycles-pp.__free_pages.__free_kmem_pages.kfree.skb_free_head.skb_release_data
11.29 ± 2% -41.4% 6.62 ± 2% perf-profile.cycles-pp.__free_pages_ok.__free_pages.__free_kmem_pages.kfree.skb_free_head
11.76 ± 0% -33.2% 7.85 ± 2% perf-profile.cycles-pp.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb._sctp_make_chunk.sctp_make_datafrag_empty
3.78 ± 2% -47.9% 1.97 ± 2% perf-profile.cycles-pp.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb.sctp_packet_transmit.sctp_outq_flush
11.81 ± 0% -33.1% 7.90 ± 2% perf-profile.cycles-pp.__kmalloc_reserve.isra.34.__alloc_skb._sctp_make_chunk.sctp_make_datafrag_empty.sctp_datamsg_from_user
3.80 ± 2% -48.0% 1.98 ± 2% perf-profile.cycles-pp.__kmalloc_reserve.isra.34.__alloc_skb.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork
7.92 ± 1% +90.2% 15.06 ± 0% perf-profile.cycles-pp.__local_bh_enable_ip.ip_finish_output2.ip_finish_output.ip_output.ip_local_out
3.56 ± 2% -34.5% 2.33 ± 3% perf-profile.cycles-pp.__local_bh_enable_ip.sctp_inq_push.sctp_backlog_rcv.__release_sock.release_sock
10.93 ± 1% +54.2% 16.84 ± 0% perf-profile.cycles-pp.__netif_receive_skb.process_backlog.net_rx_action.__do_softirq.do_softirq_own_stack
10.91 ± 1% +54.2% 16.83 ± 0% perf-profile.cycles-pp.__netif_receive_skb_core.__netif_receive_skb.process_backlog.net_rx_action.__do_softirq
0.00 ± -1% +Inf% 1.27 ± 3% perf-profile.cycles-pp.__release_sock.release_sock.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
28.24 ± 0% -19.9% 22.63 ± 0% perf-profile.cycles-pp.__release_sock.release_sock.sctp_sendmsg.inet_sendmsg.sock_sendmsg
0.99 ± 2% -100.0% 0.00 ± -1% perf-profile.cycles-pp.__schedule.preempt_schedule_common._cond_resched.__release_sock.release_sock
1.21 ± 4% -40.3% 0.72 ± 5% perf-profile.cycles-pp.__schedule.schedule.schedule_timeout.sctp_skb_recv_datagram.sctp_recvmsg
18.77 ± 0% +68.8% 31.68 ± 0% perf-profile.cycles-pp.__sys_recvmsg.sys_recvmsg.entry_SYSCALL_64_fastpath
79.54 ± 0% -16.1% 66.70 ± 0% perf-profile.cycles-pp.__sys_sendmsg.sys_sendmsg.entry_SYSCALL_64_fastpath
1.47 ± 4% -61.9% 0.56 ± 3% perf-profile.cycles-pp.__wake_up_common.__wake_up_sync_key.sctp_data_ready.sctp_ulpq_tail_event.sctp_ulpq_tail_data
1.51 ± 4% -61.6% 0.58 ± 4% perf-profile.cycles-pp.__wake_up_sync_key.sctp_data_ready.sctp_ulpq_tail_event.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24
1.04 ± 2% -100.0% 0.00 ± -1% perf-profile.cycles-pp._cond_resched.__release_sock.release_sock.sctp_sendmsg.inet_sendmsg
8.75 ± 2% -50.7% 4.31 ± 5% perf-profile.cycles-pp._raw_spin_lock.free_one_page.__free_pages_ok.__free_pages.__free_kmem_pages
10.80 ± 0% -43.5% 6.10 ± 6% perf-profile.cycles-pp._raw_spin_lock_irqsave.get_page_from_freelist.__alloc_pages_nodemask.alloc_kmem_pages_node.kmalloc_large_node
13.78 ± 0% -28.1% 9.90 ± 1% perf-profile.cycles-pp._sctp_make_chunk.sctp_make_datafrag_empty.sctp_datamsg_from_user.sctp_sendmsg.inet_sendmsg
15.41 ± 0% -33.6% 10.23 ± 2% perf-profile.cycles-pp.alloc_kmem_pages_node.kmalloc_large_node.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb
1.44 ± 4% -61.7% 0.55 ± 4% perf-profile.cycles-pp.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.sctp_data_ready.sctp_ulpq_tail_event
10.65 ± 2% -16.5% 8.89 ± 1% perf-profile.cycles-pp.consume_skb.sctp_chunk_put.sctp_chunk_free.sctp_outq_sack.sctp_cmd_interpreter.isra.24
1.20 ± 3% -12.9% 1.05 ± 6% perf-profile.cycles-pp.copy_msghdr_from_user.___sys_recvmsg.__sys_recvmsg.sys_recvmsg.entry_SYSCALL_64_fastpath
1.78 ± 0% -18.3% 1.45 ± 3% perf-profile.cycles-pp.copy_msghdr_from_user.___sys_sendmsg.__sys_sendmsg.sys_sendmsg.entry_SYSCALL_64_fastpath
9.09 ± 1% +28.5% 11.69 ± 0% perf-profile.cycles-pp.copy_user_enhanced_fast_string.sctp_user_addto_chunk.sctp_datamsg_from_user.sctp_sendmsg.inet_sendmsg
8.11 ± 0% +28.6% 10.43 ± 1% perf-profile.cycles-pp.copy_user_enhanced_fast_string.skb_copy_datagram_iter.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
1.43 ± 4% -61.6% 0.55 ± 4% perf-profile.cycles-pp.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key.sctp_data_ready
7.94 ± 1% +89.9% 15.07 ± 0% perf-profile.cycles-pp.do_softirq.part.13.__local_bh_enable_ip.ip_finish_output2.ip_finish_output.ip_output
3.55 ± 2% -34.5% 2.33 ± 3% perf-profile.cycles-pp.do_softirq.part.13.__local_bh_enable_ip.sctp_inq_push.sctp_backlog_rcv.__release_sock
7.91 ± 1% +90.1% 15.04 ± 0% perf-profile.cycles-pp.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip.ip_finish_output2.ip_finish_output
3.52 ± 2% -34.4% 2.31 ± 3% perf-profile.cycles-pp.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip.sctp_inq_push.sctp_backlog_rcv
9.85 ± 2% -46.3% 5.29 ± 3% perf-profile.cycles-pp.free_one_page.__free_pages_ok.__free_pages.__free_kmem_pages.kfree
14.73 ± 0% -35.9% 9.44 ± 5% perf-profile.cycles-pp.get_page_from_freelist.__alloc_pages_nodemask.alloc_kmem_pages_node.kmalloc_large_node.__kmalloc_node_track_caller
74.88 ± 0% -16.3% 62.66 ± 0% perf-profile.cycles-pp.inet_sendmsg.sock_sendmsg.___sys_sendmsg.__sys_sendmsg.sys_sendmsg
8.19 ± 0% +85.8% 15.22 ± 0% perf-profile.cycles-pp.ip_finish_output.ip_output.ip_local_out.ip_queue_xmit.sctp_v4_xmit
8.16 ± 0% +86.5% 15.21 ± 0% perf-profile.cycles-pp.ip_finish_output2.ip_finish_output.ip_output.ip_local_out.ip_queue_xmit
10.41 ± 1% +57.5% 16.39 ± 0% perf-profile.cycles-pp.ip_local_deliver.ip_rcv_finish.ip_rcv.__netif_receive_skb_core.__netif_receive_skb
10.37 ± 1% +57.7% 16.35 ± 1% perf-profile.cycles-pp.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv.__netif_receive_skb_core
9.63 ± 1% +62.1% 15.61 ± 2% perf-profile.cycles-pp.ip_local_out.ip_queue_xmit.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush
9.36 ± 3% +65.1% 15.46 ± 1% perf-profile.cycles-pp.ip_output.ip_local_out.ip_queue_xmit.sctp_v4_xmit.sctp_packet_transmit
0.00 ± -1% +Inf% 10.25 ± 1% perf-profile.cycles-pp.ip_queue_xmit.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush.sctp_outq_tail
9.86 ± 1% -42.1% 5.71 ± 1% perf-profile.cycles-pp.ip_queue_xmit.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork
10.67 ± 1% +55.8% 16.62 ± 0% perf-profile.cycles-pp.ip_rcv.__netif_receive_skb_core.__netif_receive_skb.process_backlog.net_rx_action
10.49 ± 1% +57.0% 16.47 ± 0% perf-profile.cycles-pp.ip_rcv_finish.ip_rcv.__netif_receive_skb_core.__netif_receive_skb.process_backlog
9.08 ± 2% -36.0% 5.81 ± 2% perf-profile.cycles-pp.kfree.skb_free_head.skb_release_data.skb_release_all.consume_skb
3.00 ± 2% -38.1% 1.86 ± 3% perf-profile.cycles-pp.kfree.skb_free_head.skb_release_data.skb_release_all.kfree_skb
3.68 ± 1% -26.7% 2.69 ± 3% perf-profile.cycles-pp.kfree_skb.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
11.68 ± 0% -33.3% 7.80 ± 2% perf-profile.cycles-pp.kmalloc_large_node.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb._sctp_make_chunk
3.77 ± 2% -34.3% 2.48 ± 2% perf-profile.cycles-pp.kmalloc_large_node.__kmalloc_node_track_caller.__kmalloc_reserve.isra.34.__alloc_skb.sctp_packet_transmit
0.00 ± -1% +Inf% 5.48 ± 3% perf-profile.cycles-pp.memcpy_erms.sctp_packet_transmit_chunk.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24
8.74 ± 2% -50.7% 4.30 ± 5% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock.free_one_page.__free_pages_ok.__free_pages
10.17 ± 1% -44.2% 5.68 ± 7% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irqsave.get_page_from_freelist.__alloc_pages_nodemask.alloc_kmem_pages_node
11.21 ± 1% +52.8% 17.13 ± 0% perf-profile.cycles-pp.net_rx_action.__do_softirq.do_softirq_own_stack.do_softirq.part.13.__local_bh_enable_ip
1.04 ± 3% -100.0% 0.00 ± -1% perf-profile.cycles-pp.preempt_schedule_common._cond_resched.__release_sock.release_sock.sctp_sendmsg
11.05 ± 1% +53.5% 16.96 ± 0% perf-profile.cycles-pp.process_backlog.net_rx_action.__do_softirq.do_softirq_own_stack.do_softirq.part.13
0.00 ± -1% +Inf% 1.38 ± 3% perf-profile.cycles-pp.release_sock.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg
28.41 ± 0% -19.9% 22.75 ± 0% perf-profile.cycles-pp.release_sock.sctp_sendmsg.inet_sendmsg.sock_sendmsg.___sys_sendmsg
1.27 ± 4% -39.4% 0.77 ± 3% perf-profile.cycles-pp.schedule.schedule_timeout.sctp_skb_recv_datagram.sctp_recvmsg.sock_common_recvmsg
1.30 ± 4% -39.4% 0.79 ± 4% perf-profile.cycles-pp.schedule_timeout.sctp_skb_recv_datagram.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
23.46 ± 1% -9.0% 21.34 ± 0% perf-profile.cycles-pp.sctp_assoc_bh_rcv.sctp_inq_push.sctp_backlog_rcv.__release_sock.release_sock
8.11 ± 1% +73.2% 14.05 ± 1% perf-profile.cycles-pp.sctp_assoc_bh_rcv.sctp_inq_push.sctp_rcv.ip_local_deliver_finish.ip_local_deliver
0.00 ± -1% +Inf% 11.48 ± 1% perf-profile.cycles-pp.sctp_assoc_rwnd_increase.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg
0.00 ± -1% +Inf% 1.23 ± 3% perf-profile.cycles-pp.sctp_backlog_rcv.__release_sock.release_sock.sctp_recvmsg.sock_common_recvmsg
27.11 ± 0% -16.8% 22.54 ± 0% perf-profile.cycles-pp.sctp_backlog_rcv.__release_sock.release_sock.sctp_sendmsg.inet_sendmsg
11.42 ± 1% -16.6% 9.52 ± 1% perf-profile.cycles-pp.sctp_chunk_free.sctp_outq_sack.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv
11.12 ± 1% -16.5% 9.29 ± 1% perf-profile.cycles-pp.sctp_chunk_put.sctp_chunk_free.sctp_outq_sack.sctp_cmd_interpreter.isra.24.sctp_do_sm
23.05 ± 1% -10.6% 20.60 ± 0% perf-profile.cycles-pp.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push.sctp_backlog_rcv
6.30 ± 2% +103.1% 12.80 ± 1% perf-profile.cycles-pp.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push.sctp_rcv
13.52 ± 0% -30.0% 9.46 ± 0% perf-profile.cycles-pp.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_primitive_SEND.sctp_sendmsg.inet_sendmsg
1.62 ± 4% -60.6% 0.64 ± 4% perf-profile.cycles-pp.sctp_data_ready.sctp_ulpq_tail_event.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24.sctp_do_sm
23.13 ± 1% -9.7% 20.88 ± 0% perf-profile.cycles-pp.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push.sctp_backlog_rcv.__release_sock
7.19 ± 2% +85.9% 13.36 ± 0% perf-profile.cycles-pp.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push.sctp_rcv.ip_local_deliver_finish
13.32 ± 0% -29.3% 9.41 ± 0% perf-profile.cycles-pp.sctp_do_sm.sctp_primitive_SEND.sctp_sendmsg.inet_sendmsg.sock_sendmsg
0.00 ± -1% +Inf% 1.23 ± 3% perf-profile.cycles-pp.sctp_inq_push.sctp_backlog_rcv.__release_sock.release_sock.sctp_recvmsg
24.60 ± 1% -14.8% 20.97 ± 0% perf-profile.cycles-pp.sctp_inq_push.sctp_backlog_rcv.__release_sock.release_sock.sctp_sendmsg
8.28 ± 1% +71.5% 14.19 ± 1% perf-profile.cycles-pp.sctp_inq_push.sctp_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish
14.48 ± 0% -27.4% 10.51 ± 1% perf-profile.cycles-pp.sctp_make_datafrag_empty.sctp_datamsg_from_user.sctp_sendmsg.inet_sendmsg.sock_sendmsg
0.00 ± -1% +Inf% 3.40 ± 2% perf-profile.cycles-pp.sctp_outq_flush.sctp_outq_tail.sctp_assoc_rwnd_increase.sctp_ulpevent_free.sctp_recvmsg
11.73 ± 0% +60.2% 18.79 ± 1% perf-profile.cycles-pp.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv
16.10 ± 0% -34.5% 10.55 ± 0% perf-profile.cycles-pp.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_primitive_SEND
12.62 ± 1% -13.3% 10.94 ± 0% perf-profile.cycles-pp.sctp_outq_sack.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push
0.00 ± -1% +Inf% 10.94 ± 1% perf-profile.cycles-pp.sctp_outq_tail.sctp_assoc_rwnd_increase.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg
11.81 ± 0% +60.2% 18.92 ± 1% perf-profile.cycles-pp.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push
16.26 ± 0% -34.3% 10.68 ± 0% perf-profile.cycles-pp.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_primitive_SEND.sctp_sendmsg
0.00 ± -1% +Inf% 9.57 ± 2% perf-profile.cycles-pp.sctp_packet_transmit.sctp_outq_flush.sctp_outq_tail.sctp_assoc_rwnd_increase.sctp_ulpevent_free
14.75 ± 0% -37.8% 9.18 ± 1% perf-profile.cycles-pp.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm
0.00 ± -1% +Inf% 1.22 ± 1% perf-profile.cycles-pp.sctp_packet_transmit.sctp_packet_transmit_chunk.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24
0.13 ±173% +5482.4% 7.12 ± 1% perf-profile.cycles-pp.sctp_packet_transmit_chunk.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24.sctp_do_sm
18.60 ± 0% -33.0% 12.46 ± 0% perf-profile.cycles-pp.sctp_primitive_SEND.sctp_sendmsg.inet_sendmsg.sock_sendmsg.___sys_sendmsg
9.94 ± 1% +60.4% 15.94 ± 1% perf-profile.cycles-pp.sctp_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv
15.63 ± 1% +83.3% 28.65 ± 0% perf-profile.cycles-pp.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg.__sys_recvmsg
74.59 ± 0% -16.3% 62.43 ± 0% perf-profile.cycles-pp.sctp_sendmsg.inet_sendmsg.sock_sendmsg.___sys_sendmsg.__sys_sendmsg
1.68 ± 4% -35.4% 1.08 ± 6% perf-profile.cycles-pp.sctp_skb_recv_datagram.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg
4.26 ± 1% +236.0% 14.31 ± 0% perf-profile.cycles-pp.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg
1.49 ± 1% -65.9% 0.51 ± 70% perf-profile.cycles-pp.sctp_ulpevent_make_rcvmsg.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv
3.74 ± 2% -32.7% 2.51 ± 1% perf-profile.cycles-pp.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv.sctp_inq_push
1.74 ± 3% -59.1% 0.71 ± 3% perf-profile.cycles-pp.sctp_ulpq_tail_event.sctp_ulpq_tail_data.sctp_cmd_interpreter.isra.24.sctp_do_sm.sctp_assoc_bh_rcv
9.67 ± 1% +27.1% 12.29 ± 0% perf-profile.cycles-pp.sctp_user_addto_chunk.sctp_datamsg_from_user.sctp_sendmsg.inet_sendmsg.sock_sendmsg
0.00 ± -1% +Inf% 10.24 ± 1% perf-profile.cycles-pp.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush.sctp_outq_tail.sctp_assoc_rwnd_increase
9.91 ± 1% -41.5% 5.79 ± 1% perf-profile.cycles-pp.sctp_v4_xmit.sctp_packet_transmit.sctp_outq_flush.sctp_outq_uncork.sctp_cmd_interpreter.isra.24
0.61 ± 1% +165.9% 1.64 ± 3% perf-profile.cycles-pp.sctp_wfree.skb_release_head_state.skb_release_all.consume_skb.sctp_chunk_put
8.56 ± 1% +28.2% 10.98 ± 1% perf-profile.cycles-pp.skb_copy_datagram_iter.sctp_recvmsg.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg
9.10 ± 2% -36.0% 5.83 ± 2% perf-profile.cycles-pp.skb_free_head.skb_release_data.skb_release_all.consume_skb.sctp_chunk_put
3.00 ± 2% -38.0% 1.86 ± 2% perf-profile.cycles-pp.skb_free_head.skb_release_data.skb_release_all.kfree_skb.sctp_ulpevent_free
9.94 ± 2% -17.4% 8.21 ± 2% perf-profile.cycles-pp.skb_release_all.consume_skb.sctp_chunk_put.sctp_chunk_free.sctp_outq_sack
3.45 ± 2% -32.6% 2.33 ± 2% perf-profile.cycles-pp.skb_release_all.kfree_skb.sctp_ulpevent_free.sctp_recvmsg.sock_common_recvmsg
9.14 ± 2% -35.7% 5.88 ± 2% perf-profile.cycles-pp.skb_release_data.skb_release_all.consume_skb.sctp_chunk_put.sctp_chunk_free
3.19 ± 2% -35.2% 2.06 ± 3% perf-profile.cycles-pp.skb_release_data.skb_release_all.kfree_skb.sctp_ulpevent_free.sctp_recvmsg
0.78 ± 1% +150.6% 1.94 ± 3% perf-profile.cycles-pp.skb_release_head_state.skb_release_all.consume_skb.sctp_chunk_put.sctp_chunk_free
15.77 ± 1% +83.0% 28.85 ± 0% perf-profile.cycles-pp.sock_common_recvmsg.sock_recvmsg.___sys_recvmsg.__sys_recvmsg.sys_recvmsg
16.23 ± 1% +80.7% 29.32 ± 0% perf-profile.cycles-pp.sock_recvmsg.___sys_recvmsg.__sys_recvmsg.sys_recvmsg.entry_SYSCALL_64_fastpath
75.34 ± 0% -16.2% 63.14 ± 0% perf-profile.cycles-pp.sock_sendmsg.___sys_sendmsg.__sys_sendmsg.sys_sendmsg.entry_SYSCALL_64_fastpath
18.79 ± 0% +68.7% 31.69 ± 0% perf-profile.cycles-pp.sys_recvmsg.entry_SYSCALL_64_fastpath
79.56 ± 0% -16.1% 66.71 ± 0% perf-profile.cycles-pp.sys_sendmsg.entry_SYSCALL_64_fastpath
1.40 ± 4% -61.1% 0.54 ± 3% perf-profile.cycles-pp.try_to_wake_up.default_wake_function.autoremove_wake_function.__wake_up_common.__wake_up_sync_key
Thanks,
Xiaolong
[lkp] [sched] ec58e50592: WARNING: CPU: 0 PID: 5132 at kernel/fork.c:296 free_task+0x35/0x5f
by kernel test robot
FYI, we noticed the following commit:
https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/vmap_stack
commit ec58e505925c46bd43f9c4275c78292d4483af16 ("sched: Free the stack early if CONFIG_THREAD_INFO_IN_TASK")
in testcase: trinity
with following parameters: runtime=300s
on test machine: vm-kbuild-2G: 2 threads qemu-system-x86_64 -enable-kvm -cpu Haswell,+smep,+smap with 2G memory
caused below changes:
+-------------------------------------+------------+------------+
| | ddf4847e6f | ec58e50592 |
+-------------------------------------+------------+------------+
| boot_successes | 8 | 4 |
| boot_failures | 0 | 4 |
| WARNING:at_kernel/fork.c:#free_task | 0 | 4 |
| backtrace:_do_fork | 0 | 4 |
| backtrace:SyS_clone | 0 | 4 |
+-------------------------------------+------------+------------+
[ 29.950472] sock: process `trinity-main' is using obsolete setsockopt SO_BSDCOMPAT
[ 39.309659] VFS: Warning: trinity-c0 using old stat() call. Recompile your binary.
[ 40.512647] ------------[ cut here ]------------
[ 40.514588] WARNING: CPU: 0 PID: 5132 at kernel/fork.c:296 free_task+0x35/0x5f
[ 40.523082] Modules linked in: acpi_cpufreq
[ 40.525310] CPU: 0 PID: 5132 Comm: trinity-c0 Not tainted 4.7.0-rc4-00255-gec58e50 #1
[ 40.530022] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014
[ 40.546800] 0000000000000000 ffff88007e90fd50 ffffffff814e4776 0000000000000000
[ 40.559103] 0000000000000000 ffff88007e90fd90 ffffffff8110267c 000001287e90fda0
[ 40.572406] ffff88007e84d280 ffff8800751b2800 00000000fffffff4 ffff88007e42f0b8
[ 40.584404] Call Trace:
[ 40.586590] [<ffffffff814e4776>] dump_stack+0x85/0xbe
[ 40.588487] [<ffffffff8110267c>] __warn+0xca/0xe5
[ 40.600659] [<ffffffff81102753>] warn_slowpath_null+0x1d/0x1f
[ 40.607024] [<ffffffff810ffcc8>] free_task+0x35/0x5f
[ 40.610301] [<ffffffff81100a03>] copy_process+0x710/0x1717
[ 40.612306] [<ffffffff81101bb6>] _do_fork+0xbe/0x358
[ 40.623198] [<ffffffff81101ed6>] SyS_clone+0x19/0x1b
[ 40.628181] [<ffffffff810019c6>] do_syscall_64+0x69/0xc6
[ 40.634729] [<ffffffff81a9ae44>] entry_SYSCALL64_slow_path+0x25/0x25
[ 40.650469] ---[ end trace 942898900a0dd524 ]---
[ 222.049702] random: nonblocking pool is initialized
FYI, raw QEMU command line is:
qemu-system-x86_64 -enable-kvm -cpu Haswell,+smep,+smap -kernel /pkg/linux/x86_64-lkp/gcc-4.9/ec58e505925c46bd43f9c4275c78292d4483af16/vmlinuz-4.7.0-rc4-00255-gec58e50 -append 'root=/dev/ram0 user=lkp job=/lkp/scheduled/vm-kbuild-2G-11/bisect_trinity-300s-debian-x86_64-2015-02-07.cgz-x86_64-lkp-ec58e505925c46bd43f9c4275c78292d4483af16-20160627-111363-1oj47zp-0.yaml ARCH=x86_64 kconfig=x86_64-lkp branch=luto/x86/vmap_stack commit=ec58e505925c46bd43f9c4275c78292d4483af16 BOOT_IMAGE=/pkg/linux/x86_64-lkp/gcc-4.9/ec58e505925c46bd43f9c4275c78292d4483af16/vmlinuz-4.7.0-rc4-00255-gec58e50 max_uptime=1500 RESULT_ROOT=/result/trinity/300s/vm-kbuild-2G/debian-x86_64-2015-02-07.cgz/x86_64-lkp/gcc-4.9/ec58e505925c46bd43f9c4275c78292d4483af16/0 LKP_SERVER=inn earlyprintk=ttyS0,115200 systemd.log_level=err debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw ip=::::vm-kbuild-2G-11::dhcp' -initrd /fs/sdd1/initrd-vm-kbuild-2G-11 -m 2048 -smp 2 -device e1000,netdev=net0 -netdev user,id=net0,hostfwd=tcp::23026-:22 -boot order=nc -no-reboot -watchdog i6300esb -rtc base=localtime -drive file=/fs/sdd1/disk0-vm-kbuild-2G-11,media=disk,if=virtio -drive file=/fs/sdd1/disk1-vm-kbuild-2G-11,media=disk,if=virtio -drive file=/fs/sdd1/disk2-vm-kbuild-2G-11,media=disk,if=virtio -drive file=/fs/sdd1/disk3-vm-kbuild-2G-11,media=disk,if=virtio -drive file=/fs/sdd1/disk4-vm-kbuild-2G-11,media=disk,if=virtio -drive file=/fs/sdd1/disk5-vm-kbuild-2G-11,media=disk,if=virtio -drive file=/fs/sdd1/disk6-vm-kbuild-2G-11,media=disk,if=virtio -pidfile /dev/shm/kboot/pid-vm-kbuild-2G-11 -serial file:/dev/shm/kboot/serial-vm-kbuild-2G-11 -daemonize -display none -monitor null
To reproduce:
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml
Thanks,
Xiaolong
[lkp] [arm64] 60edbdb955: BUG: unable to handle kernel paging request at ffff8800bffe0000
by kernel test robot
FYI, we noticed the following commit:
https://github.com/0day-ci/linux Manjeet-Pawar/arm64-swiotlb-Enable-only-when-Input-size-through-command-line/20160623-205508
commit 60edbdb955e718ac4ea403f13edc30d4dc31fbeb ("arm64:swiotlb:Enable only when Input size through command line")
on test machine: vm-lkp-wsx03-4G: 2 threads qemu-system-x86_64 -enable-kvm -cpu host with 4G memory
caused below changes:
+----------------------------------------------------------------------------+----------+------------+
| | v4.7-rc4 | 60edbdb955 |
+----------------------------------------------------------------------------+----------+------------+
| boot_successes | 5972 | 19 |
| boot_failures | 87 | 20 |
| Initramfs_unpacking_failed | 1 | |
| Kernel_panic-not_syncing:VFS:Unable_to_mount_root_fs_on_unknown-block(#,#) | 1 | |
| backtrace:prepare_namespace | 1 | |
| backtrace:kernel_init_freeable | 14 | 4 |
| kernel_BUG_at_fs/btrfs/ctree.c | 1 | |
| invalid_opcode:#[##]SMP | 1 | |
| RIP:btrfs_set_item_key_safe[btrfs] | 1 | |
| Kernel_panic-not_syncing:Fatal_exception | 1 | 1 |
| backtrace:vfs_write | 24 | |
| backtrace:SyS_write | 23 | |
| WARNING:at_drivers/gpu/drm/i915/intel_display.c:#intel_modeset_init[i915] | 5 | |
| backtrace:intel_modeset_init | 5 | |
| backtrace:warn_slowpath_fmt | 5 | |
| backtrace:__pci_register_driver | 5 | |
| backtrace:drm_pci_init | 5 | |
| backtrace:i915_init | 5 | |
| backtrace:do_init_module | 25 | |
| backtrace:load_module | 24 | |
| backtrace:SYSC_finit_module | 24 | |
| backtrace:SyS_finit_module | 24 | |
| drm:fw_domains_get[i915]] | 20 | |
| drm:__gen6_gt_wait_for_thread_c0[i915]] | 20 | |
| BUG:sleeping_function_called_from_invalid_context_at_kernel/irq/manage.c | 20 | |
| backtrace:snb_uncore_imc_init_box | 20 | |
| backtrace:register_console | 19 | |
| backtrace:init_netconsole | 20 | |
| backtrace:netpoll_poll_dev | 1 | |
| invoked_oom-killer:gfp_mask=0x | 39 | |
| Mem-Info | 39 | |
| Out_of_memory:Kill_process | 26 | |
| backtrace:_do_fork | 9 | |
| backtrace:SyS_clone | 8 | |
| backtrace:do_execveat_common | 2 | |
| backtrace:SyS_execve | 2 | |
| backtrace:kimage_load_segment | 3 | |
| backtrace:SyS_kexec_load | 3 | |
| WARNING:at_mm/early_ioremap.c:#__early_ioremap | 1 | |
| backtrace:acpi_initialize_tables | 1 | |
| backtrace:acpi_table_init | 1 | |
| backtrace:acpi_boot_table_init | 1 | |
| WARNING:at_arch/x86/events/intel/core.c:#intel_pmu_handle_irq | 6 | |
| backtrace:pgd_alloc | 1 | |
| backtrace:mm_init | 1 | |
| WARNING:at_fs/xfs/xfs_file.c:#xfs_file_read_iter | 1 | |
| WARNING:at_fs/xfs/xfs_file.c:#xfs_file_dio_aio_write | 2 | |
| backtrace:vfs_read | 3 | |
| backtrace:SyS_read | 3 | |
| backtrace:SyS_pwrite64 | 1 | |
| backtrace:vm_mmap_pgoff | 2 | |
| backtrace:SyS_mmap_pgoff | 2 | |
| backtrace:SyS_mmap | 2 | |
| BUG:kernel_test_crashed | 4 | |
| BUG:kernel_test_hang | 7 | |
| Kernel_panic-not_syncing:Out_of_memory_and_no_killable_processes | 13 | |
| backtrace:populate_rootfs | 13 | |
| BUG:unable_to_handle_kernel | 0 | 20 |
| Oops | 0 | 20 |
| RIP:__memcpy | 0 | 15 |
| Kernel_panic-not_syncing:Fatal_exception_in_interrupt | 0 | 19 |
| backtrace:cpu_startup_entry | 0 | 4 |
| backtrace:ip_auto_config | 0 | 4 |
| RIP:clear_page | 0 | 1 |
| backtrace:SYSC_newlstat | 0 | 1 |
| backtrace:SyS_newlstat | 0 | 1 |
| backtrace:nfs_create_rpc_client | 0 | 2 |
| backtrace:nfs4_init_client | 0 | 2 |
| backtrace:do_mount | 0 | 2 |
| backtrace:SyS_mount | 0 | 2 |
| backtrace:xs_tcp_setup_socket | 0 | 1 |
| backtrace:addrconf_dad_work | 0 | 1 |
| RIP:memcpy_orig | 0 | 4 |
+----------------------------------------------------------------------------+----------+------------+
[ 5.237245] input: ImExPS/2 BYD TouchPad as /devices/platform/i8042/serio1/input/input3
[ 6.617721] e1000: eth0 NIC Link is Up 1000 Mbps Full Duplex, Flow Control: RX
[ 6.620309] IPv6: ADDRCONF(NETDEV_CHANGE): eth0: link becomes ready
[ 6.624262] BUG: unable to handle kernel paging request at ffff8800bffe0000
[ 6.625923] IP: [<ffffffff81440f12>] __memcpy+0x12/0x20
[ 6.627251] PGD 23c0067 PUD 13ffff067 PMD 13fffe067 PTE 0
[ 6.628834] Oops: 0002 [#1] SMP
[ 6.629816] Modules linked in:
[ 6.630854] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.7.0-rc4-00001-g60edbdb #1
[ 6.632902] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014
[ 6.635146] task: ffffffff81e0d500 ti: ffffffff81e00000 task.ti: ffffffff81e00000
[ 6.637167] RIP: 0010:[<ffffffff81440f12>] [<ffffffff81440f12>] __memcpy+0x12/0x20
[ 6.639320] RSP: 0018:ffff88013fc03990 EFLAGS: 00010202
[ 6.640576] RAX: ffff8800bffe0000 RBX: 00000000bffe0000 RCX: 000000000000000b
[ 6.642071] RDX: 0000000000000002 RSI: ffff8801328f3802 RDI: ffff8800bffe0000
[ 6.643567] RBP: ffff88013fc039f8 R08: 00000001328f3802 R09: 0000160000000000
[ 6.645069] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
[ 6.646559] R13: 0000000000000000 R14: 0000000000000001 R15: 0000000000200000
[ 6.648066] FS: 0000000000000000(0000) GS:ffff88013fc00000(0000) knlGS:0000000000000000
[ 6.650171] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 6.651483] CR2: ffff8800bffe0000 CR3: 0000000001e06000 CR4: 00000000000006f0
[ 6.652977] Stack:
[ 6.653798] ffffffff8145b88e 0000000100000000 00000001328f3802 0000000000000001
[ 6.656135] 0000000000000202 ffff88013a5af0a0 000000000000005a 000000000017ffc0
[ 6.658459] 00000001328f3802 ffff88013a5af0a0 000000000000005a ffffc9000075e000
[ 6.660777] Call Trace:
[ 6.661653] <IRQ>
[ 6.661991] [<ffffffff8145b88e>] ? swiotlb_tbl_map_single+0x26e/0x2c0
[ 6.664004] [<ffffffff8145beb2>] swiotlb_map_page+0x82/0x1c0
[ 6.665325] [<ffffffff8161de31>] e1000_xmit_frame+0x351/0x1110
[ 6.666668] [<ffffffff817dbcd1>] dev_hard_start_xmit+0xa1/0x220
[ 6.668023] [<ffffffff81801c3c>] sch_direct_xmit+0xdc/0x1b0
[ 6.669320] [<ffffffff817dc381>] __dev_queue_xmit+0x3e1/0x640
[ 6.670639] [<ffffffff817e95d7>] ? __neigh_create+0x447/0x5a0
[ 6.671965] [<ffffffff817dc5f0>] dev_queue_xmit+0x10/0x20
[ 6.673242] [<ffffffff817e5f88>] neigh_resolve_output+0x118/0x1c0
[ 6.674622] [<ffffffff8188e2f8>] ip6_finish_output2+0x188/0x4a0
[ 6.675978] [<ffffffff813c6fea>] ? selinux_ipv6_postroute+0x1a/0x20
[ 6.677390] [<ffffffff8188f956>] ip6_finish_output+0xa6/0x100
[ 6.678712] [<ffffffff8188f9f8>] ip6_output+0x48/0xf0
[ 6.679956] [<ffffffff8188f8b0>] ? ip6_fragment+0x9c0/0x9c0
[ 6.681259] [<ffffffff818b1a70>] NF_HOOK_THRESH+0x30/0xa0
[ 6.682687] [<ffffffff8189f271>] ? icmp6_dst_alloc+0x121/0x170
[ 6.684060] [<ffffffff818b1c44>] mld_sendpack+0x164/0x210
[ 6.685335] [<ffffffff818b2eb3>] mld_ifc_timer_expire+0x193/0x2a0
[ 6.686699] [<ffffffff818b2d20>] ? mld_dad_timer_expire+0x60/0x60
[ 6.688072] [<ffffffff810ea575>] call_timer_fn+0x35/0x140
[ 6.689344] [<ffffffff818b2d20>] ? mld_dad_timer_expire+0x60/0x60
[ 6.690705] [<ffffffff810eb54d>] run_timer_softirq+0x23d/0x2e0
[ 6.692045] [<ffffffff8191a35f>] __do_softirq+0xff/0x2d2
[ 6.693309] [<ffffffff81083468>] irq_exit+0xe8/0xf0
[ 6.694531] [<ffffffff8191a152>] smp_apic_timer_interrupt+0x42/0x50
[ 6.695914] [<ffffffff8191829c>] apic_timer_interrupt+0x8c/0xa0
[ 6.697264] <EOI>
[ 6.697596] [<ffffffff8105ff26>] ? native_safe_halt+0x6/0x10
[ 6.699487] [<ffffffff8103785e>] default_idle+0x1e/0xf0
[ 6.700731] [<ffffffff8103803f>] arch_cpu_idle+0xf/0x20
[ 6.701987] [<ffffffff810c2203>] default_idle_call+0x33/0x40
[ 6.703304] [<ffffffff810c24c3>] cpu_startup_entry+0x2b3/0x310
[ 6.704644] [<ffffffff8190a445>] rest_init+0x85/0x90
[ 6.705861] [<ffffffff81fe0f77>] start_kernel+0x42e/0x43b
[ 6.707145] [<ffffffff81fe0120>] ? early_idt_handler_array+0x120/0x120
[ 6.708566] [<ffffffff81fe0481>] x86_64_start_reservations+0x2f/0x31
[ 6.724069] [<ffffffff81fe05be>] x86_64_start_kernel+0x13b/0x14a
[ 6.725424] Code: 75 05 e8 72 fb ff ff 48 8b 43 60 48 2b 43 50 88 43 4e 5b 5d f3 c3 90 90 90 66 66 90 66 90 48 89 f8 48 89 d1 48 c1 e9 03 83 e2 07 <f3> 48 a5 89 d1 f3 a4 c3 66 0f 1f 44 00 00 48 89 f8 48 89 d1 f3
[ 6.734615] RIP [<ffffffff81440f12>] __memcpy+0x12/0x20
[ 6.735954] RSP <ffff88013fc03990>
[ 6.736965] CR2: ffff8800bffe0000
[ 6.737955] ---[ end trace 711199b99b33e43a ]---
[ 6.739120] Kernel panic - not syncing: Fatal exception in interrupt
FYI, raw QEMU command line is:
qemu-system-x86_64 -enable-kvm -cpu host -kernel /pkg/linux/x86_64-rhel/gcc-4.9/60edbdb955e718ac4ea403f13edc30d4dc31fbeb/vmlinuz-4.7.0-rc4-00001-g60edbdb -append 'root=/dev/ram0 user=lkp job=/lkp/scheduled/vm-lkp-wsx03-4G-5/bisect_boot-1-debian-x86_64-2015-02-07.cgz-x86_64-rhel-60edbdb955e718ac4ea403f13edc30d4dc31fbeb-20160627-118234-1szxl6h-0.yaml ARCH=x86_64 kconfig=x86_64-rhel branch=linux-devel/devel-hourly-2016062408 commit=60edbdb955e718ac4ea403f13edc30d4dc31fbeb BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/60edbdb955e718ac4ea403f13edc30d4dc31fbeb/vmlinuz-4.7.0-rc4-00001-g60edbdb max_uptime=600 RESULT_ROOT=/result/boot/1/vm-lkp-wsx03-4G/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/60edbdb955e718ac4ea403f13edc30d4dc31fbeb/0 LKP_SERVER=inn earlyprintk=ttyS0,115200 systemd.log_level=err debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw ip=::::vm-lkp-wsx03-4G-5::dhcp' -initrd /fs/sdc1/initrd-vm-lkp-wsx03-4G-5 -m 4096 -smp 2 -device e1000,netdev=net0 -netdev user,id=net0,hostfwd=tcp::23644-:22 -boot order=nc -no-reboot -watchdog i6300esb -rtc base=localtime -drive file=/fs/sdc1/disk0-vm-lkp-wsx03-4G-5,media=disk,if=virtio -drive file=/fs/sdc1/disk1-vm-lkp-wsx03-4G-5,media=disk,if=virtio -pidfile /dev/shm/kboot/pid-vm-lkp-wsx03-4G-5 -serial file:/dev/shm/kboot/serial-vm-lkp-wsx03-4G-5 -daemonize -display none -monitor null
Thanks,
Xiaolong
Re: [LKP] [lkp] [dcache_{readdir, dir_lseek}() users] 4e82901cd6: reaim.jobs_per_min -49.1% regression
by Linus Torvalds
On Sun, Jun 26, 2016 at 2:50 AM, Thorsten Leemhuis
<regressions@leemhuis.info> wrote:
>
> Al, what's the status here? This made it on my 4.7 regressions report
> due to the "regression" keyword in the subject.
I don't think the tmpfs locking is going to get changed for 4.7. This
issue will likely only show up for some very specific microbenchmarks,
and the lockless next_positive one is likely too invasive for this
stage. So the problem is fixable, and not serious enough to worry
about for 4.7.
Linus
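(For context: next_positive() is the helper that was being floated for the dcache_readdir()/dcache_dir_lseek() paths to advance a directory cursor to the next positive child dentry. Below is a rough sketch of the simple, d_lock-protected form of that idea; it assumes the 4.7-era d_subdirs/d_child dentry layout and is an illustration, not the actual patch under discussion. The "lockless" variant Linus refers to would avoid holding d_lock across the whole scan.)

/* Sketch only: walk parent's children starting at `from`, return the
 * count-th positive (non-negative) dentry, or NULL if none is found. */
static struct dentry *next_positive(struct dentry *parent,
				    struct list_head *from, int count)
{
	struct dentry *res = NULL;
	struct list_head *p;

	spin_lock(&parent->d_lock);
	for (p = from->next; p != &parent->d_subdirs; p = p->next) {
		struct dentry *d = list_entry(p, struct dentry, d_child);
		/* skip negative entries and cursors; stop at the count-th hit */
		if (simple_positive(d) && !--count) {
			res = d;
			break;
		}
	}
	spin_unlock(&parent->d_lock);
	return res;
}

(The microbenchmark sensitivity Linus mentions presumably comes from many concurrent readers of one directory serializing on this scan.)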
[x86] 87194cac13: BUG: unable to handle kernel
by kernel test robot
FYI, we noticed the following commit:
https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/vmap_stack
commit 87194cac139aebecec89a68eff719ab44f0469a2 ("x86: Move thread_info into task_struct")
on test machine: vm-vp-quantal-x86_64: 2 threads qemu-system-x86_64 -enable-kvm with 360M memory
caused below changes:
+------------------------------------------+------------+------------+
| | 01ac3242f3 | 87194cac13 |
+------------------------------------------+------------+------------+
| boot_successes | 10 | 0 |
| boot_failures | 2 | 34 |
| BUG:kernel_test_oversize | 2 | |
| BUG:unable_to_handle_kernel | 0 | 34 |
| Oops:#[##] | 0 | 34 |
| RIP:entry_SYSCALL_64_after_swapgs | 0 | 34 |
| Kernel_panic-not_syncing:Fatal_exception | 0 | 34 |
+------------------------------------------+------------+------------+
[ 1.891698] Freeing unused kernel memory: 984K (ffff88000150a000 - ffff880001600000)
[ 1.897808] Freeing unused kernel memory: 1660K (ffff880001861000 - ffff880001a00000)
[ 1.900225] BUG: unable to handle kernel paging request at ffffffff03460040
[ 1.902293] IP: [<ffffffff81506684>] entry_SYSCALL_64_after_swapgs+0x31/0x47
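(For reference, the layout change named in the commit above can be sketched as follows. This is the general CONFIG_THREAD_INFO_IN_TASK design rather than the exact patch: thread_info shrinks to just the flags word and moves from the base of the kernel stack to the first member of task_struct, so entry code has to reach TIF flags through the current task instead of through %rsp, which is consistent with the faulting entry_SYSCALL_64_after_swapgs path seen here.)

/* Sketch of the design, not the patch itself. */
struct thread_info {
	unsigned long flags;		/* TIF_* flags, the only field left */
};

struct task_struct {
	struct thread_info thread_info;	/* must remain the first member */
	/* ... */
};

#define task_thread_info(tsk)	(&(tsk)->thread_info)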
FYI, raw QEMU command line is:
qemu-system-x86_64 -enable-kvm -kernel /pkg/linux/x86_64-randconfig-v0-06261108/gcc-6/87194cac139aebecec89a68eff719ab44f0469a2/vmlinuz-4.7.0-rc4-00258-g87194ca -append 'root=/dev/ram0 user=lkp job=/lkp/scheduled/vm-vp-quantal-x86_64-2/bisect_boot-1-quantal-core-x86_64.cgz-x86_64-randconfig-v0-06261108-87194cac139aebecec89a68eff719ab44f0469a2-20160626-43628-vqpxp-0.yaml ARCH=x86_64 kconfig=x86_64-randconfig-v0-06261108 branch=linux-devel/devel-hourly-2016062608 commit=87194cac139aebecec89a68eff719ab44f0469a2 BOOT_IMAGE=/pkg/linux/x86_64-randconfig-v0-06261108/gcc-6/87194cac139aebecec89a68eff719ab44f0469a2/vmlinuz-4.7.0-rc4-00258-g87194ca max_uptime=600 RESULT_ROOT=/result/boot/1/vm-vp-quantal-x86_64/quantal-core-x86_64.cgz/x86_64-randconfig-v0-06261108/gcc-6/87194cac139aebecec89a68eff719ab44f0469a2/0 LKP_SERVER=inn earlyprintk=ttyS0,115200 systemd.log_level=err debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw ip=::::vm-vp-quantal-x86_64-2::dhcp drbd.minor_count=8' -initrd /fs/sdh1/initrd-vm-vp-quantal-x86_64-2 -m 360 -smp 2 -device e1000,netdev=net0 -netdev user,id=net0 -boot order=nc -no-reboot -watchdog i6300esb -rtc base=localtime -pidfile /dev/shm/kboot/pid-vm-vp-quantal-x86_64-2 -serial file:/dev/shm/kboot/serial-vm-vp-quantal-x86_64-2 -daemonize -display none -monitor null
Thanks,
Kernel Test Robot
[sched] a550c94136: BUG: unable to handle kernel NULL pointer dereference at (null)
by kernel test robot
FYI, we noticed the following commit:
https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/vmap_stack
commit a550c94136a5253a727e3af39f306b8c55b1721e ("sched: Free the stack early if CONFIG_THREAD_INFO_IN_TASK")
on test machine: vm-lkp-wsx03-quantal-x86_64: 2 threads qemu-system-x86_64 -enable-kvm -cpu Haswell,+smep,+smap with 360M memory
caused below changes:
+------------------------------------------+------------+------------+
| | 87194cac13 | a550c94136 |
+------------------------------------------+------------+------------+
| boot_successes | 47 | 0 |
| boot_failures | 0 | 45 |
| BUG:unable_to_handle_kernel | 0 | 39 |
| Oops | 0 | 16 |
| RIP:__schedule | 0 | 10 |
| RIP:number | 0 | 5 |
| backtrace:do_wait | 0 | 9 |
| backtrace:SyS_wait4 | 0 | 9 |
| PANIC:double_fault | 0 | 28 |
| RIP:symbol_string | 0 | 11 |
| Kernel_panic-not_syncing:Machine_halted | 0 | 28 |
| WARNING:at_mm/vmalloc.c:#__vunmap | 0 | 2 |
| RIP:io_serial_out | 0 | 8 |
| Kernel_panic-not_syncing:Fatal_exception | 0 | 13 |
| RIP:queued_spin_lock_slowpath | 0 | 1 |
| RIP:io_serial_in | 0 | 4 |
| RIP:vmalloc_fault | 0 | 2 |
| backtrace:smpboot_thread_fn | 0 | 2 |
| RIP:__lock_acquire | 0 | 3 |
| backtrace:async_run_entry_fn | 0 | 3 |
| BUG:kernel_test_hang | 0 | 1 |
| backtrace:compat_SyS_wait4 | 0 | 2 |
| RIP:cont_add | 0 | 1 |
| backtrace:core_sys_select | 0 | 1 |
| backtrace:SyS_select | 0 | 1 |
| backtrace:vfs_read | 0 | 1 |
| backtrace:SyS_read | 0 | 1 |
| RIP:no_context | 0 | 1 |
| invoked_oom-killer:gfp_mask=0x | 0 | 1 |
| Mem-Info | 0 | 1 |
| Out_of_memory:Kill_process | 0 | 1 |
| BUG:Bad_page_map_in_process | 0 | 1 |
| backtrace:oom_reaper | 0 | 1 |
+------------------------------------------+------------+------------+
[ 14.266586] Freeing unused kernel memory: 384K (ffff8800023a0000 - ffff880002400000)
[ 14.329680] random: init urandom read with 6 bits of entropy available
[ 14.417506] hwclock
[ 14.420263] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420267] IP: [< (null)>] (null)
[ 14.420292] PGD 0
[ 14.420321] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420357] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420359] PGD 0
[ 14.420387] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420394] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420397] PGD 0
[ 14.420424] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420451] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420454] PGD 0
[ 14.420460] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420487] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420490] PGD 0
[ 14.420517] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420524] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420526] PGD 0
[ 14.420552] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420559] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420582] PGD 0
[ 14.420588] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420615] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420618] PGD 0
[ 14.420645] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420651] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420654] PGD 0
[ 14.420681] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420687] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420710] PGD 0
[ 14.420716] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420744] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420746] PGD 0
[ 14.420752] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420779] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420782] PGD 0
[ 14.420809] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420815] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420818] PGD 0
[ 14.420845] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420872] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420875] PGD 0
[ 14.420881] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420908] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420910] PGD 0
[ 14.420938] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420944] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.420946] PGD 0
[ 14.420973] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.420980] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.421003] PGD 0
[ 14.421009] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.421036] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.421039] PGD 0
[ 14.421045] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.421110] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.421134] PGD 0
[ 14.421140] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.421168] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.421170] PGD 0
[ 14.421197] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 14.421204] IP: [<ffffffff8103d7fd>] no_context+0x2c5/0x382
[ 14.421206] PGD 0
[ 14.421234] BUG: unable to handle kernel
[ 14.454361] BUG: unable to handle kernel
[ 14.454362] PANIC: double fault, error_code: 0x0
[ 14.454370] CPU: 0 PID: 158 Comm: hwclock Not tainted 4.7.0-rc4-00259-ga550c94 #334
[ 14.454393] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014
[ 14.454397] task: ffff88000cbe4440 ti: ffff88000cbe4440 task.ti: ffff88000cbe4440
[ 14.454432] RIP: 0010:[<ffffffff815117c8>] [<ffffffff815117c8>] io_serial_out+0x15/0x17
[ 14.454435] RSP: 0018:ffffc90000150ad0 EFLAGS: 00010002
[ 14.454459] RAX: 0000000000000070 RBX: ffffffff83448380 RCX: 0000000000000000
[ 14.454462] RDX: 00000000000003f8 RSI: 0000000000000000 RDI: ffffffff83448380
[ 14.454465] RBP: ffffc90000150ad0 R08: 0000000000000002 R09: 0000000000000000
[ 14.454489] R10: ffffc90000150d18 R11: ffffffff832c5d67 R12: 0000000000000070
[ 14.454492] R13: ffffffff83448380 R14: ffffffff8151213a R15: 0000000000000059
[ 14.454496] FS: 0000000000000000(0000) GS:ffff880013c00000(0000) knlGS:0000000000000000
[ 14.454499] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 14.454522] CR2: ffffc90000150ac8 CR3: 000000000240b000 CR4: 00000000000006f0
[ 14.454531] Stack:
[ 14.454560]
[ 14.454561] Call Trace:
[ 14.454586] <UNK>
[ 14.454749] Code: 8f d9 00 00 00 8b 57 48 55 48 89 e5 d3 e6 01 f2 ec 0f b6 c0 5d c3 0f b6 8f d9 00 00 00 89 d0 8b 57 48 55 48 89 e5 d3 e6 01 f2 ee <5d> c3 8a 97 da 00 00 00 55 48 c7 87 58 02 00 00 64 16 51 81 48
[ 14.454752] Kernel panic - not syncing: Machine halted.
[ 16.562053] Shutting down cpus with NMI
[ 16.562640] Kernel Offset: disabled
FYI, raw QEMU command line is:
qemu-system-x86_64 -enable-kvm -cpu Haswell,+smep,+smap -kernel /pkg/linux/x86_64-acpi-redef/gcc-6/a550c94136a5253a727e3af39f306b8c55b1721e/vmlinuz-4.7.0-rc4-00259-ga550c94 -append 'root=/dev/ram0 user=lkp job=/lkp/scheduled/vm-lkp-wsx03-quantal-x86_64-2/bisect_boot-1-quantal-core-x86_64.cgz-x86_64-acpi-redef-a550c94136a5253a727e3af39f306b8c55b1721e-20160626-107935-1evi98y-0.yaml ARCH=x86_64 kconfig=x86_64-acpi-redef branch=linux-devel/devel-catchup-201606260900 commit=a550c94136a5253a727e3af39f306b8c55b1721e BOOT_IMAGE=/pkg/linux/x86_64-acpi-redef/gcc-6/a550c94136a5253a727e3af39f306b8c55b1721e/vmlinuz-4.7.0-rc4-00259-ga550c94 max_uptime=600 RESULT_ROOT=/result/boot/1/vm-lkp-wsx03-quantal-x86_64/quantal-core-x86_64.cgz/x86_64-acpi-redef/gcc-6/a550c94136a5253a727e3af39f306b8c55b1721e/0 LKP_SERVER=inn earlyprintk=ttyS0,115200 systemd.log_level=err debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal rw ip=::::vm-lkp-wsx03-quantal-x86_64-2::dhcp drbd.minor_count=8' -initrd /fs/sdc1/initrd-vm-lkp-wsx03-quantal-x86_64-2 -m 360 -smp 2 -device e1000,netdev=net0 -netdev user,id=net0 -boot order=nc -no-reboot -watchdog i6300esb -rtc base=localtime -pidfile /dev/shm/kboot/pid-vm-lkp-wsx03-quantal-x86_64-2 -serial file:/dev/shm/kboot/serial-vm-lkp-wsx03-quantal-x86_64-2 -daemonize -display none -monitor null
Thanks,
Kernel Test Robot
[perf core] c5dfd78eb7: BUG: unable to handle kernel NULL pointer dereference at 00000c40
by kernel test robot
Greetings,
0day kernel testing robot got the below dmesg and the first bad commit is
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit c5dfd78eb79851e278b7973031b9ca363da87a7e
Author: Arnaldo Carvalho de Melo <acme@redhat.com>
AuthorDate: Thu Apr 21 12:28:50 2016 -0300
Commit: Arnaldo Carvalho de Melo <acme@redhat.com>
CommitDate: Wed Apr 27 10:20:39 2016 -0300
perf core: Allow setting up max frame stack depth via sysctl
The default remains 127, which is good for most cases and not even hit
most of the time, but for some cases, as reported by Brendan, 1024+
deep frames are appearing on the radar for things like groovy and ruby.
In some workloads, putting a _lower_ cap on this may also make sense;
a per-event limit still needs to be put in place, though.
The new file is:
# cat /proc/sys/kernel/perf_event_max_stack
127
Changing it:
# echo 256 > /proc/sys/kernel/perf_event_max_stack
# cat /proc/sys/kernel/perf_event_max_stack
256
But as soon as there is some event using callchains we get:
# echo 512 > /proc/sys/kernel/perf_event_max_stack
-bash: echo: write error: Device or resource busy
#
Because we only allocate the callchain percpu data structures when there
is a user, changing the max is easy: it's just a matter of having no
callchain users at that point.
Reported-and-Tested-by: Brendan Gregg <brendan.d.gregg@gmail.com>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Wang Nan <wangnan0@huawei.com>
Cc: Zefan Li <lizefan@huawei.com>
Link: http://lkml.kernel.org/r/20160426002928.GB16708@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
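The semantics the commit message describes are easy to exercise from a
shell: the limit can be changed freely only while no event is using
callchains, and a write fails with EBUSY otherwise. Below is a minimal
sketch of a script that adjusts the limit defensively; only the /proc
path comes from the commit message above, and the script name, default
value and fallback behavior are illustrative, not part of the kernel
change.
----------------------------------------------------------------------------
#!/bin/bash
# Sketch: raise perf_event_max_stack, tolerating the EBUSY case described
# in the commit message (an active event is already using callchains).
SYSCTL=/proc/sys/kernel/perf_event_max_stack
want=${1:-256}   # desired max frame stack depth (illustrative default)

cur=$(cat "$SYSCTL")
echo "current perf_event_max_stack: $cur"

# echo's write(2) to the procfs file fails with EBUSY while callchain
# users exist; suppress bash's "write error" message and check the status.
if echo "$want" > "$SYSCTL" 2>/dev/null; then
    echo "perf_event_max_stack set to $(cat "$SYSCTL")"
else
    # The kernel only reallocates the percpu callchain buffers when they
    # are unused, so the write is refused here.
    echo "busy: callchain users active, keeping $cur" >&2
    exit 1
fi
----------------------------------------------------------------------------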
+-----------------------------------------------------------+------------+------------+-----------------+
| | c2a218c63b | c5dfd78eb7 | v4.7-rc4_062414 |
+-----------------------------------------------------------+------------+------------+-----------------+
| boot_successes | 910 | 305 | 67 |
| boot_failures | 0 | 5 | 53 |
| Oops | 0 | 5 | 2 |
| EIP_is_at_perf_prepare_sample | 0 | 5 | |
| Kernel_panic-not_syncing:Fatal_exception | 0 | 5 | 2 |
| BUG:unable_to_handle_kernel | 0 | 4 | 1 |
| backtrace:iterate_dir | 0 | 1 | |
| backtrace:SyS_getdents64 | 0 | 1 | |
| EIP_is_at_get_perf_callchain | 0 | 0 | 2 |
| BUG:kernel_test_crashed | 0 | 0 | 7 |
| IP-Config:Auto-configuration_of_network_failed | 0 | 0 | 2 |
| WARNING:at_arch/x86/mm/extable.c:#ex_handler_rdmsr_unsafe | 0 | 0 | 42 |
| backtrace:native_calibrate_cpu | 0 | 0 | 42 |
| backtrace:tsc_init | 0 | 0 | 42 |
| backtrace:x86_late_time_init | 0 | 0 | 42 |
+-----------------------------------------------------------+------------+------------+-----------------+
[main] 375 sockets created based on info from socket cachefile.
[main] Generating file descriptors
[main] Added 889 filenames from /dev
[ 56.590952] BUG: unable to handle kernel NULL pointer dereference at 00000c40
[ 56.598975] IP: [<790e4f29>] perf_prepare_sample+0x229/0x330
[ 56.599783] *pde = 00000000
[ 56.601158] Oops: 0000 [#1] SMP
[ 56.604020] CPU: 1 PID: 398 Comm: trinity-main Not tainted 4.6.0-rc4-00181-gc5dfd78 #1
[ 56.607177] task: 83584200 ti: 83778000 task.ti: 83778000
[ 56.610893] EIP: 0060:[<790e4f29>] EFLAGS: 00010002 CPU: 1
[ 56.611717] EIP is at perf_prepare_sample+0x229/0x330
[ 56.613429] EAX: 00000c40 EBX: 83779d14 ECX: 00000008 EDX: 0000019d
[ 56.615646] ESI: 83779e00 EDI: 89d0e400 EBP: 83779cfc ESP: 83779ce4
[ 56.619967] DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
[ 56.621607] CR0: 80050033 CR2: 00000c40 CR3: 12087000 CR4: 00000690
[ 56.622546] DR0: 6f062000 DR1: 00000000 DR2: 00000000 DR3: 00000000
[ 56.624617] DR6: ffff0ff0 DR7: 00000600
[ 56.625979] Stack:
[ 56.626276] 00000000 00000000 000307e6 89d0e400 83779e00 890e38c0 83779d40 790e5098
[ 56.630229] 890e38c0 00000000 00000000 790e5030 00000009 00480001 835846c0 83584200
[ 56.639146] 00000000 83779d70 79092d70 0002c018 00000007 89d0e400 00000000 83779d68
[ 56.642142] Call Trace:
[ 56.642528] [<790e5098>] perf_event_output_forward+0x68/0x130
[ 56.645403] [<790e5030>] ? perf_prepare_sample+0x330/0x330
[ 56.648553] [<79092d70>] ? __lock_acquire+0x4d0/0xbd0
[ 56.651322] [<790dd1b9>] __perf_event_overflow+0xa9/0x220
[ 56.653819] [<790e5a5f>] perf_swevent_overflow+0x4f/0x90
[ 56.654639] [<790e5b6d>] perf_swevent_event+0xcd/0x100
[ 56.658184] [<790e60cb>] ___perf_sw_event+0x26b/0x300
[ 56.660930] [<790e5e82>] ? ___perf_sw_event+0x22/0x300
[ 56.664053] [<79076260>] ? set_next_entity+0x4b0/0xcd0
[ 56.667992] [<7907e9fd>] ? pick_next_task_fair+0x6cd/0x700
[ 56.669659] [<796150a4>] ? __schedule+0xb4/0x830
[ 56.670383] [<7906afc0>] ? update_rq_clock+0x80/0xa0
[ 56.672287] [<7961537f>] __schedule+0x38f/0x830
[ 56.676127] [<79615871>] schedule+0x21/0x40
[ 56.677534] [<79000b9d>] exit_to_usermode_loop+0x7d/0xa0
[ 56.678284] [<7900100f>] do_int80_syscall_32+0xcf/0x150
[ 56.684207] [<7961a703>] entry_INT80_32+0x2f/0x2f
[ 56.686974] Code: f1 ff f6 45 f0 20 89 46 38 c7 46 3c 00 00 00 00 0f 84 4a fe ff ff 8b 55 08 89 f8 e8 32 4a 00 00 85 c0 89 46 68 0f 84 d7 00 00 00 <8b> 00 40 c1 e0 03 66 01 43 06 e9 26 fe ff ff 8b 45 08 8b 40 34
[ 56.695205] EIP: [<790e4f29>] perf_prepare_sample+0x229/0x330 SS:ESP 0068:83779ce4
[ 56.696421] CR2: 0000000000000c40
[ 56.698982] ---[ end trace 3c0cfd42bd35a255 ]---
[ 56.699680] Kernel panic - not syncing: Fatal exception
git bisect start 33688abb2802ff3a230bd2441f765477b94cc89e v4.6 --
git bisect bad 48dd7cefa010b704eb2532a2883798fd6d703a0e # 23:14 0- 1 Merge tag 'vfio-v4.7-rc1' of git://github.com/awilliam/linux-vfio
git bisect bad 676d9735cd010fc439566e2b6e9b6adc3e1179ef # 23:19 0- 1 Merge tag 'rpmsg-v4.7' of git://github.com/andersson/remoteproc
git bisect bad 7f427d3a6029331304f91ef4d7cf646f054216d2 # 23:27 110- 26 Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
git bisect bad ce6a01c2d50e1d400cb6d492841f9b1932034fc2 # 23:32 9- 2 Merge tag 'metag-for-v4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag
git bisect bad 36db171cc733bc7b8c628ef21831467d1919decd # 23:46 0- 1 Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect good 230e51f21101e49c8d73018d414adbd0d57459a1 # 23:57 310+ 4 Merge branch 'core-signals-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect good 1c19b68a279c58d6da4379bf8b6d679a300a1daf # 00:07 310+ 18 Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect good 3469d261eac65912927dca13ee8f77c744ad7aa2 # 00:18 310+ 27 Merge branch 'locking-rwsem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
git bisect good f56ebf20d0f535f5da7cfcf0000ab3e0af133f81 # 00:40 310+ 0 perf jit: memset() variable 'st' using the correct size
git bisect bad 5101ef20f0ef1de79091a1fdb6b1a7f07565545a # 00:44 0- 3 perf/arm: Special-case hetereogeneous CPUs
git bisect good 4bd112df3eea4db63fe90fb4e83c48d3f3bd6512 # 01:05 303+ 0 tools lib api fs: Add helper to read string from procfs file
git bisect bad 3dcc4436fa6f09ce093ff59bf8477c3059dc46df # 01:17 7- 1 perf tools: Introduce trigger class
git bisect bad 4cb93446c587d56e2a54f4f83113daba2c0b6dee # 01:22 3- 1 perf tools: Set the maximum allowed stack from /proc/sys/kernel/perf_event_max_stack
git bisect good c61fb959df898b994382d586046d7704476ff503 # 04:04 310+ 0 perf probe: Fix module probe issue if no dwarf support
git bisect good c2a218c63ba36946aca5943c0c8ebd3a42e3dc4b # 06:47 310+ 0 perf bench: Remove one more die() call
git bisect bad c5dfd78eb79851e278b7973031b9ca363da87a7e # 07:57 33- 2 perf core: Allow setting up max frame stack depth via sysctl
# first bad commit: [c5dfd78eb79851e278b7973031b9ca363da87a7e] perf core: Allow setting up max frame stack depth via sysctl
git bisect good c2a218c63ba36946aca5943c0c8ebd3a42e3dc4b # 08:06 910+ 0 perf bench: Remove one more die() call
# extra tests with CONFIG_DEBUG_INFO_REDUCED
git bisect bad c5dfd78eb79851e278b7973031b9ca363da87a7e # 08:10 0- 3 perf core: Allow setting up max frame stack depth via sysctl
# extra tests on HEAD of linux-devel/devel-hourly-2016062414
git bisect bad e8d665056895dafedd7882bfe250ff6cf7dfbc0d # 08:10 0- 53 0day head guard for 'devel-hourly-2016062414'
# extra tests on tree/branch linus/master
git bisect bad 63c04ee7d3b7c8d8e2726cb7c5f8a5f6fcc1e3b2 # 08:22 0- 3 Merge tag 'upstream-4.7-rc5' of git://git.infradead.org/linux-ubifs
# extra tests on tree/branch linus/master
git bisect bad 63c04ee7d3b7c8d8e2726cb7c5f8a5f6fcc1e3b2 # 08:23 0- 5 Merge tag 'upstream-4.7-rc5' of git://git.infradead.org/linux-ubifs
# extra tests on tree/branch linux-next/master
git bisect bad 2cf991dfda8b36d2878c249bcdf492366ec24c19 # 08:29 14- 1 Add linux-next specific files for 20160624
This script may reproduce the error.
----------------------------------------------------------------------------
#!/bin/bash
# Boots the given kernel image under qemu-kvm with the 0day initrd and the
# same kernel parameters the robot used, to reproduce the oops locally.
kernel=$1
initrd=quantal-core-i386.cgz
# Fetch the reference initrd once; --no-clobber keeps an existing copy.
wget --no-clobber https://github.com/fengguang/reproduce-kernel-bug/raw/master/initrd/$initrd
# QEMU invocation: 2 vCPUs, 300M RAM, serial console mirrored to stdio.
kvm=(
qemu-system-x86_64
-enable-kvm
-cpu kvm64
-kernel $kernel
-initrd $initrd
-m 300
-smp 2
-device e1000,netdev=net0
-netdev user,id=net0
-boot order=nc
-no-reboot
-watchdog i6300esb
-rtc base=localtime
-serial stdio
-display none
-monitor null
)
# Kernel command line: panic on any oops, soft lockup or NMI watchdog
# event, so a failing boot terminates instead of hanging.
append=(
hung_task_panic=1
earlyprintk=ttyS0,115200
systemd.log_level=err
debug
apic=debug
sysrq_always_enabled
rcupdate.rcu_cpu_stall_timeout=100
panic=-1
softlockup_panic=1
nmi_watchdog=panic
oops=panic
load_ramdisk=2
prompt_ramdisk=0
console=ttyS0,115200
console=tty0
vga=normal
root=/dev/ram0
rw
drbd.minor_count=8
)
"${kvm[@]}" --append "${append[*]}"
----------------------------------------------------------------------------
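For example, assuming the script is saved as reproduce.sh (the name is
ours) and an i386 kernel built from the bad commit is at hand (the image
path below is hypothetical):
./reproduce.sh /path/to/linux/arch/x86/boot/bzImage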
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/lkp Intel Corporation