[rcu] e552592e038: +39.2% vm-scalability.throughput, +19.7% turbostat.Pkg_W
by Fengguang Wu
Hi Paul,
We noticed the following changes on
git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git urgent.2014.06.21a
commit e552592e0383bc72e35eb21a9fabd84ad873cff1 ("rcu: Reduce overhead of cond_resched() checks for RCU")
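For readers less familiar with this area: the commit makes the common
cond_resched() path cheap by deferring the expensive RCU quiescent-state
reporting so that it runs only occasionally. A rough userspace sketch of
that shape (hypothetical names, not the kernel code):

#include <stdio.h>

static unsigned long calls, qs_reports;

static void report_rcu_qs(void)		/* stand-in for the slow path */
{
	qs_reports++;
}

static void cond_resched_like(void)
{
	/* cheap counter test on every call; slow path taken rarely */
	if ((++calls & 0xff) == 0)
		report_rcu_qs();
}

int main(void)
{
	for (int i = 0; i < 1000000; i++)
		cond_resched_like();
	printf("calls=%lu, quiescent-state reports=%lu\n", calls, qs_reports);
	return 0;
}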
Test case: brickland3/vm-scalability/300s-anon-w-seq-mt-64G
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
89766370 ~ 6% +39.2% 1.25e+08 ~ 9% TOTAL vm-scalability.throughput
1.317e+09 ~ 8% -45.2% 7.21e+08 ~10% TOTAL cpuidle.C6-IVT-4S.time
9 ~ 6% +58.8% 15 ~ 5% TOTAL vmstat.procs.r
12.27 ~14% +44.7% 17.74 ~12% TOTAL turbostat.%c1
20538 ~ 4% -21.3% 16155 ~ 4% TOTAL cpuidle.C6-IVT-4S.usage
77.66 ~ 2% -15.0% 65.98 ~ 5% TOTAL turbostat.%c6
260 ~ 2% -16.4% 217 ~ 4% TOTAL vmstat.memory.buff
51920 ~ 7% -14.3% 44489 ~ 5% TOTAL numa-meminfo.node0.PageTables
53822 ~ 5% -15.6% 45404 ~ 6% TOTAL proc-vmstat.nr_page_table_pages
215196 ~ 5% -15.7% 181409 ~ 6% TOTAL meminfo.PageTables
52182 ~ 4% -15.2% 44271 ~ 6% TOTAL proc-vmstat.nr_anon_transparent_hugepages
12881 ~ 7% -13.4% 11150 ~ 5% TOTAL numa-vmstat.node0.nr_page_table_pages
1.068e+08 ~ 4% -15.2% 90492587 ~ 6% TOTAL meminfo.AnonHugePages
26983682 ~ 4% -14.5% 23071571 ~ 6% TOTAL proc-vmstat.nr_anon_pages
1.079e+08 ~ 4% -14.5% 92289854 ~ 6% TOTAL meminfo.AnonPages
1.083e+08 ~ 4% -14.2% 92897630 ~ 6% TOTAL meminfo.Active(anon)
1.084e+08 ~ 4% -14.2% 92970821 ~ 6% TOTAL meminfo.Active
27067517 ~ 4% -14.2% 23232055 ~ 6% TOTAL proc-vmstat.nr_active_anon
52565 ~ 3% -12.0% 46273 ~ 3% TOTAL proc-vmstat.nr_shmem
52499 ~ 3% -12.0% 46215 ~ 3% TOTAL proc-vmstat.nr_inactive_anon
214447 ~ 3% -10.5% 191862 ~ 2% TOTAL meminfo.Shmem
214197 ~ 3% -10.5% 191636 ~ 2% TOTAL meminfo.Inactive(anon)
2779 ~13% -53.5% 1291 ~19% TOTAL time.involuntary_context_switches
1156 ~ 8% +82.3% 2108 ~ 9% TOTAL time.percent_of_cpu_this_job_got
11.58 ~10% -44.4% 6.45 ~ 9% TOTAL time.elapsed_time
1008 ~ 5% +79.5% 1810 ~ 7% TOTAL time.voluntary_context_switches
9.23 ~ 8% +72.8% 15.95 ~ 8% TOTAL turbostat.%c0
12679 ~ 8% +70.8% 21659 ~ 9% TOTAL vmstat.system.in
145 ~ 7% +60.6% 234 ~ 8% TOTAL vmstat.io.bo
3721 ~ 7% +35.2% 5029 ~ 8% TOTAL vmstat.system.cs
~ 1% +26.7% ~ 5% TOTAL turbostat.Cor_W
~ 2% +21.2% ~ 3% TOTAL turbostat.RAM_W
~ 1% +19.7% ~ 4% TOTAL turbostat.Pkg_W
All test cases:
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
89766370 ~ 6% +39.2% 1.25e+08 ~ 9% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
89766370 ~ 6% +39.2% 1.25e+08 ~ 9% TOTAL vm-scalability.throughput
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
0.36 ~ 1% -9.8% 0.32 ~ 2% lkp-nex05/will-it-scale/open1
0.36 ~ 1% -9.8% 0.32 ~ 2% TOTAL will-it-scale.scalability
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
466616 ~ 1% -2.6% 454267 ~ 1% lkp-nex05/will-it-scale/open1
511556 ~ 0% -1.3% 504762 ~ 0% lkp-snb01/will-it-scale/signal1
978172 ~ 0% -2.0% 959029 ~ 0% TOTAL will-it-scale.per_process_ops
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1381706 ~ 1% +2.3% 1413190 ~ 0% lkp-snb01/will-it-scale/futex2
299558 ~ 0% -1.8% 294312 ~ 0% lkp-snb01/will-it-scale/signal1
1681264 ~ 1% +1.6% 1707503 ~ 0% TOTAL will-it-scale.per_thread_ops
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
84771 ~ 2% +575.2% 572390 ~ 1% lkp-nex05/will-it-scale/open1
84771 ~ 2% +575.2% 572390 ~ 1% TOTAL slabinfo.kmalloc-256.active_objs
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
2656 ~ 2% +573.8% 17896 ~ 1% lkp-nex05/will-it-scale/open1
2656 ~ 2% +573.8% 17896 ~ 1% TOTAL slabinfo.kmalloc-256.num_slabs
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
85011 ~ 2% +573.7% 572697 ~ 1% lkp-nex05/will-it-scale/open1
85011 ~ 2% +573.7% 572697 ~ 1% TOTAL slabinfo.kmalloc-256.num_objs
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
2656 ~ 2% +573.8% 17896 ~ 1% lkp-nex05/will-it-scale/open1
2656 ~ 2% +573.8% 17896 ~ 1% TOTAL slabinfo.kmalloc-256.active_slabs
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1533310 ~ 2% -83.7% 250652 ~ 1% lkp-nex05/will-it-scale/open1
289790 ~ 4% -67.2% 94913 ~ 4% lkp-snb01/will-it-scale/futex2
204559 ~ 4% -62.6% 76449 ~ 7% lkp-snb01/will-it-scale/signal1
2027660 ~ 3% -79.2% 422015 ~ 3% TOTAL softirqs.RCU
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
47388 ~ 5% +147.5% 117277 ~ 1% lkp-nex05/will-it-scale/open1
47388 ~ 5% +147.5% 117277 ~ 1% TOTAL numa-meminfo.node0.SUnreclaim
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
11836 ~ 5% +149.4% 29520 ~ 2% lkp-nex05/will-it-scale/open1
11836 ~ 5% +149.4% 29520 ~ 2% TOTAL numa-vmstat.node0.nr_slab_unreclaimable
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
97325 ~ 0% +125.2% 219188 ~ 1% lkp-nex05/will-it-scale/open1
97325 ~ 0% +125.2% 219188 ~ 1% TOTAL meminfo.SUnreclaim
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
24304 ~ 0% +125.0% 54686 ~ 0% lkp-nex05/will-it-scale/open1
24304 ~ 0% +125.0% 54686 ~ 0% TOTAL proc-vmstat.nr_slab_unreclaimable
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
6364 ~ 8% +117.8% 13864 ~ 3% lkp-nex05/will-it-scale/open1
6364 ~ 8% +117.8% 13864 ~ 3% TOTAL numa-vmstat.node1.nr_slab_unreclaimable
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
25487 ~ 8% +115.7% 54988 ~ 4% lkp-nex05/will-it-scale/open1
25487 ~ 8% +115.7% 54988 ~ 4% TOTAL numa-meminfo.node1.SUnreclaim
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
63365 ~ 5% +110.5% 133379 ~ 1% lkp-nex05/will-it-scale/open1
63365 ~ 5% +110.5% 133379 ~ 1% TOTAL numa-meminfo.node0.Slab
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
6335 ~11% +83.5% 11625 ~ 4% lkp-nex05/will-it-scale/open1
6335 ~11% +83.5% 11625 ~ 4% TOTAL numa-vmstat.node3.nr_slab_unreclaimable
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
25370 ~11% +82.1% 46193 ~ 5% lkp-nex05/will-it-scale/open1
25370 ~11% +82.1% 46193 ~ 5% TOTAL numa-meminfo.node3.SUnreclaim
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
140861 ~ 0% +86.5% 262647 ~ 1% lkp-nex05/will-it-scale/open1
140861 ~ 0% +86.5% 262647 ~ 1% TOTAL meminfo.Slab
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.317e+09 ~ 8% -45.2% 7.21e+08 ~10% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
1.317e+09 ~ 8% -45.2% 7.21e+08 ~10% TOTAL cpuidle.C6-IVT-4S.time
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
38627 ~ 5% +77.8% 68673 ~ 5% lkp-nex05/will-it-scale/open1
38627 ~ 5% +77.8% 68673 ~ 5% TOTAL numa-meminfo.node1.Slab
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.10 ~ 5% +70.3% 1.88 ~ 0% lkp-nex05/will-it-scale/open1
1.10 ~ 5% +70.3% 1.88 ~ 0% TOTAL perf-profile.cpu-cycles.setup_object.isra.47.__slab_alloc.kmem_cache_alloc.get_empty_filp.path_openat
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
2.54 ~ 6% -37.7% 1.58 ~ 5% lkp-snb01/will-it-scale/futex2
2.54 ~ 6% -37.7% 1.58 ~ 5% TOTAL perf-profile.cpu-cycles.get_futex_key.futex_wait_setup.futex_wait.do_futex.sys_futex
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
9 ~ 6% +58.8% 15 ~ 5% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
9 ~ 6% +58.8% 15 ~ 5% TOTAL vmstat.procs.r
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
12.27 ~14% +44.7% 17.74 ~12% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
12.27 ~14% +44.7% 17.74 ~12% TOTAL turbostat.%c1
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
661578 ~ 2% -33.4% 440676 ~ 0% lkp-nex05/will-it-scale/open1
661578 ~ 2% -33.4% 440676 ~ 0% TOTAL cpuidle.C3-NHM.usage
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
39783 ~ 9% +50.5% 59862 ~ 5% lkp-nex05/will-it-scale/open1
39783 ~ 9% +50.5% 59862 ~ 5% TOTAL numa-meminfo.node3.Slab
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
2.03 ~ 8% +34.1% 2.73 ~ 2% lkp-nex05/will-it-scale/open1
2.03 ~ 8% +34.1% 2.73 ~ 2% TOTAL perf-profile.cpu-cycles.rcu_nocb_kthread.kthread.ret_from_fork
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
20538 ~ 4% -21.3% 16155 ~ 4% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
20538 ~ 4% -21.3% 16155 ~ 4% TOTAL cpuidle.C6-IVT-4S.usage
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.07 ~ 4% -17.3% 0.88 ~ 8% lkp-nex05/will-it-scale/open1
1.07 ~ 4% -17.3% 0.88 ~ 8% TOTAL perf-profile.cpu-cycles.__alloc_fd.get_unused_fd_flags.do_sys_open.sys_open.system_call_fastpath
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
77.66 ~ 2% -15.0% 65.98 ~ 5% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
77.66 ~ 2% -15.0% 65.98 ~ 5% TOTAL turbostat.%c6
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
0.96 ~ 3% -14.8% 0.82 ~ 5% lkp-snb01/will-it-scale/futex2
0.96 ~ 3% -14.8% 0.82 ~ 5% TOTAL perf-profile.cpu-cycles.put_page.get_futex_key.futex_wait_setup.futex_wait.do_futex
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.08 ~ 4% +22.4% 1.33 ~ 2% lkp-nex05/will-it-scale/open1
1.08 ~ 4% +22.4% 1.33 ~ 2% TOTAL perf-profile.cpu-cycles.memset.get_empty_filp.path_openat.do_filp_open.do_sys_open
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
260 ~ 2% -16.4% 217 ~ 4% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
260 ~ 2% -16.4% 217 ~ 4% TOTAL vmstat.memory.buff
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
51920 ~ 7% -14.3% 44489 ~ 5% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
51920 ~ 7% -14.3% 44489 ~ 5% TOTAL numa-meminfo.node0.PageTables
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
53822 ~ 5% -15.6% 45404 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
53822 ~ 5% -15.6% 45404 ~ 6% TOTAL proc-vmstat.nr_page_table_pages
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
215196 ~ 5% -15.7% 181409 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
215196 ~ 5% -15.7% 181409 ~ 6% TOTAL meminfo.PageTables
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
52182 ~ 4% -15.2% 44271 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
52182 ~ 4% -15.2% 44271 ~ 6% TOTAL proc-vmstat.nr_anon_transparent_hugepages
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
12881 ~ 7% -13.4% 11150 ~ 5% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
12881 ~ 7% -13.4% 11150 ~ 5% TOTAL numa-vmstat.node0.nr_page_table_pages
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.068e+08 ~ 4% -15.2% 90492587 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
1.068e+08 ~ 4% -15.2% 90492587 ~ 6% TOTAL meminfo.AnonHugePages
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
26983682 ~ 4% -14.5% 23071571 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
26983682 ~ 4% -14.5% 23071571 ~ 6% TOTAL proc-vmstat.nr_anon_pages
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.83 ~ 6% +19.3% 2.18 ~ 5% lkp-nex05/will-it-scale/open1
1.83 ~ 6% +19.3% 2.18 ~ 5% TOTAL perf-profile.cpu-cycles.get_empty_filp.path_openat.do_filp_open.do_sys_open.sys_open
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.079e+08 ~ 4% -14.5% 92289854 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
1.079e+08 ~ 4% -14.5% 92289854 ~ 6% TOTAL meminfo.AnonPages
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
254457 ~ 2% -12.2% 223290 ~ 1% lkp-nex05/will-it-scale/open1
254457 ~ 2% -12.2% 223290 ~ 1% TOTAL softirqs.SCHED
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.083e+08 ~ 4% -14.2% 92897630 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
1.083e+08 ~ 4% -14.2% 92897630 ~ 6% TOTAL meminfo.Active(anon)
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1.084e+08 ~ 4% -14.2% 92970821 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
1.084e+08 ~ 4% -14.2% 92970821 ~ 6% TOTAL meminfo.Active
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
27067517 ~ 4% -14.2% 23232055 ~ 6% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
27067517 ~ 4% -14.2% 23232055 ~ 6% TOTAL proc-vmstat.nr_active_anon
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
52565 ~ 3% -12.0% 46273 ~ 3% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
52565 ~ 3% -12.0% 46273 ~ 3% TOTAL proc-vmstat.nr_shmem
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
52499 ~ 3% -12.0% 46215 ~ 3% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
52499 ~ 3% -12.0% 46215 ~ 3% TOTAL proc-vmstat.nr_inactive_anon
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
214447 ~ 3% -10.5% 191862 ~ 2% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
214447 ~ 3% -10.5% 191862 ~ 2% TOTAL meminfo.Shmem
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
214197 ~ 3% -10.5% 191636 ~ 2% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
214197 ~ 3% -10.5% 191636 ~ 2% TOTAL meminfo.Inactive(anon)
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
681017 ~ 0% +10.3% 751298 ~ 0% lkp-nex05/will-it-scale/open1
681017 ~ 0% +10.3% 751298 ~ 0% TOTAL numa-meminfo.node0.MemUsed
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
2779 ~13% -53.5% 1291 ~19% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
65403 ~ 4% -10.8% 58332 ~ 0% lkp-nex05/will-it-scale/open1
9638 ~ 2% +109.9% 20234 ~ 1% lkp-snb01/will-it-scale/futex2
10665 ~ 2% +103.8% 21733 ~ 2% lkp-snb01/will-it-scale/signal1
88486 ~ 4% +14.8% 101591 ~ 1% TOTAL time.involuntary_context_switches
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1156 ~ 8% +82.3% 2108 ~ 9% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
1156 ~ 8% +82.3% 2108 ~ 9% TOTAL time.percent_of_cpu_this_job_got
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
11.58 ~10% -44.4% 6.45 ~ 9% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
11.58 ~10% -44.4% 6.45 ~ 9% TOTAL time.elapsed_time
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
1008 ~ 5% +79.5% 1810 ~ 7% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
1008 ~ 5% +79.5% 1810 ~ 7% TOTAL time.voluntary_context_switches
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
9.23 ~ 8% +72.8% 15.95 ~ 8% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
9.23 ~ 8% +72.8% 15.95 ~ 8% TOTAL turbostat.%c0
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
12679 ~ 8% +70.8% 21659 ~ 9% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
12679 ~ 8% +70.8% 21659 ~ 9% TOTAL vmstat.system.in
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
145 ~ 7% +60.6% 234 ~ 8% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
145 ~ 7% +60.6% 234 ~ 8% TOTAL vmstat.io.bo
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
3721 ~ 7% +35.2% 5029 ~ 8% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
5805 ~ 2% -55.9% 2563 ~ 0% lkp-nex05/will-it-scale/open1
897 ~ 1% +10.8% 994 ~ 1% lkp-snb01/will-it-scale/futex2
908 ~ 0% +11.7% 1014 ~ 0% lkp-snb01/will-it-scale/signal1
11332 ~ 3% -15.3% 9601 ~ 4% TOTAL vmstat.system.cs
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
~ 1% +26.7% ~ 5% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
~ 1% +26.7% ~ 5% TOTAL turbostat.Cor_W
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
~ 2% +21.2% ~ 3% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
~ 2% +21.2% ~ 3% TOTAL turbostat.RAM_W
v3.16-rc1 e552592e0383bc72e35eb21a9
--------------- -------------------------
~ 1% +19.7% ~ 4% brickland3/vm-scalability/300s-anon-w-seq-mt-64G
~ 1% +19.7% ~ 4% TOTAL turbostat.Pkg_W
Legend:
~XX% - stddev percent
[+-]XX% - change percent
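As a rough sketch of how the two columns can be derived from repeated runs
(an assumption about the report tooling, not its actual source; the sample
values below are made up; compile with -lm):

#include <math.h>
#include <stdio.h>

static double mean(const double *v, int n)
{
	double s = 0;

	for (int i = 0; i < n; i++)
		s += v[i];
	return s / n;
}

/* relative standard deviation of the samples, the "~XX%" column */
static double stddev_pct(const double *v, int n)
{
	double m = mean(v, n), s = 0;

	for (int i = 0; i < n; i++)
		s += (v[i] - m) * (v[i] - m);
	return 100.0 * sqrt(s / n) / m;
}

int main(void)
{
	double base[] = { 88e6, 95e6, 86e6 };		/* made-up samples */
	double patched[] = { 120e6, 130e6, 125e6 };
	double mb = mean(base, 3), mp = mean(patched, 3);

	/* change percent compares the two means */
	printf("%.0f ~%.0f%%  %+.1f%%  %.0f ~%.0f%%\n",
	       mb, stddev_pct(base, 3),
	       100.0 * (mp - mb) / mb,
	       mp, stddev_pct(patched, 3));
	return 0;
}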
vmstat.system.cs
6000 *+-*------*--*-------------------------------------------------------+
| *. *... .*.. |
5500 ++ *. * |
| |
5000 ++ |
| |
4500 ++ |
| |
4000 ++ |
| |
3500 ++ |
| |
3000 ++ |
| |
2500 O+-O---O--O--O--O---O--O--O---O--O--O--O---O--O--O---O--O--O--O---O--O
[*] bisect-good sample
[O] bisect-bad sample
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Fengguang
[rcu] 0acd7c39a85: +11.0% will-it-scale.scalability
by Fengguang Wu
Hi Paul,
FYI, we are pleased to notice big performance gains in some
will-it-scale test cases:
git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git rcu_cond_resched.2014.06.20c
commit 0acd7c39a85836d90451b6c278c5cfdd21c055f3 ("rcu: Add RCU_COND_RESCHED_QS for large systems")
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
0.30 ~ 2% +14.3% 0.34 ~ 3% lkp-sbx04/will-it-scale/open1
0.34 ~ 3% +8.1% 0.37 ~ 1% nhm4/will-it-scale/read1
0.64 ~ 3% +11.0% 0.71 ~ 2% TOTAL will-it-scale.scalability
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
592828 ~ 1% +3.8% 615531 ~ 1% lkp-sbx04/will-it-scale/open1
592828 ~ 1% +3.8% 615531 ~ 1% TOTAL will-it-scale.per_process_ops
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
1689 ~ 0% +498.1% 10107 ~ 2% lkp-sbx04/will-it-scale/unlink2
1689 ~ 0% +498.1% 10107 ~ 2% TOTAL slabinfo.dentry.active_slabs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
72052 ~ 0% +489.8% 424930 ~ 2% lkp-sbx04/will-it-scale/unlink2
72052 ~ 0% +489.8% 424930 ~ 2% TOTAL slabinfo.Acpi-State.num_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
71629 ~ 0% +492.6% 424494 ~ 2% lkp-sbx04/will-it-scale/unlink2
71629 ~ 0% +492.6% 424494 ~ 2% TOTAL slabinfo.Acpi-State.active_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
1689 ~ 0% +498.1% 10107 ~ 2% lkp-sbx04/will-it-scale/unlink2
1689 ~ 0% +498.1% 10107 ~ 2% TOTAL slabinfo.dentry.num_slabs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
1412 ~ 0% +490.0% 8331 ~ 2% lkp-sbx04/will-it-scale/unlink2
1412 ~ 0% +490.0% 8331 ~ 2% TOTAL slabinfo.Acpi-State.active_slabs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
70980 ~ 0% +498.1% 424517 ~ 2% lkp-sbx04/will-it-scale/unlink2
70980 ~ 0% +498.1% 424517 ~ 2% TOTAL slabinfo.dentry.num_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
70444 ~ 0% +502.0% 424054 ~ 2% lkp-sbx04/will-it-scale/unlink2
70444 ~ 0% +502.0% 424054 ~ 2% TOTAL slabinfo.dentry.active_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
472 ~ 0% +1557.0% 7827 ~ 2% lkp-sbx04/will-it-scale/unlink2
472 ~ 0% +1557.0% 7827 ~ 2% TOTAL slabinfo.shmem_inode_cache.num_slabs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
472 ~ 0% +1557.0% 7827 ~ 2% lkp-sbx04/will-it-scale/unlink2
472 ~ 0% +1557.0% 7827 ~ 2% TOTAL slabinfo.shmem_inode_cache.active_slabs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
22697 ~ 0% +1555.5% 375756 ~ 2% lkp-sbx04/will-it-scale/unlink2
22697 ~ 0% +1555.5% 375756 ~ 2% TOTAL slabinfo.shmem_inode_cache.num_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
22297 ~ 0% +1584.0% 375494 ~ 2% lkp-sbx04/will-it-scale/unlink2
22297 ~ 0% +1584.0% 375494 ~ 2% TOTAL slabinfo.shmem_inode_cache.active_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
1412 ~ 0% +490.0% 8331 ~ 2% lkp-sbx04/will-it-scale/unlink2
1412 ~ 0% +490.0% 8331 ~ 2% TOTAL slabinfo.Acpi-State.num_slabs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
230182 ~ 2% +214.7% 724407 ~ 1% lkp-sbx04/will-it-scale/open1
29926 ~ 1% +1179.6% 382938 ~ 2% lkp-sbx04/will-it-scale/unlink2
260108 ~ 2% +325.7% 1107345 ~ 1% TOTAL slabinfo.kmalloc-256.active_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
3605 ~ 2% +214.1% 11325 ~ 1% lkp-sbx04/will-it-scale/open1
478 ~ 1% +1153.4% 5991 ~ 2% lkp-sbx04/will-it-scale/unlink2
4083 ~ 2% +324.1% 17316 ~ 1% TOTAL slabinfo.kmalloc-256.num_slabs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
3605 ~ 2% +214.1% 11325 ~ 1% lkp-sbx04/will-it-scale/open1
478 ~ 1% +1153.4% 5991 ~ 2% lkp-sbx04/will-it-scale/unlink2
4083 ~ 2% +324.1% 17316 ~ 1% TOTAL slabinfo.kmalloc-256.active_slabs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
230776 ~ 2% +214.1% 724862 ~ 1% lkp-sbx04/will-it-scale/open1
30619 ~ 1% +1152.4% 383473 ~ 2% lkp-sbx04/will-it-scale/unlink2
261395 ~ 2% +324.0% 1108335 ~ 1% TOTAL slabinfo.kmalloc-256.num_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
38033 ~ 2% +85.2% 70426 ~ 3% lkp-sbx04/will-it-scale/open1
27083 ~ 8% +375.9% 128878 ~ 1% lkp-sbx04/will-it-scale/unlink2
65116 ~ 4% +206.1% 199305 ~ 2% TOTAL numa-meminfo.node1.SUnreclaim
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
9470 ~ 1% +87.6% 17770 ~ 4% lkp-sbx04/will-it-scale/open1
6769 ~ 8% +375.7% 32201 ~ 1% lkp-sbx04/will-it-scale/unlink2
16240 ~ 4% +207.7% 49971 ~ 2% TOTAL numa-vmstat.node1.nr_slab_unreclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
302149 ~ 1% -73.4% 80465 ~ 0% lkp-nex05/will-it-scale/lock1
853141 ~ 2% -66.6% 284788 ~ 3% lkp-sbx04/will-it-scale/open1
693207 ~ 0% -82.6% 120744 ~ 9% lkp-sbx04/will-it-scale/unlink2
434870 ~ 2% -63.8% 157362 ~ 1% lkp-wsx01/will-it-scale/futex2
55884 ~ 5% -55.3% 24956 ~ 4% nhm4/will-it-scale/read1
2339252 ~ 1% -71.4% 668317 ~ 3% TOTAL softirqs.RCU
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
51492 ~ 3% +75.4% 90311 ~ 2% lkp-sbx04/will-it-scale/open1
39615 ~ 2% +332.8% 171468 ~ 1% lkp-sbx04/will-it-scale/unlink2
91107 ~ 3% +187.3% 261779 ~ 1% TOTAL numa-meminfo.node0.SUnreclaim
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
18566237 ~32% +133.3% 43314913 ~21% lkp-sbx04/will-it-scale/open1
22235565 ~30% +176.7% 61536286 ~21% lkp-sbx04/will-it-scale/unlink2
40801803 ~31% +157.0% 104851199 ~21% TOTAL cpuidle.C1-SNB.time
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
12835 ~ 4% +76.6% 22664 ~ 3% lkp-sbx04/will-it-scale/open1
9928 ~ 2% +331.3% 42820 ~ 1% lkp-sbx04/will-it-scale/unlink2
22764 ~ 3% +187.7% 65484 ~ 1% TOTAL numa-vmstat.node0.nr_slab_unreclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
149956 ~ 1% +81.6% 272270 ~ 1% lkp-sbx04/will-it-scale/open1
112319 ~ 0% +312.8% 463620 ~ 1% lkp-sbx04/will-it-scale/unlink2
262275 ~ 1% +180.6% 735890 ~ 1% TOTAL meminfo.SUnreclaim
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
49308 ~ 4% +66.6% 82136 ~ 3% lkp-sbx04/will-it-scale/open1
38437 ~ 5% +314.4% 159290 ~ 2% lkp-sbx04/will-it-scale/unlink2
87745 ~ 4% +175.1% 241426 ~ 2% TOTAL numa-meminfo.node1.Slab
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
37339 ~ 1% +81.4% 67717 ~ 1% lkp-sbx04/will-it-scale/open1
28028 ~ 0% +313.1% 115797 ~ 1% lkp-sbx04/will-it-scale/unlink2
65368 ~ 1% +180.7% 183514 ~ 1% TOTAL proc-vmstat.nr_slab_unreclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
31729 ~ 8% +79.4% 56907 ~ 2% lkp-sbx04/will-it-scale/open1
22831 ~ 6% +295.0% 90181 ~ 2% lkp-sbx04/will-it-scale/unlink2
54560 ~ 7% +169.6% 147088 ~ 2% TOTAL numa-meminfo.node2.SUnreclaim
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
8014 ~ 9% +77.4% 14215 ~ 3% lkp-sbx04/will-it-scale/open1
5703 ~ 6% +294.5% 22500 ~ 2% lkp-sbx04/will-it-scale/unlink2
13717 ~ 8% +167.6% 36715 ~ 2% TOTAL numa-vmstat.node2.nr_slab_unreclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
14049 ~ 9% +162.8% 36926 ~ 2% lkp-sbx04/will-it-scale/unlink2
14049 ~ 9% +162.8% 36926 ~ 2% TOTAL numa-meminfo.node0.SReclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
3505 ~ 9% +163.0% 9218 ~ 1% lkp-sbx04/will-it-scale/unlink2
3505 ~ 9% +163.0% 9218 ~ 1% TOTAL numa-vmstat.node0.nr_slab_reclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
2840 ~ 6% +167.5% 7598 ~ 4% lkp-sbx04/will-it-scale/unlink2
2840 ~ 6% +167.5% 7598 ~ 4% TOTAL numa-vmstat.node1.nr_slab_reclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
11353 ~ 6% +167.9% 30411 ~ 4% lkp-sbx04/will-it-scale/unlink2
11353 ~ 6% +167.9% 30411 ~ 4% TOTAL numa-meminfo.node1.SReclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
62873 ~ 2% +60.7% 101060 ~ 2% lkp-sbx04/will-it-scale/open1
53665 ~ 2% +288.3% 208395 ~ 1% lkp-sbx04/will-it-scale/unlink2
116538 ~ 2% +165.5% 309456 ~ 1% TOTAL numa-meminfo.node0.Slab
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
195370 ~ 0% +62.8% 318033 ~ 1% lkp-sbx04/will-it-scale/open1
161158 ~ 0% +258.0% 576939 ~ 1% lkp-sbx04/will-it-scale/unlink2
356528 ~ 0% +151.0% 894972 ~ 1% TOTAL meminfo.Slab
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
28175 ~ 2% +78.7% 50339 ~ 4% lkp-sbx04/will-it-scale/open1
22550 ~10% +211.4% 70229 ~ 5% lkp-sbx04/will-it-scale/unlink2
50725 ~ 6% +137.7% 120568 ~ 5% TOTAL numa-meminfo.node3.SUnreclaim
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
7070 ~ 3% +77.8% 12570 ~ 4% lkp-sbx04/will-it-scale/open1
5633 ~10% +211.2% 17532 ~ 5% lkp-sbx04/will-it-scale/unlink2
12704 ~ 6% +137.0% 30102 ~ 5% TOTAL numa-vmstat.node3.nr_slab_unreclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
48838 ~ 1% +132.0% 113319 ~ 1% lkp-sbx04/will-it-scale/unlink2
48838 ~ 1% +132.0% 113319 ~ 1% TOTAL meminfo.SReclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
12200 ~ 1% +132.1% 28313 ~ 1% lkp-sbx04/will-it-scale/unlink2
12200 ~ 1% +132.1% 28313 ~ 1% TOTAL proc-vmstat.nr_slab_reclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
39455 ~ 5% +58.3% 62461 ~ 3% lkp-sbx04/will-it-scale/open1
33103 ~10% +172.9% 90331 ~ 3% lkp-sbx04/will-it-scale/unlink2
72559 ~ 7% +110.6% 152793 ~ 3% TOTAL numa-meminfo.node3.Slab
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
43199 ~ 8% +57.6% 68080 ~ 4% lkp-sbx04/will-it-scale/open1
35542 ~ 7% +224.5% 115339 ~ 2% lkp-sbx04/will-it-scale/unlink2
31726 ~ 7% +15.3% 36592 ~12% lkp-wsx01/will-it-scale/futex2
110469 ~ 7% +99.2% 220012 ~ 4% TOTAL numa-meminfo.node2.Slab
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
12710 ~12% +97.9% 25157 ~ 5% lkp-sbx04/will-it-scale/unlink2
12710 ~12% +97.9% 25157 ~ 5% TOTAL numa-meminfo.node2.SReclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
3179 ~11% +97.5% 6279 ~ 5% lkp-sbx04/will-it-scale/unlink2
3179 ~11% +97.5% 6279 ~ 5% TOTAL numa-vmstat.node2.nr_slab_reclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
10553 ~ 9% +90.5% 20102 ~ 6% lkp-sbx04/will-it-scale/unlink2
10553 ~ 9% +90.5% 20102 ~ 6% TOTAL numa-meminfo.node3.SReclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
2644 ~10% +89.9% 5020 ~ 6% lkp-sbx04/will-it-scale/unlink2
2644 ~10% +89.9% 5020 ~ 6% TOTAL numa-vmstat.node3.nr_slab_reclaimable
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
6034 ~26% +16.1% 7008 ~22% lkp-sbx04/will-it-scale/open1
6034 ~26% +16.1% 7008 ~22% TOTAL numa-vmstat.node2.nr_active_file
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
24139 ~26% +16.1% 28033 ~22% lkp-sbx04/will-it-scale/open1
24139 ~26% +16.1% 28033 ~22% TOTAL numa-meminfo.node2.Active(file)
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
0.55 ~35% +94.2% 1.08 ~14% lkp-sbx04/will-it-scale/unlink2
0.55 ~35% +94.2% 1.08 ~14% TOTAL perf-profile.cpu-cycles.rcu_nocb_kthread.kthread.ret_from_fork
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
498234 ~ 1% -35.9% 319244 ~ 1% lkp-sbx04/will-it-scale/open1
425531 ~ 2% -39.8% 256020 ~ 2% lkp-sbx04/will-it-scale/unlink2
923765 ~ 2% -37.7% 575264 ~ 2% TOTAL cpuidle.C7-SNB.usage
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
327 ~ 9% -26.2% 241 ~21% lkp-nex05/will-it-scale/lock1
327 ~ 9% -26.2% 241 ~21% TOTAL slabinfo.blkdev_ioc.num_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
327 ~ 9% -26.2% 241 ~21% lkp-nex05/will-it-scale/lock1
327 ~ 9% -26.2% 241 ~21% TOTAL slabinfo.blkdev_ioc.active_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
4608661 ~ 5% +15.0% 5300401 ~ 3% lkp-sbx04/will-it-scale/open1
631679 ~ 4% +64.1% 1036539 ~ 1% lkp-sbx04/will-it-scale/unlink2
5240340 ~ 5% +20.9% 6336941 ~ 3% TOTAL numa-numastat.node2.numa_hit
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
4606166 ~ 5% +15.0% 5298738 ~ 3% lkp-sbx04/will-it-scale/open1
630014 ~ 4% +64.4% 1035707 ~ 1% lkp-sbx04/will-it-scale/unlink2
5236180 ~ 5% +21.0% 6334445 ~ 3% TOTAL numa-numastat.node2.local_node
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
6319649 ~ 2% +11.6% 7054325 ~ 3% lkp-sbx04/will-it-scale/open1
1017618 ~ 3% +59.6% 1624227 ~ 1% lkp-sbx04/will-it-scale/unlink2
7337268 ~ 3% +18.3% 8678552 ~ 2% TOTAL numa-numastat.node1.local_node
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
6323803 ~ 2% +11.6% 7058478 ~ 3% lkp-sbx04/will-it-scale/open1
1021772 ~ 3% +59.4% 1628381 ~ 1% lkp-sbx04/will-it-scale/unlink2
7345576 ~ 3% +18.3% 8686859 ~ 2% TOTAL numa-numastat.node1.numa_hit
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
3533020 ~ 7% +16.2% 4106277 ~ 3% lkp-sbx04/will-it-scale/open1
529616 ~ 6% +59.8% 846268 ~ 2% lkp-sbx04/will-it-scale/unlink2
4062636 ~ 7% +21.9% 4952546 ~ 3% TOTAL numa-numastat.node3.local_node
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
3534685 ~ 7% +16.2% 4108772 ~ 3% lkp-sbx04/will-it-scale/open1
532111 ~ 6% +59.7% 849592 ~ 2% lkp-sbx04/will-it-scale/unlink2
4066797 ~ 7% +21.9% 4958364 ~ 3% TOTAL numa-numastat.node3.numa_hit
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
699767 ~ 4% +30.0% 909550 ~ 2% lkp-sbx04/will-it-scale/unlink2
699767 ~ 4% +30.0% 909550 ~ 2% TOTAL numa-vmstat.node1.numa_local
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
710327 ~ 4% +29.5% 920095 ~ 2% lkp-sbx04/will-it-scale/unlink2
710327 ~ 4% +29.5% 920095 ~ 2% TOTAL numa-vmstat.node1.numa_hit
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
355465 ~ 3% +32.2% 469857 ~ 1% lkp-sbx04/will-it-scale/unlink2
355465 ~ 3% +32.2% 469857 ~ 1% TOTAL numa-vmstat.node2.numa_local
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
401800 ~ 0% +30.8% 525635 ~ 1% lkp-sbx04/will-it-scale/unlink2
401800 ~ 0% +30.8% 525635 ~ 1% TOTAL numa-meminfo.node1.MemUsed
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
330376 ~ 1% +11.7% 369194 ~ 1% lkp-sbx04/will-it-scale/open1
327252 ~ 0% +47.0% 480920 ~ 1% lkp-sbx04/will-it-scale/unlink2
657628 ~ 0% +29.3% 850114 ~ 1% TOTAL numa-meminfo.node0.MemUsed
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
395008 ~ 2% +28.8% 508797 ~ 1% lkp-sbx04/will-it-scale/unlink2
395008 ~ 2% +28.8% 508797 ~ 1% TOTAL numa-vmstat.node2.numa_hit
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
84008013 ~ 4% +12.5% 94488610 ~ 1% lkp-sbx04/will-it-scale/open1
20904484 ~ 1% +35.9% 28407692 ~ 0% lkp-sbx04/will-it-scale/unlink2
104912497 ~ 3% +17.1% 122896302 ~ 1% TOTAL proc-vmstat.pgalloc_normal
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
20 ~19% +69.3% 34 ~32% lkp-sbx04/will-it-scale/unlink2
20 ~19% +69.3% 34 ~32% TOTAL cpuidle.C6-SNB.usage
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
90818481 ~ 4% +12.2% 1.019e+08 ~ 1% lkp-sbx04/will-it-scale/open1
24507722 ~ 1% +32.8% 32552022 ~ 0% lkp-sbx04/will-it-scale/unlink2
1.153e+08 ~ 3% +16.6% 1.344e+08 ~ 1% TOTAL proc-vmstat.pgfree
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
23331601 ~ 4% +11.8% 26096052 ~ 1% lkp-sbx04/will-it-scale/open1
7360836 ~ 1% +29.1% 9500981 ~ 0% lkp-sbx04/will-it-scale/unlink2
30692438 ~ 3% +16.0% 35597034 ~ 1% TOTAL proc-vmstat.numa_local
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
23344069 ~ 4% +11.8% 26108517 ~ 1% lkp-sbx04/will-it-scale/open1
7373303 ~ 1% +29.0% 9513445 ~ 0% lkp-sbx04/will-it-scale/unlink2
30717373 ~ 3% +16.0% 35621962 ~ 1% TOTAL proc-vmstat.numa_hit
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
293077 ~ 3% +9.2% 320181 ~ 3% lkp-sbx04/will-it-scale/open1
289901 ~ 3% +29.0% 374077 ~ 2% lkp-sbx04/will-it-scale/unlink2
582978 ~ 3% +19.1% 694258 ~ 2% TOTAL numa-meminfo.node2.MemUsed
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
1.34 ~10% -18.2% 1.10 ~ 6% lkp-sbx04/will-it-scale/open1
1.34 ~10% -18.2% 1.10 ~ 6% TOTAL perf-profile.cpu-cycles.task_work_add.filp_close.__close_fd.sys_close.system_call_fastpath
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
310408 ~ 3% +17.2% 363768 ~ 4% lkp-sbx04/will-it-scale/unlink2
310408 ~ 3% +17.2% 363768 ~ 4% TOTAL numa-vmstat.node3.numa_local
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
278872 ~ 3% +18.9% 331653 ~ 2% lkp-sbx04/will-it-scale/unlink2
278872 ~ 3% +18.9% 331653 ~ 2% TOTAL numa-meminfo.node3.MemUsed
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
1.83 ~ 3% +17.5% 2.15 ~ 3% nhm4/will-it-scale/read1
1.83 ~ 3% +17.5% 2.15 ~ 3% TOTAL perf-profile.cpu-cycles.find_lock_entry.shmem_getpage_gfp.shmem_file_read_iter.new_sync_read.vfs_read
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
350441 ~ 3% +15.4% 404433 ~ 4% lkp-sbx04/will-it-scale/unlink2
350441 ~ 3% +15.4% 404433 ~ 4% TOTAL numa-vmstat.node3.numa_hit
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
11130 ~ 1% +18.7% 13216 ~ 2% lkp-nex05/will-it-scale/lock1
13337 ~ 2% +15.2% 15360 ~ 4% lkp-sbx04/will-it-scale/unlink2
12236 ~ 1% +16.4% 14247 ~ 1% lkp-wsx01/will-it-scale/futex2
36704 ~ 2% +16.7% 42823 ~ 3% TOTAL slabinfo.kmalloc-192.num_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
11130 ~ 1% +18.6% 13206 ~ 2% lkp-nex05/will-it-scale/lock1
13337 ~ 2% +14.8% 15306 ~ 4% lkp-sbx04/will-it-scale/unlink2
12236 ~ 1% +16.4% 14247 ~ 1% lkp-wsx01/will-it-scale/futex2
36704 ~ 2% +16.5% 42760 ~ 3% TOTAL slabinfo.kmalloc-192.active_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
6819069 ~ 3% +8.6% 7407793 ~ 2% lkp-sbx04/will-it-scale/open1
3615190 ~ 1% +15.2% 4164246 ~ 0% lkp-sbx04/will-it-scale/unlink2
10434260 ~ 2% +10.9% 11572039 ~ 1% TOTAL proc-vmstat.pgalloc_dma32
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
8856248 ~ 3% +8.6% 9619086 ~ 2% lkp-sbx04/will-it-scale/open1
5216071 ~ 2% +14.6% 5979211 ~ 0% lkp-sbx04/will-it-scale/unlink2
14072319 ~ 2% +10.8% 15598298 ~ 1% TOTAL numa-numastat.node0.local_node
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
8860401 ~ 3% +8.6% 9623240 ~ 2% lkp-sbx04/will-it-scale/open1
5220224 ~ 2% +14.6% 5983365 ~ 0% lkp-sbx04/will-it-scale/unlink2
14080626 ~ 2% +10.8% 15606605 ~ 1% TOTAL numa-numastat.node0.numa_hit
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
4865 ~ 2% +13.5% 5524 ~ 6% lkp-sbx04/will-it-scale/open1
4865 ~ 2% +13.5% 5524 ~ 6% TOTAL cpuidle.C1-SNB.usage
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
2231 ~ 5% +9.1% 2434 ~ 7% lkp-sbx04/will-it-scale/unlink2
2231 ~ 5% +9.1% 2434 ~ 7% TOTAL numa-meminfo.node3.KernelStack
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
2438 ~ 2% +10.7% 2700 ~ 1% lkp-nex05/will-it-scale/lock1
2438 ~ 2% +10.7% 2700 ~ 1% TOTAL slabinfo.signal_cache.num_objs
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
17183 ~ 1% +131.6% 39791 ~ 0% lkp-nex05/will-it-scale/lock1
67122 ~ 3% -18.0% 55048 ~ 0% lkp-sbx04/will-it-scale/open1
67648 ~ 6% -18.5% 55135 ~ 0% lkp-sbx04/will-it-scale/unlink2
21242 ~ 0% +99.3% 42326 ~ 0% lkp-wsx01/will-it-scale/futex2
6224 ~ 3% +52.1% 9468 ~ 2% nhm4/will-it-scale/read1
179420 ~ 4% +12.5% 201771 ~ 0% TOTAL time.involuntary_context_switches
acf426994c75e38 0acd7c39a85836d90451b6c27
--------------- -------------------------
1054 ~ 1% +19.4% 1258 ~ 1% lkp-nex05/will-it-scale/lock1
4445 ~ 1% -40.5% 2645 ~ 0% lkp-sbx04/will-it-scale/open1
3861 ~ 0% -55.5% 1717 ~ 1% lkp-sbx04/will-it-scale/unlink2
1142 ~ 1% +15.4% 1318 ~ 1% lkp-wsx01/will-it-scale/futex2
527 ~ 1% +18.1% 622 ~ 1% nhm4/will-it-scale/read1
11030 ~ 1% -31.4% 7562 ~ 1% TOTAL vmstat.system.cs
Legend:
~XX% - stddev percent
[+-]XX% - change percent
time.involuntary_context_switches
75000 *+--*----------------*----------------------------------------------+
| : + |
| *...*.. : + * |
70000 ++ *... : + + + |
| * *...*.. + + .* |
| *...* + .. |
65000 ++ * |
| |
60000 ++ |
| |
| |
55000 O+ O O O O O O O O O O O O O O O O O O
| O |
| |
50000 ++------------------------------------------------------------------+
vmstat.system.cs
6000 ++-------------------------------------------------------------------+
| |
5500 *+..*..*...*...*..*...*..*... |
| *. |
5000 ++ .. |
| |
4500 ++ *..*...*...*.. |
| * |
4000 ++ |
| |
3500 ++ |
| |
3000 ++ |
| O O O O O O O O O O O O
2500 O+-----O---O------O---O---------------------O------O------O----------+
[*] bisect-good sample
[O] bisect-bad sample
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Fengguang
[rcu] 34577530114: +247.4% qperf.tcp.bw, -3.3% turbostat.Pkg_W
by Fengguang Wu
Hi Paul,
We are pleased to notice huge throughput increases in the qperf/iperf
tests, together with a noticeable reduction in power consumption!
git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git rcu/dev
commit 34577530114e9b1de10f3aa9665bb28c8ce585ba ("rcu: Bind grace-period kthreads to non-NO_HZ_FULL CPUs")
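The commit keeps RCU's grace-period kthreads away from the CPUs doing the
benchmark work by pinning them onto housekeeping CPUs. As a userspace
analogy only (the kernel uses its own kthread affinity interfaces), pinning
a thread to one CPU with glibc looks roughly like this (compile with
-pthread):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
	(void)arg;
	printf("worker runs on CPU %d\n", sched_getcpu());
	return NULL;
}

int main(void)
{
	pthread_attr_t attr;
	pthread_t t;
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* CPU 0 plays the housekeeping CPU */

	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(set), &set);
	pthread_create(&t, &attr, worker, NULL);
	pthread_join(t, NULL);
	pthread_attr_destroy(&attr);
	return 0;
}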
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
8.23e+08 ~ 6% +247.4% 2.859e+09 ~ 0% bens/qperf/600s
8.23e+08 ~ 6% +247.4% 2.859e+09 ~ 0% TOTAL qperf.tcp.bw
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
7.065e+09 ~ 3% +210.0% 2.19e+10 ~ 8% bens/iperf/300s-tcp
7.065e+09 ~ 3% +210.0% 2.19e+10 ~ 8% TOTAL iperf.tcp.sender.bps
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
7.065e+09 ~ 3% +210.0% 2.19e+10 ~ 8% bens/iperf/300s-tcp
7.065e+09 ~ 3% +210.0% 2.19e+10 ~ 8% TOTAL iperf.tcp.receiver.bps
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
7.718e+08 ~ 3% +177.8% 2.144e+09 ~ 1% bens/qperf/600s
7.718e+08 ~ 3% +177.8% 2.144e+09 ~ 1% TOTAL qperf.udp.recv_bw
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
7.745e+08 ~ 3% +177.0% 2.145e+09 ~ 1% bens/qperf/600s
7.745e+08 ~ 3% +177.0% 2.145e+09 ~ 1% TOTAL qperf.udp.send_bw
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
1.65e+09 ~ 1% +4.3% 1.721e+09 ~ 0% bens/qperf/600s
1.65e+09 ~ 1% +4.3% 1.721e+09 ~ 0% TOTAL qperf.sctp.bw
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
13579 ~ 1% -2.3% 13264 ~ 1% bens/qperf/600s
13579 ~ 1% -2.3% 13264 ~ 1% TOTAL qperf.sctp.latency
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
8545 ~ 0% +1.9% 8705 ~ 0% bens/qperf/600s
8545 ~ 0% +1.9% 8705 ~ 0% TOTAL qperf.udp.latency
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
12770 ~ 0% -1.0% 12637 ~ 0% ivb43/netperf/300s-25%-TCP_CRR
12770 ~ 0% -1.0% 12637 ~ 0% TOTAL netperf.Throughput_tps
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
1015 ~ 2% -92.1% 80 ~24% ivb43/netperf/300s-25%-TCP_CRR
1015 ~ 2% -92.1% 80 ~24% TOTAL cpuidle.POLL.usage
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
104620 ~31% -83.2% 17544 ~11% ivb43/netperf/300s-25%-TCP_CRR
104620 ~31% -83.2% 17544 ~11% TOTAL numa-vmstat.node1.numa_other
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
0.12 ~ 7% -73.3% 0.03 ~12% ivb43/netperf/300s-25%-TCP_CRR
0.12 ~ 7% -73.3% 0.03 ~12% TOTAL turbostat.%c3
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
53226067 ~ 3% -78.2% 11579930 ~14% ivb43/netperf/300s-25%-TCP_CRR
3327687 ~28% +86.6% 6208767 ~42% ivb44/pigz/25%-128K
56553754 ~ 4% -68.5% 17788697 ~24% TOTAL cpuidle.C1E-IVT.time
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
878601 ~ 3% -75.2% 217607 ~12% ivb43/netperf/300s-25%-TCP_CRR
9282 ~27% +99.2% 18495 ~49% ivb44/pigz/25%-128K
887884 ~ 4% -73.4% 236102 ~15% TOTAL cpuidle.C1E-IVT.usage
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
28174239 ~ 6% -61.0% 10981001 ~ 8% ivb43/netperf/300s-25%-TCP_CRR
28174239 ~ 6% -61.0% 10981001 ~ 8% TOTAL cpuidle.C3-IVT.time
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
237 ~20% +141.1% 572 ~ 9% xbm/pigz/25%-512K
237 ~20% +141.1% 572 ~ 9% TOTAL cpuidle.C3-SNB.usage
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
8192746 ~ 3% +202.8% 24806641 ~ 7% bens/iperf/300s-tcp
15458478 ~ 1% +69.2% 26148854 ~ 0% bens/qperf/600s
23651225 ~ 2% +115.4% 50955496 ~ 4% TOTAL proc-vmstat.numa_hit
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
8192746 ~ 3% +202.8% 24806641 ~ 7% bens/iperf/300s-tcp
15458478 ~ 1% +69.2% 26148854 ~ 0% bens/qperf/600s
23651225 ~ 2% +115.4% 50955496 ~ 4% TOTAL proc-vmstat.numa_local
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
63502648 ~ 3% +209.1% 1.963e+08 ~ 8% bens/iperf/300s-tcp
2.191e+08 ~ 1% +55.9% 3.416e+08 ~ 0% bens/qperf/600s
2.826e+08 ~ 2% +90.3% 5.379e+08 ~ 3% TOTAL proc-vmstat.pgfree
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
15752293 ~ 3% +209.0% 48677753 ~ 8% bens/iperf/300s-tcp
54168455 ~ 1% +56.0% 84483873 ~ 0% bens/qperf/600s
69920749 ~ 2% +90.4% 133161627 ~ 3% TOTAL proc-vmstat.pgalloc_normal
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
9737163 ~ 2% -56.7% 4213811 ~ 6% ivb43/netperf/300s-25%-TCP_CRR
9737163 ~ 2% -56.7% 4213811 ~ 6% TOTAL cpuidle.C6-IVT.usage
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
5033341 ~ 3% +192.9% 14743041 ~12% bens/iperf/300s-tcp
21147143 ~ 0% +30.9% 27679992 ~ 0% bens/qperf/600s
26180485 ~ 1% +62.0% 42423033 ~ 4% TOTAL softirqs.NET_RX
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
0.45 ~37% +109.7% 0.95 ~34% bens/qperf/600s
0.45 ~37% +109.7% 0.95 ~34% TOTAL perf-profile.cpu-cycles.copy_user_generic_string.skb_copy_datagram_iovec.tcp_recvmsg.inet_recvmsg.sock_aio_read
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
87907 ~29% +145.9% 216127 ~27% xbm/pigz/25%-512K
87907 ~29% +145.9% 216127 ~27% TOTAL cpuidle.C3-SNB.time
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
646521 ~ 7% -49.5% 326622 ~ 9% ivb43/netperf/300s-25%-TCP_CRR
646521 ~ 7% -49.5% 326622 ~ 9% TOTAL cpuidle.C3-IVT.usage
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
658 ~13% +109.3% 1377 ~11% xbm/pigz/25%-512K
658 ~13% +109.3% 1377 ~11% TOTAL cpuidle.C1E-SNB.usage
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
47748828 ~ 3% +209.2% 1.476e+08 ~ 8% bens/iperf/300s-tcp
1.65e+08 ~ 1% +55.9% 2.571e+08 ~ 0% bens/qperf/600s
2741315 ~ 3% +7.4% 2943780 ~ 5% ivb43/netperf/300s-25%-TCP_CRR
2.155e+08 ~ 2% +89.2% 4.077e+08 ~ 3% TOTAL proc-vmstat.pgalloc_dma32
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
2.826e+08 ~ 9% -31.4% 1.938e+08 ~16% lkp-sb03/nuttcp/300s
2.826e+08 ~ 9% -31.4% 1.938e+08 ~16% TOTAL cpuidle.C1-SNB.time
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
47008 ~ 4% +52.7% 71784 ~ 4% bens/qperf/600s
20704 ~ 4% +49.2% 30899 ~ 3% xbm/pigz/25%-512K
67712 ~ 4% +51.6% 102683 ~ 4% TOTAL softirqs.RCU
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
8643 ~ 7% -32.8% 5805 ~ 1% ivb43/netperf/300s-25%-TCP_CRR
8643 ~ 7% -32.8% 5805 ~ 1% TOTAL proc-vmstat.numa_hint_faults
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
8949 ~ 7% -31.5% 6129 ~ 1% ivb43/netperf/300s-25%-TCP_CRR
8949 ~ 7% -31.5% 6129 ~ 1% TOTAL proc-vmstat.numa_pte_updates
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
29.25 ~ 0% +41.8% 41.49 ~ 1% ivb43/netperf/300s-25%-TCP_CRR
29.25 ~ 0% +41.8% 41.49 ~ 1% TOTAL turbostat.%c6
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
486 ~ 6% -26.3% 358 ~ 8% ivb43/netperf/300s-25%-TCP_CRR
524 ~ 9% -24.4% 396 ~ 6% ivb44/pigz/25%-128K
486 ~ 6% -31.6% 332 ~ 7% lkp-sb03/nuttcp/300s
1497 ~ 7% -27.4% 1088 ~ 7% TOTAL slabinfo.kmem_cache.num_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
486 ~ 6% -26.3% 358 ~ 8% ivb43/netperf/300s-25%-TCP_CRR
524 ~ 9% -24.4% 396 ~ 6% ivb44/pigz/25%-128K
486 ~ 6% -31.6% 332 ~ 7% lkp-sb03/nuttcp/300s
1497 ~ 7% -27.4% 1088 ~ 7% TOTAL slabinfo.kmem_cache.active_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
7914 ~ 6% -29.6% 5575 ~ 1% ivb43/netperf/300s-25%-TCP_CRR
7914 ~ 6% -29.6% 5575 ~ 1% TOTAL proc-vmstat.numa_hint_faults_local
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
37.08 ~ 0% -34.3% 24.36 ~ 2% ivb43/netperf/300s-25%-TCP_CRR
12.48 ~ 3% -13.8% 10.76 ~ 6% lkp-sb03/nuttcp/300s
49.56 ~ 1% -29.2% 35.11 ~ 3% TOTAL turbostat.%c1
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
596 ~ 5% -21.5% 468 ~ 6% ivb43/netperf/300s-25%-TCP_CRR
634 ~ 7% -20.2% 506 ~ 5% ivb44/pigz/25%-128K
596 ~ 5% -25.8% 442 ~ 5% lkp-sb03/nuttcp/300s
1827 ~ 6% -22.4% 1418 ~ 5% TOTAL slabinfo.kmem_cache_node.active_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
98347 ~ 1% +66.0% 163290 ~ 9% bens/iperf/300s-tcp
263164 ~ 0% +15.4% 303781 ~ 1% bens/qperf/600s
181866 ~ 3% -15.0% 154551 ~ 7% ivb44/pigz/25%-128K
16865 ~ 1% +14.6% 19322 ~ 4% xbm/pigz/25%-512K
560243 ~ 1% +14.4% 640945 ~ 5% TOTAL softirqs.SCHED
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
614 ~ 5% -20.8% 486 ~ 6% ivb43/netperf/300s-25%-TCP_CRR
652 ~ 7% -19.6% 524 ~ 4% ivb44/pigz/25%-128K
614 ~ 5% -25.0% 460 ~ 5% lkp-sb03/nuttcp/300s
1881 ~ 5% -21.8% 1472 ~ 5% TOTAL slabinfo.kmem_cache_node.num_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
1.05 ~ 6% -20.1% 0.84 ~ 5% bens/qperf/600s
1.05 ~ 6% -20.1% 0.84 ~ 5% TOTAL perf-profile.cpu-cycles.tcp_rcv_established.tcp_v4_do_rcv.tcp_prequeue_process.tcp_recvmsg.inet_recvmsg
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
10097 ~ 0% +26.7% 12797 ~ 2% bens/qperf/600s
10097 ~ 0% +26.7% 12797 ~ 2% TOTAL softirqs.HRTIMER
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
0.78 ~ 3% +22.5% 0.96 ~ 3% bens/qperf/600s
0.78 ~ 3% +22.5% 0.96 ~ 3% TOTAL perf-profile.cpu-cycles.tcp_v4_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
2.835e+09 ~ 0% -16.5% 2.368e+09 ~ 0% ivb43/netperf/300s-25%-TCP_CRR
2.835e+09 ~ 0% -16.5% 2.368e+09 ~ 0% TOTAL cpuidle.C1-IVT.time
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
5915033 ~13% +14.3% 6761676 ~ 7% ivb43/netperf/300s-25%-TCP_CRR
5915033 ~13% +14.3% 6761676 ~ 7% TOTAL meminfo.DirectMap2M
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
264559 ~ 0% +15.6% 305707 ~ 0% bens/iperf/300s-tcp
264559 ~ 0% +15.6% 305707 ~ 0% TOTAL softirqs.TIMER
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
92365 ~ 3% -12.9% 80487 ~ 9% ivb44/pigz/25%-128K
92365 ~ 3% -12.9% 80487 ~ 9% TOTAL meminfo.DirectMap4k
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
24143956 ~ 3% -8.1% 22198708 ~ 5% ivb43/netperf/300s-25%-TCP_CRR
24143956 ~ 3% -8.1% 22198708 ~ 5% TOTAL numa-numastat.node1.numa_hit
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
24140149 ~ 3% -8.1% 22196166 ~ 5% ivb43/netperf/300s-25%-TCP_CRR
24140149 ~ 3% -8.1% 22196166 ~ 5% TOTAL numa-numastat.node1.local_node
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
12512229 ~ 3% -7.6% 11556940 ~ 6% ivb43/netperf/300s-25%-TCP_CRR
12512229 ~ 3% -7.6% 11556940 ~ 6% TOTAL numa-vmstat.node1.numa_hit
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
12407607 ~ 3% -7.0% 11539395 ~ 6% ivb43/netperf/300s-25%-TCP_CRR
12407607 ~ 3% -7.0% 11539395 ~ 6% TOTAL numa-vmstat.node1.numa_local
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
25536237 ~ 3% +6.8% 27272147 ~ 5% ivb43/netperf/300s-25%-TCP_CRR
25536237 ~ 3% +6.8% 27272147 ~ 5% TOTAL numa-numastat.node0.local_node
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
25538777 ~ 3% +6.8% 27275952 ~ 5% ivb43/netperf/300s-25%-TCP_CRR
25538777 ~ 3% +6.8% 27275952 ~ 5% TOTAL numa-numastat.node0.numa_hit
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
1159 ~ 3% -10.9% 1033 ~ 1% xbm/pigz/25%-512K
1159 ~ 3% -10.9% 1033 ~ 1% TOTAL slabinfo.kmalloc-96.num_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
1159 ~ 3% -10.9% 1033 ~ 1% xbm/pigz/25%-512K
1159 ~ 3% -10.9% 1033 ~ 1% TOTAL slabinfo.kmalloc-96.active_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
1.33 ~ 2% -10.5% 1.19 ~ 3% bens/qperf/600s
1.33 ~ 2% -10.5% 1.19 ~ 3% TOTAL perf-profile.cpu-cycles.tcp_sendmsg.inet_sendmsg.sock_aio_write.do_sync_write.vfs_write
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
1126 ~ 1% -9.7% 1017 ~ 1% bens/iperf/300s-tcp
1126 ~ 1% -9.7% 1017 ~ 1% TOTAL proc-vmstat.pgactivate
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
12269 ~ 4% +5.5% 12946 ~ 4% ivb43/netperf/300s-25%-TCP_CRR
12269 ~ 4% +5.5% 12946 ~ 4% TOTAL slabinfo.kmalloc-192.num_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
12258 ~ 4% +5.6% 12946 ~ 4% ivb43/netperf/300s-25%-TCP_CRR
12258 ~ 4% +5.6% 12946 ~ 4% TOTAL slabinfo.kmalloc-192.active_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
1.16 ~ 4% +9.2% 1.26 ~ 3% ivb43/netperf/300s-25%-TCP_CRR
1.16 ~ 4% +9.2% 1.26 ~ 3% TOTAL perf-profile.cpu-cycles.get_next_timer_interrupt.tick_nohz_stop_sched_tick.__tick_nohz_idle_enter.tick_nohz_idle_enter.cpu_idle_loop
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
3135 ~ 2% +8.4% 3398 ~ 1% ivb44/pigz/25%-128K
3135 ~ 2% +8.4% 3398 ~ 1% TOTAL slabinfo.task_xstate.active_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
3135 ~ 2% +8.4% 3398 ~ 1% ivb44/pigz/25%-128K
3135 ~ 2% +8.4% 3398 ~ 1% TOTAL slabinfo.task_xstate.num_objs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
11496 ~ 5% -96.2% 435 ~12% bens/iperf/300s-tcp
23777 ~ 1% -86.3% 3264 ~10% bens/qperf/600s
22961 ~ 1% -76.7% 5351 ~ 4% ivb44/pigz/25%-128K
13976 ~ 2% -97.5% 349 ~22% lkp-sb03/nuttcp/300s
11345 ~ 0% -88.0% 1361 ~25% xbm/pigz/25%-512K
83555 ~ 2% -87.1% 10761 ~ 9% TOTAL time.involuntary_context_switches
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
36820683 ~ 0% +17.5% 43253042 ~ 0% bens/qperf/600s
36820683 ~ 0% +17.5% 43253042 ~ 0% TOTAL time.voluntary_context_switches
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
13302 ~ 0% +49.0% 19817 ~14% bens/iperf/300s-tcp
62977 ~ 0% +17.3% 73856 ~ 0% bens/qperf/600s
76279 ~ 0% +22.8% 93674 ~ 3% TOTAL vmstat.system.in
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
23681 ~ 0% +48.9% 35273 ~15% bens/iperf/300s-tcp
122654 ~ 0% +17.4% 143989 ~ 0% bens/qperf/600s
759769 ~ 0% -10.5% 680088 ~ 0% ivb43/netperf/300s-25%-TCP_CRR
906105 ~ 0% -5.2% 859351 ~ 1% TOTAL vmstat.system.cs
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
144 ~ 0% +5.4% 152 ~ 0% bens/qperf/600s
144 ~ 0% +5.4% 152 ~ 0% TOTAL time.percent_of_cpu_this_job_got
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
122 ~ 0% -4.2% 116 ~ 0% ivb43/netperf/300s-25%-TCP_CRR
~ 0% -4.2% ~ 0% TOTAL turbostat.Cor_W
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
299 ~ 0% -2.8% 290 ~ 2% bens/iperf/300s-tcp
863 ~ 0% +5.2% 908 ~ 0% bens/qperf/600s
1162 ~ 0% +3.2% 1199 ~ 0% TOTAL time.system_time
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
153 ~ 0% -3.3% 147 ~ 0% ivb43/netperf/300s-25%-TCP_CRR
~ 0% -3.3% ~ 0% TOTAL turbostat.Pkg_W
221031cb1b33258 34577530114e9b1de10f3aa96
--------------- -------------------------
33.55 ~ 0% +1.7% 34.12 ~ 0% ivb43/netperf/300s-25%-TCP_CRR
33.55 ~ 0% +1.7% 34.12 ~ 0% TOTAL turbostat.%c0
Legend:
~XX% - stddev percent
[+-]XX% - change percent
iperf.tcp.sender.bps
2.4e+10 ++----------O--------------------------------------O--------------+
O O O O O O O O O O O O O O |
2.2e+10 ++ O O O |
2e+10 ++ |
| O |
1.8e+10 ++ |
1.6e+10 ++ |
| |
1.4e+10 ++ |
1.2e+10 ++ |
| |
1e+10 ++ |
8e+09 ++ |
*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*
6e+09 ++----------------------------------------------------------------+
iperf.tcp.receiver.bps
2.4e+10 ++----------O--------------------------------------O--------------+
O O O O O O O O O O O O O O |
2.2e+10 ++ O O O |
2e+10 ++ |
| O |
1.8e+10 ++ |
1.6e+10 ++ |
| |
1.4e+10 ++ |
1.2e+10 ++ |
| |
1e+10 ++ |
8e+09 ++ |
*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*..*
6e+09 ++----------------------------------------------------------------+
time.involuntary_context_switches
14000 ++------------------------------------------------------------------+
| |
12000 *+. .*.. ..*..*..*..*.. .*..*..*..*..*.. ..*..*..*..*.. *
| .*..*. *. *. *. ..|
10000 ++ *. * |
| |
8000 ++ |
| |
6000 ++ |
| |
4000 ++ |
| |
2000 ++ |
| O O |
0 O+-O-----O--O--O---O--O--O--O--O--O--O--O--O--O------O--O--O--------+
[*] bisect-good sample
[O] bisect-bad sample
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Fengguang

[block] BUG: unable to handle kernel NULL pointer dereference at 0000000000000028
by Fengguang Wu
Tejun,
In commit 09571194a9846177bea3afd18458312546112702 ("block, blk-mq:
draining can't be skipped even if bypass_depth was non-zero"), we hit
the below NULL pointer dereference in blk_throtl_drain().
+------------------------------------------------------+------------+------------+
| | f5372ab3d2 | 09571194a9 |
+------------------------------------------------------+------------+------------+
| boot_successes | 25 | 19 |
| early-boot-hang | 1 | |
| boot_failures | 0 | 6 |
| BUG:kernel_test_crashed | 0 | 1 |
| BUG:unable_to_handle_kernel_NULL_pointer_dereference | 0 | 5 |
| Oops | 0 | 5 |
| RIP:blk_throtl_drain | 0 | 5 |
| kernel_BUG_at_arch/x86/mm/pageattr.c | 0 | 5 |
| invalid_opcode | 0 | 5 |
| RIP:change_page_attr_set_clr | 0 | 5 |
| Kernel_panic-not_syncing:Fatal_exception | 0 | 5 |
| backtrace:scsi_debug_exit | 0 | 5 |
| backtrace:SyS_delete_module | 0 | 5 |
+------------------------------------------------------+------------+------------+
[ 5703.793032] sda: unknown partition table
[ 5703.798102] sd 2:0:0:0: [sda] Attached SCSI disk
[ 5706.076059] sd 2:0:0:0: [sda] Synchronizing SCSI cache
[ 5706.078586] BUG: unable to handle kernel NULL pointer dereference at 0000000000000028
[ 5706.079351] IP: [<ffffffff813cdbb0>] blk_throtl_drain+0x30/0x150
[ 5706.079351] PGD 0
[ 5706.079351] Oops: 0000 [#1] SMP
[ 5706.079351] Modules linked in: sd_mod scsi_debug(-) crct10dif_generic crc_t10dif crct10dif_common loop dm_mod fuse sg sr_mod cdrom ata_generic pata_acpi parport_pc parport snd_pcm floppy cirrus syscopyarea sysfillrect sysimgblt ttm drm_kms_helper snd_timer snd soundcore drm pcspkr ata_piix libata i2c_piix4
[ 5706.079351] CPU: 3 PID: 22026 Comm: rmmod Not tainted 3.16.0-rc1-wl-00737-g114249b #1
[ 5706.079351] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 5706.079351] task: ffff8801155f1d80 ti: ffff880100e9c000 task.ti: ffff880100e9c000
[ 5706.079351] RIP: 0010:[<ffffffff813cdbb0>] [<ffffffff813cdbb0>] blk_throtl_drain+0x30/0x150
[ 5706.079351] RSP: 0018:ffff880100e9fb60 EFLAGS: 00010046
[ 5706.079351] RAX: 0000000000000000 RBX: ffff88007f8b2eb0 RCX: ffff8800974c77a0
[ 5706.079351] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
[ 5706.079351] RBP: ffff880100e9fb78 R08: 0000000000000000 R09: 0000000000000046
[ 5706.079351] R10: ffff880100e9fb78 R11: 0000000000000000 R12: ffff88007f8b2eb0
[ 5706.079351] R13: ffff8800863ef300 R14: ffff88007f8b3508 R15: ffff88008a3c2120
[ 5706.079351] FS: 00007fac6a962700(0000) GS:ffff88011fd80000(0000) knlGS:0000000000000000
[ 5706.079351] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 5706.079351] CR2: 0000000000000028 CR3: 0000000110261000 CR4: 00000000000006e0
[ 5706.079351] Stack:
[ 5706.079351] ffff88007f8b2eb0 0000000000000000 ffff88007f8b3518 ffff880100e9fb88
[ 5706.079351] ffffffff813cabee ffff880100e9fbb8 ffffffff813afcec ffff88007f8b2eb0
[ 5706.079351] ffffffff81cf8760 ffff88007f8b2eb0 ffff88008a3c2000 ffff880100e9fbd0
[ 5706.079351] Call Trace:
[ 5706.079351] [<ffffffff813cabee>] blkcg_drain_queue+0xe/0x10
[ 5706.079351] [<ffffffff813afcec>] __blk_drain_queue+0x7c/0x180
[ 5706.079351] [<ffffffff813afe7e>] blk_queue_bypass_start+0x8e/0xd0
[ 5706.079351] [<ffffffff813c9d98>] blkcg_deactivate_policy+0x38/0x140
[ 5706.079351] [<ffffffff813cde04>] blk_throtl_exit+0x34/0x50
[ 5706.079351] [<ffffffff813cac38>] blkcg_exit_queue+0x48/0x70
[ 5706.079351] [<ffffffff813b3546>] blk_release_queue+0x26/0x100
[ 5706.079351] [<ffffffff813dbd17>] kobject_cleanup+0x77/0x1b0
[ 5706.079351] [<ffffffff813dbbc8>] kobject_put+0x28/0x60
[ 5706.079351] [<ffffffff813acb85>] blk_put_queue+0x15/0x20
[ 5706.079351] [<ffffffff8151ed0b>] scsi_device_dev_release_usercontext+0xbb/0x120
[ 5706.079351] [<ffffffff810876c7>] execute_in_process_context+0x67/0x70
[ 5706.079351] [<ffffffff8151ec4c>] scsi_device_dev_release+0x1c/0x20
[ 5706.079351] [<ffffffff814ddc02>] device_release+0x32/0xa0
[ 5706.079351] [<ffffffff813dbd17>] kobject_cleanup+0x77/0x1b0
[ 5706.079351] [<ffffffff813dbbc8>] kobject_put+0x28/0x60
[ 5706.079351] [<ffffffff814ddef7>] put_device+0x17/0x20
[ 5706.079351] [<ffffffff8151f759>] __scsi_remove_device+0xa9/0xe0
[ 5706.079351] [<ffffffff8151dd04>] scsi_forget_host+0x64/0x70
[ 5706.079351] [<ffffffff81512207>] scsi_remove_host+0x77/0x120
[ 5706.079351] [<ffffffffa01c75a9>] sdebug_driver_remove+0x29/0x90 [scsi_debug]
[ 5706.079351] [<ffffffff814e218f>] __device_release_driver+0x7f/0xf0
[ 5706.079351] [<ffffffff814e2223>] device_release_driver+0x23/0x30
[ 5706.079351] [<ffffffff814e1b28>] bus_remove_device+0x108/0x180
[ 5706.079351] [<ffffffff814de429>] device_del+0x129/0x1c0
[ 5706.079351] [<ffffffff814de4de>] device_unregister+0x1e/0x60
[ 5706.079351] [<ffffffffa01c6efc>] sdebug_remove_adapter+0x4c/0x70 [scsi_debug]
[ 5706.079351] [<ffffffffa01cb52d>] scsi_debug_exit+0x19/0xaec [scsi_debug]
[ 5706.079351] [<ffffffff810ea4fe>] SyS_delete_module+0x12e/0x1c0
[ 5706.079351] [<ffffffff818363a2>] ? int_signal+0x12/0x17
[ 5706.079351] [<ffffffff818360e9>] system_call_fastpath+0x16/0x1b
[ 5706.079351] Code: 55 65 ff 04 25 a0 c7 00 00 48 89 e5 41 55 41 54 49 89 fc 53 4c 8b af 40 07 00 00 49 8b 85 a0 00 00 00 31 ff 48 8b 80 c8 05 00 00 <48> 8b 70 28 e8 f7 9b d2 ff 48 85 c0 48 89 c3 74 61 0f 1f 80 00
[ 5706.079351] RIP [<ffffffff813cdbb0>] blk_throtl_drain+0x30/0x150
[ 5706.079351] RSP <ffff880100e9fb60>
[ 5706.079351] CR2: 0000000000000028
[ 5706.079351] ------------[ cut here ]------------
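A note on reading the oops: RAX is 0 and the faulting instruction in the
Code line is <48> 8b 70 28, i.e. "mov 0x28(%rax),%rsi", so this is a load
through a NULL struct pointer, with 0x28 being the offset of the member
being read - hence CR2 = 0000000000000028. A minimal userspace
illustration of why such a dereference faults at the member's offset
(hypothetical struct layout, not the actual kernel object):
-----------------------------------------------------------------------------
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel object involved. */
struct victim {
	char pad[0x28];
	void *member;	/* offsetof(struct victim, member) == 0x28 */
};

int main(void)
{
	/* With a NULL base pointer, reading v->member faults at
	 * NULL + 0x28, exactly the CR2 value in the oops above. */
	printf("fault address: %#zx\n", offsetof(struct victim, member));
	return 0;
}
-----------------------------------------------------------------------------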
Thanks,
Fengguang
[cpufreq] kernel BUG at kernel/irq_work.c:175!
by Fengguang Wu
FYI, we found a similar BUG message in Viresh's tree, which contains 8
cpufreq patches based on next-20140625.
tree: git://git.linaro.org/people/vireshk/linux cpufreq/cpu0-exynos
head: d51c9fbdf49983fc3303ec576d02e8ef97da3d8b
commit: d51c9fbdf49983fc3303ec576d02e8ef97da3d8b [8/8] cpufreq: cpu0: Add support for multiple 'struct cpufreq_policy' instances
+-----------------------------------------------------------------------------+-----------+---------------+------------+
| | v3.16-rc2 | next-20140625 | d51c9fbdf4 |
+-----------------------------------------------------------------------------+-----------+---------------+------------+
| boot_successes | 3 | 3 | 1 |
| boot_failures | 0 | 0 | 2 |
| kernel_BUG_at_kernel/irq_work.c | 0 | 0 | 2 |
| invalid_opcode | 0 | 0 | 2 |
| RIP:irq_work_run | 0 | 0 | 2 |
| BUG:sleeping_function_called_from_invalid_context_at_kernel/locking/rwsem.c | 0 | 0 | 2 |
| backtrace:smpboot_thread_fn | 0 | 0 | 2 |
+-----------------------------------------------------------------------------+-----------+---------------+------------+
mount.nfs: access denied by server while mounting bee:/nfsroot/trinity
run-parts: /etc/kernel-tests/99-trinity exited with return code 32
[ 66.841357] ------------[ cut here ]------------
[ 66.841772] kernel BUG at kernel/irq_work.c:175!
[ 66.842302] invalid opcode: 0000 [#1] SMP
[ 66.842686] Modules linked in:
[ 66.842969] CPU: 1 PID: 10 Comm: migration/1 Not tainted 3.16.0-rc2-next-20140625-00008-gd51c9fb #6
[ 66.843747] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 66.844010] task: ffff88001eb75e60 ti: ffff88001e03c000 task.ti: ffff88001e03c000
[ 66.844010] RIP: 0010:[<ffffffff811820fd>] [<ffffffff811820fd>] irq_work_run+0xf/0x22
[ 66.844010] RSP: 0018:ffff88001e03fcc0 EFLAGS: 00010046
[ 66.844010] RAX: 0000000080000001 RBX: 0000000000000000 RCX: 0000000000000005
[ 66.844010] RDX: 0000000000000001 RSI: 0000000000000008 RDI: 0000000000000000
[ 66.844010] RBP: ffff88001e03fce0 R08: 0000000000000200 R09: ffffffff82137030
[ 66.844010] R10: 0000000000001c24 R11: 0000000000007c00 R12: ffff88001fd13940
[ 66.844010] R13: 0000000000000000 R14: 0000000000000000 R15: ffffffff82073270
[ 66.844010] FS: 0000000000000000(0000) GS:ffff88001fd00000(0000) knlGS:0000000000000000
[ 66.844010] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 66.844010] CR2: 00007f10054a6000 CR3: 0000000000030000 CR4: 00000000000006e0
[ 66.844010] Stack:
[ 66.844010] ffffffff811412f7 0000000000000001 ffff88001fd13980 ffffffff82055fb0
[ 66.844010] ffff88001e03fd00 ffffffff811413ea 0000000000000001 00000000fffffff0
[ 66.844010] ffff88001e03fd48 ffffffff81105027 0000000000000001 0000000000000008
[ 66.844010] Call Trace:
[ 66.844010] [<ffffffff811412f7>] ? flush_smp_call_function_queue+0xab/0x10e
[ 66.844010] [<ffffffff811413ea>] hotplug_cfd+0x90/0x97
[ 66.844010] [<ffffffff81105027>] notifier_call_chain+0x6d/0x93
[ 66.844010] [<ffffffff811050c5>] __raw_notifier_call_chain+0xe/0x10
[ 66.844010] [<ffffffff810e5408>] __cpu_notify+0x20/0x37
[ 66.844010] [<ffffffff810e5432>] cpu_notify+0x13/0x15
[ 66.844010] [<ffffffff819bd4ab>] take_cpu_down+0x27/0x3a
[ 66.844010] [<ffffffff81155737>] multi_cpu_stop+0x93/0xed
[ 66.844010] [<ffffffff811556a4>] ? cpu_stop_park+0x63/0x63
[ 66.844010] [<ffffffff811559b3>] cpu_stopper_thread+0x92/0x114
[ 66.844010] [<ffffffff819d1e64>] ? retint_restore_args+0x13/0x13
[ 66.844010] [<ffffffff819d0bbf>] ? _raw_spin_lock_irqsave+0x25/0x56
[ 66.844010] [<ffffffff81107069>] smpboot_thread_fn+0x187/0x1a5
[ 66.844010] [<ffffffff81106ee2>] ? SyS_setgroups+0x10c/0x10c
[ 66.844010] [<ffffffff81101627>] kthread+0xdb/0xe3
[ 66.844010] [<ffffffff8110154c>] ? kthread_create_on_node+0x174/0x174
[ 66.844010] [<ffffffff819d127c>] ret_from_fork+0x7c/0xb0
[ 66.844010] [<ffffffff8110154c>] ? kthread_create_on_node+0x174/0x174
[ 66.844010] Code: cd 81 e8 dc 31 f6 ff c6 05 46 c3 f8 00 01 eb 05 e8 91 ff ff ff b8 01 00 00 00 5d c3 65 8b 04 25 50 b9 00 00 a9 00 00 0f 00 75 02 <0f> 0b 55 48 89 e5 e8 70 ff ff ff 5d c3 55 48 89 e5 5d c3 55 48
[ 66.844010] RIP [<ffffffff811820fd>] irq_work_run+0xf/0x22
[ 66.844010] RSP <ffff88001e03fcc0>
[ 66.844010] ---[ end trace 043717361af3ed47 ]---
[ 66.844010] BUG: sleeping function called from invalid context at kernel/locking/rwsem.c:41
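kernel/irq_work.c:175 in this tree is the context assertion in
irq_work_run(): the backtrace shows it reached from the migration/1
stopper thread during CPU offline (take_cpu_down -> hotplug_cfd ->
flush_smp_call_function_queue), not from hardirq context. A
reconstructed sketch of the pattern that fires (not a quote of the
exact source):
-----------------------------------------------------------------------------
void irq_work_run(void)
{
	BUG_ON(!in_irq());	/* kernel/irq_work.c:175 trips here when
				 * called from the stopper thread */
	__irq_work_run();
}
-----------------------------------------------------------------------------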
Thanks,
Fengguang
[migration] kernel BUG at kernel/irq_work.c:175!
by Fengguang Wu
Greetings,
0day kernel testing robot got the below dmesg and the first bad commit is
git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit 68c90b2c635f18ad51ae7440162f6c082ea1288d
Merge: f08af6f ec11f8c
Author: Stephen Rothwell <sfr@canb.auug.org.au>
AuthorDate: Mon Jun 23 14:12:48 2014 +1000
Merge branch 'akpm-current/current'
+---------------------------------+------------+------------+------------+---------------+
| | f08af6fa87 | ec11f8c81f | 68c90b2c63 | next-20140623 |
+---------------------------------+------------+------------+------------+---------------+
| boot_successes | 60 | 60 | 0 | 0 |
| boot_failures | 0 | 0 | 20 | 13 |
| kernel_BUG_at_kernel/irq_work.c | 0 | 0 | 20 | 13 |
| invalid_opcode | 0 | 0 | 20 | 13 |
| RIP:irq_work_run | 0 | 0 | 20 | 13 |
| backtrace:smpboot_thread_fn | 0 | 0 | 20 | 13 |
+---------------------------------+------------+------------+------------+---------------+
[ 2.194744] EDD information not available.
[ 2.195290] Unregister pv shared memory for cpu 0
[ 2.206025] ------------[ cut here ]------------
[ 2.206025] kernel BUG at kernel/irq_work.c:175!
[ 2.206025] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC
[ 2.206025] CPU: 0 PID: 9 Comm: migration/0 Not tainted 3.16.0-rc2-02039-g68c90b2 #1
[ 2.206025] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 2.206025] task: ffff88001219a7e0 ti: ffff8800121a4000 task.ti: ffff8800121a4000
[ 2.206025] RIP: 0010:[<ffffffff810f9318>] [<ffffffff810f9318>] irq_work_run+0xf/0x1c
[ 2.206025] RSP: 0000:ffff8800121a7c48 EFLAGS: 00010046
[ 2.206025] RAX: 0000000080000001 RBX: 0000000000000000 RCX: 0000000000000005
[ 2.206025] RDX: 0000000000000000 RSI: 0000000000000008 RDI: 0000000000000000
[ 2.206025] RBP: ffff8800121a7c68 R08: 0000000000000002 R09: 0000000000000001
[ 2.206025] R10: ffffffff810e2a10 R11: ffffffff810b9de3 R12: ffff880012412340
[ 2.206025] R13: 0000000000000000 R14: 0000000000000000 R15: ffffffff81c83e50
[ 2.206025] FS: 0000000000000000(0000) GS:ffff880012400000(0000) knlGS:0000000000000000
[ 2.206025] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 2.206025] CR2: 0000000000000000 CR3: 0000000001c0c000 CR4: 00000000000006b0
[ 2.206025] Stack:
[ 2.206025] ffffffff810e87e0 ffff880012412380 00000000fffffff0 ffffffff81c81ba0
[ 2.206025] ffff8800121a7c88 ffffffff810e88f0 0000000000000001 00000000fffffff0
[ 2.206025] ffff8800121a7cd0 ffffffff810b6e23 0000000000000000 0000000000000008
[ 2.206025] Call Trace:
[ 2.206025] [<ffffffff810e87e0>] ? flush_smp_call_function_queue+0xa4/0x107
[ 2.206025] [<ffffffff810e88f0>] hotplug_cfd+0xad/0xbb
[ 2.206025] [<ffffffff810b6e23>] notifier_call_chain+0x68/0x8e
[ 2.206025] [<ffffffff810b70c0>] __raw_notifier_call_chain+0x9/0xb
[ 2.206025] [<ffffffff8109b39e>] __cpu_notify+0x1b/0x32
[ 2.206025] [<ffffffff8109b3c3>] cpu_notify+0xe/0x10
[ 2.206025] [<ffffffff817e2817>] take_cpu_down+0x22/0x35
[ 2.206025] [<ffffffff810f4153>] multi_cpu_stop+0x8c/0xe2
[ 2.206025] [<ffffffff810f40c7>] ? cpu_stopper_thread+0x126/0x126
[ 2.206025] [<ffffffff810f402e>] cpu_stopper_thread+0x8d/0x126
[ 2.206025] [<ffffffff810cdab4>] ? lock_acquire+0x94/0x9d
[ 2.206025] [<ffffffff817f25af>] ? _raw_spin_unlock_irqrestore+0x40/0x55
[ 2.206025] [<ffffffff810cbdcd>] ? trace_hardirqs_on_caller+0x171/0x18d
[ 2.206025] [<ffffffff817f25b7>] ? _raw_spin_unlock_irqrestore+0x48/0x55
[ 2.206025] [<ffffffff810b8e39>] smpboot_thread_fn+0x182/0x1a0
[ 2.206025] [<ffffffff810b8cb7>] ? in_egroup_p+0x2e/0x2e
[ 2.206025] [<ffffffff810b372c>] kthread+0xcd/0xd5
[ 2.206025] [<ffffffff810b365f>] ? __kthread_parkme+0x5c/0x5c
[ 2.206025] [<ffffffff817f2f3c>] ret_from_fork+0x7c/0xb0
[ 2.206025] [<ffffffff810b365f>] ? __kthread_parkme+0x5c/0x5c
[ 2.206025] Code: 48 c7 c7 65 cd b0 81 e8 43 20 fa ff c6 05 50 e1 c9 00 01 eb 02 31 db 88 d8 5b 5d c3 65 8b 04 25 10 b8 00 00 a9 00 00 0f 00 75 02 <0f> 0b 55 48 89 e5 e8 b5 fd ff ff 5d c3 55 48 89 e5 53 48 89 fb
[ 2.206025] RIP [<ffffffff810f9318>] irq_work_run+0xf/0x1c
[ 2.206025] RSP <ffff8800121a7c48>
[ 2.206025] ---[ end trace f7f1564c3a1f35d0 ]---
[ 2.206025] note: migration/0[9] exited with preempt_count 1
git bisect start 58ae500a03a6bf68eee323c342431bfdd3f460b6 f08af6fa87ea33262fe2fe5167119fb55ad9dd2c --
git bisect bad 68c90b2c635f18ad51ae7440162f6c082ea1288d # 14:19 0- 20 Merge branch 'akpm-current/current'
git bisect good 6b11d02e25c79a8961983a966b7fafcdc36c7a91 # 14:23 20+ 0 slab: do not keep free objects/slabs on dead memcg caches
git bisect good 11709212b3a5479fcc63dda3160f4f4b0251f914 # 14:27 20+ 0 mm/util.c: add kstrimdup()
git bisect good 6af20930dcfcd13270de4f29f3830312f3c36a17 # 14:33 20+ 0 fork: reset mm->pinned_vm
git bisect good 8e7c32fb574ec1b49fd0e451cb25febf51430dd9 # 14:38 20+ 0 fs/qnx6: use pr_fmt and __func__ in logging
git bisect good 6873969c750b85734bc7d06be3c51ad381b3c85a # 14:41 20+ 0 shm: remove unneeded extern for function
git bisect good 2b9ed79abc340e15bc9652048d2e8d8a283bd8a1 # 14:48 20+ 0 um: use asm-generic/scatterlist.h
git bisect good ec11f8c81fbc76534c1374e29bdf36f085ed859a # 15:12 20+ 0 lib/scatterlist: clean up useless architecture versions of scatterlist.h
# first bad commit: [68c90b2c635f18ad51ae7440162f6c082ea1288d] Merge branch 'akpm-current/current'
git bisect good f08af6fa87ea33262fe2fe5167119fb55ad9dd2c # 15:14 60+ 0 Merge branch 'rd-docs/master'
git bisect good ec11f8c81fbc76534c1374e29bdf36f085ed859a # 15:19 60+ 0 lib/scatterlist: clean up useless architecture versions of scatterlist.h
git bisect bad 58ae500a03a6bf68eee323c342431bfdd3f460b6 # 15:19 0- 13 Add linux-next specific files for 20140623
git bisect good a497c3ba1d97fc69c1e78e7b96435ba8c2cb42ee # 15:25 60+ 0 Linux 3.16-rc2
git bisect bad 58ae500a03a6bf68eee323c342431bfdd3f460b6 # 15:25 0- 13 Add linux-next specific files for 20140623
This script may reproduce the error.
-----------------------------------------------------------------------------
#!/bin/bash
# usage: ./reproduce.sh <path-to-bzImage>
kernel=$1
initrd=quantal-core-x86_64.cgz
# fetch the raw initrd image; the github /blob/ page would return HTML
wget --no-clobber https://raw.githubusercontent.com/fengguang/reproduce-kernel-bug/master/initrd/$initrd
kvm=(
qemu-system-x86_64 -cpu kvm64 -enable-kvm
-kernel $kernel
-initrd $initrd
-smp 2
-m 256M
-net nic,vlan=0,macaddr=00:00:00:00:00:00,model=virtio
-net user,vlan=0
-net nic,vlan=1,model=e1000
-net user,vlan=1
-boot order=nc
-no-reboot
-watchdog i6300esb
-serial stdio
-display none
-monitor null
)
append=(
debug
sched_debug
apic=debug
ignore_loglevel
sysrq_always_enabled
panic=10
prompt_ramdisk=0
earlyprintk=ttyS0,115200
console=ttyS0,115200
console=tty0
vga=normal
root=/dev/ram0
rw
)
"${kvm[@]}" --append "${append[*]}"
-----------------------------------------------------------------------------
Thanks,
Fengguang
[lockdep] BUG: 9 unexpected failures (out of 253) - debugging disabled!
by Fengguang Wu
Greetings,
0day kernel testing robot got the below dmesg and the first bad commit is
git://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git locking/core
commit b7e4888bad0c84db587a2beae72dcf1c40ec0e52
Author: Waiman Long <Waiman.Long@hp.com>
AuthorDate: Mon Jun 23 14:25:00 2014 -0400
Commit: Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Tue Jun 24 16:39:40 2014 +0200
lockdep: restrict the use of recursive read_lock with qrwlock
Unlike the original unfair rwlock implementation, a queued rwlock
grants the lock in the chronological order of the lock requests,
except when the lock requester is in interrupt context.
Consequently, recursive read_lock calls will now hang the process if
there is a write_lock call somewhere in between the read_lock calls.
This patch updates the lockdep implementation to look for recursive
read_lock calls when queued rwlock is being used. A new read state (3)
is used to mark those read_lock calls that cannot be made recursively
except in interrupt context. The new read state exhausts the
2 bits available in the held_lock:read bit field. The addition of any
new read state in the future may require a redesign of how all those
bits are squeezed together in the held_lock structure.
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1403547900-40658-2-git-send-email-Waiman.Long@hp...
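A minimal sketch of the hang described above (an illustrative
interleaving, not code from the patch): with a queued rwlock, a
recursive read_lock queues behind any writer that arrived in between,
so the reader ends up waiting on a writer that is waiting on it.
-----------------------------------------------------------------------------
/* CPU 0                        CPU 1
 * ------                       ------
 * read_lock(&lock);            // granted
 *                              write_lock(&lock);  // queues behind reader
 * read_lock(&lock);            // queues behind the writer:
 *                              // CPU 0 waits on CPU 1, which waits
 *                              // on CPU 0 -> deadlock
 */
-----------------------------------------------------------------------------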
+----------------------------------------------------+------------+------------+------------+
| | 6cc620bc8e | b7e4888bad | 5fe00a37f7 |
+----------------------------------------------------+------------+------------+------------+
| boot_successes | 60 | 0 | 0 |
| boot_failures | 0 | 20 | 13 |
| BUG:unexpected_failures(out_of)-debugging_disabled | 0 | 20 | 13 |
+----------------------------------------------------+------------+------------+------------+
[ 0.000000] --------------------------------------------------------------------------
[ 0.000000] A-A deadlock: ok | ok |FAILED|
[ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.16.0-rc1-00024-gb7e4888 #12
[ 0.000000] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 0.000000] 00000000 00000000 c1633f8c c141223b 00000001 c1633fb4 c141248b c159ffe8
[ 0.000000] c140fce6 00000000 00000004 00000002 c16bf390 00020800 c1844800 c1633fc8
[ 0.000000] c121fe62 c15a01b7 c15a01aa 00000780 c1633fe8 c1688a0d 000000ea ffffffff
[ 0.000000] Call Trace:
[ 0.000000] [<c141223b>] dump_stack+0x48/0x60
[ 0.000000] [<c141248b>] dotest+0x58/0x4bc
[ 0.000000] [<c140fce6>] ? printk+0x38/0x3a
[ 0.000000] [<c121fe62>] locking_selftest+0xd4/0x1ddb
[ 0.000000] [<c1688a0d>] start_kernel+0x2e5/0x3a1
[ 0.000000] [<c16884c5>] ? set_init_arg+0x49/0x49
[ 0.000000] [<c16882af>] i386_start_kernel+0x79/0x7d
[ 0.000000] ok | ok | ok |
[ 0.000000] A-B-B-A deadlock: ok | ok |FAILED|
[ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.16.0-rc1-00024-gb7e4888 #12
[ 0.000000] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 0.000000] 00000000 00000000 c1633f8c c141223b 00000001 c1633fb4 c141248b c159ffe8
[ 0.000000] c140fce6 00000000 00000004 00000002 c16bf390 00020800 c1844800 c1633fc8
[ 0.000000] c121feeb c15a01b7 c15a01bd 00000780 c1633fe8 c1688a0d 000000ea ffffffff
[ 0.000000] Call Trace:
[ 0.000000] [<c141223b>] dump_stack+0x48/0x60
[ 0.000000] [<c141248b>] dotest+0x58/0x4bc
[ 0.000000] [<c140fce6>] ? printk+0x38/0x3a
[ 0.000000] [<c121feeb>] locking_selftest+0x15d/0x1ddb
[ 0.000000] [<c1688a0d>] start_kernel+0x2e5/0x3a1
[ 0.000000] [<c16884c5>] ? set_init_arg+0x49/0x49
[ 0.000000] [<c16882af>] i386_start_kernel+0x79/0x7d
[ 0.000000] ok | ok | ok |
[ 0.000000] A-B-B-C-C-A deadlock: ok | ok |FAILED|
[ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.16.0-rc1-00024-gb7e4888 #12
[ 0.000000] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 0.000000] 00000000 00000000 c1633f8c c141223b 00000001 c1633fb4 c141248b c159ffe8
[ 0.000000] c140fce6 00000000 00000004 00000002 c16bf390 00020800 c1844800 c1633fc8
[ 0.000000] c121ff74 c15a01b7 c15a01ce 00000780 c1633fe8 c1688a0d 000000ea ffffffff
[ 0.000000] Call Trace:
[ 0.000000] [<c141223b>] dump_stack+0x48/0x60
[ 0.000000] [<c141248b>] dotest+0x58/0x4bc
[ 0.000000] [<c140fce6>] ? printk+0x38/0x3a
[ 0.000000] [<c121ff74>] locking_selftest+0x1e6/0x1ddb
[ 0.000000] [<c1688a0d>] start_kernel+0x2e5/0x3a1
[ 0.000000] [<c16884c5>] ? set_init_arg+0x49/0x49
[ 0.000000] [<c16882af>] i386_start_kernel+0x79/0x7d
[ 0.000000] ok | ok | ok |
[ 0.000000] A-B-C-A-B-C deadlock: ok | ok |FAILED|
[ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.16.0-rc1-00024-gb7e4888 #12
[ 0.000000] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 0.000000] 00000000 00000000 c1633f8c c141223b 00000001 c1633fb4 c141248b c159ffe8
[ 0.000000] c140fce6 00000000 00000004 00000002 c16bf390 00020800 c1844800 c1633fc8
[ 0.000000] c121fffd c15a01b7 c15a01e3 00000780 c1633fe8 c1688a0d 000000ea ffffffff
[ 0.000000] Call Trace:
[ 0.000000] [<c141223b>] dump_stack+0x48/0x60
[ 0.000000] [<c141248b>] dotest+0x58/0x4bc
[ 0.000000] [<c140fce6>] ? printk+0x38/0x3a
[ 0.000000] [<c121fffd>] locking_selftest+0x26f/0x1ddb
[ 0.000000] [<c1688a0d>] start_kernel+0x2e5/0x3a1
[ 0.000000] [<c16884c5>] ? set_init_arg+0x49/0x49
[ 0.000000] [<c16882af>] i386_start_kernel+0x79/0x7d
[ 0.000000] ok | ok | ok |
[ 0.000000] A-B-B-C-C-D-D-A deadlock: ok | ok |FAILED|
[ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.16.0-rc1-00024-gb7e4888 #12
[ 0.000000] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 0.000000] 00000000 00000000 c1633f8c c141223b 00000001 c1633fb4 c141248b c159ffe8
[ 0.000000] c140fce6 00000000 00000004 00000002 c16bf390 00020800 c1844800 c1633fc8
[ 0.000000] c1220086 c15a01b7 c15a01f8 00000780 c1633fe8 c1688a0d 000000ea ffffffff
[ 0.000000] Call Trace:
[ 0.000000] [<c141223b>] dump_stack+0x48/0x60
[ 0.000000] [<c141248b>] dotest+0x58/0x4bc
[ 0.000000] [<c140fce6>] ? printk+0x38/0x3a
[ 0.000000] [<c1220086>] locking_selftest+0x2f8/0x1ddb
[ 0.000000] [<c1688a0d>] start_kernel+0x2e5/0x3a1
[ 0.000000] [<c16884c5>] ? set_init_arg+0x49/0x49
[ 0.000000] [<c16882af>] i386_start_kernel+0x79/0x7d
[ 0.000000] ok | ok | ok |
[ 0.000000] A-B-C-D-B-D-D-A deadlock: ok | ok |FAILED|
[ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.16.0-rc1-00024-gb7e4888 #12
[ 0.000000] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 0.000000] 00000000 00000000 c1633f8c c141223b 00000001 c1633fb4 c141248b c159ffe8
[ 0.000000] c140fce6 00000000 00000004 00000002 c16bf390 00020800 c1844800 c1633fc8
[ 0.000000] c122010f c15a01b7 c15a0211 00000780 c1633fe8 c1688a0d 000000ea ffffffff
[ 0.000000] Call Trace:
[ 0.000000] [<c141223b>] dump_stack+0x48/0x60
[ 0.000000] [<c141248b>] dotest+0x58/0x4bc
[ 0.000000] [<c140fce6>] ? printk+0x38/0x3a
[ 0.000000] [<c122010f>] locking_selftest+0x381/0x1ddb
[ 0.000000] [<c1688a0d>] start_kernel+0x2e5/0x3a1
[ 0.000000] [<c16884c5>] ? set_init_arg+0x49/0x49
[ 0.000000] [<c16882af>] i386_start_kernel+0x79/0x7d
[ 0.000000] ok | ok | ok |
[ 0.000000] A-B-C-D-B-C-D-A deadlock: ok | ok |FAILED|
[ 0.000000] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.16.0-rc1-00024-gb7e4888 #12
[ 0.000000] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 0.000000] 00000000 00000000 c1633f8c c141223b 00000001 c1633fb4 c141248b c159ffe8
[ 0.000000] c140fce6 00000000 00000004 00000002 c16bf390 00020800 c1844800 c1633fc8
[ 0.000000] c1220198 c15a01b7 c15a022a 00000780 c1633fe8 c1688a0d 000000ea ffffffff
[ 0.000000] Call Trace:
[ 0.000000] [<c141223b>] dump_stack+0x48/0x60
[ 0.000000] [<c141248b>] dotest+0x58/0x4bc
[ 0.000000] [<c140fce6>] ? printk+0x38/0x3a
[ 0.000000] [<c1220198>] locking_selftest+0x40a/0x1ddb
[ 0.000000] [<c1688a0d>] start_kernel+0x2e5/0x3a1
[ 0.000000] [<c16884c5>] ? set_init_arg+0x49/0x49
[ 0.000000] [<c16882af>] i386_start_kernel+0x79/0x7d
[ 0.000000] ok | ok | ok |
[ 0.000000] double unlock: ok | ok | ok | ok | ok | ok |
[ 0.000000] initialize held: ok | ok | ok | ok | ok | ok |
git bisect start 5fe00a37f7eb81f306abdffbf4d7093da51b8ccc a497c3ba1d97fc69c1e78e7b96435ba8c2cb42ee --
git bisect bad f07035a109af7964e8446cb7ffc1721a1c2ce43c # 23:40 0- 20 Merge 'peterz-queue/locking/core' into devel-lkp-hsx01-i386-201406242318
git bisect good 4dda92923e85460ff9299b486176f09cc25ca104 # 23:56 20+ 0 Merge 'amirv/for-net' into devel-lkp-hsx01-i386-201406242318
git bisect good 5054910c90f25564062e9a1c78e4d37a51077955 # 00:18 20+ 0 Merge 'amirv/for-netdev' into devel-lkp-hsx01-i386-201406242318
git bisect good 88f2b4c15e561bb5c28709d666364f273bf54b98 # 00:51 20+ 0 rtmutex: Simplify rtmutex_slowtrylock()
git bisect good a57594a13a446d1a6ab1dcd48339f799ce586843 # 01:01 20+ 0 rtmutex: Clarify the boost/deboost part
git bisect good 6cc620bc8e9b521e61f04eefbec0c41c01fb03b9 # 01:09 20+ 0 rtmutex: Make the rtmutex tester depend on BROKEN
git bisect bad b7e4888bad0c84db587a2beae72dcf1c40ec0e52 # 01:13 0- 20 lockdep: restrict the use of recursive read_lock with qrwlock
# first bad commit: [b7e4888bad0c84db587a2beae72dcf1c40ec0e52] lockdep: restrict the use of recursive read_lock with qrwlock
git bisect good 6cc620bc8e9b521e61f04eefbec0c41c01fb03b9 # 01:16 60+ 0 rtmutex: Make the rtmutex tester depend on BROKEN
git bisect bad 5fe00a37f7eb81f306abdffbf4d7093da51b8ccc # 01:16 0- 13 0day head guard for 'devel-lkp-hsx01-i386-201406242318'
git bisect good 8b8f5d9715845f9ae2b89ce406e71877965b29ca # 01:20 60+ 0 Merge tag 'compress-3.16-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
git bisect good 3ab63dc6b2912b76632e34889ec26355096aaead # 01:21 60+ 63 Add linux-next specific files for 20140624
This script may reproduce the error.
-----------------------------------------------------------------------------
#!/bin/bash
kernel=$1
initrd=quantal-core-i386.cgz
wget --no-clobber https://github.com/fengguang/reproduce-kernel-bug/blob/master/initrd/$initrd
kvm=(
qemu-system-x86_64 -cpu kvm64 -enable-kvm
-kernel $kernel
-initrd $initrd
-smp 2
-m 256M
-net nic,vlan=0,macaddr=00:00:00:00:00:00,model=virtio
-net user,vlan=0
-net nic,vlan=1,model=e1000
-net user,vlan=1
-boot order=nc
-no-reboot
-watchdog i6300esb
-serial stdio
-display none
-monitor null
)
append=(
debug
sched_debug
apic=debug
ignore_loglevel
sysrq_always_enabled
panic=10
prompt_ramdisk=0
earlyprintk=ttyS0,115200
console=ttyS0,115200
console=tty0
vga=normal
root=/dev/ram0
rw
)
"${kvm[@]}" --append "${append[*]}"
-----------------------------------------------------------------------------
Thanks,
Fengguang
[regulator] BUG: kernel early hang without any printk output
by Fengguang Wu
Hi Mark,
It seems this merge commit makes the kernel not 100% bootable:
5 out of 53 boots hang without any dmesg output.
git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git for-next
commit a920677c35627d1baac9776dfba0286e7a1e1f0f
Merge: 3b9f3f3 ce44beb 03187c7 c969faa 53ddddc
Author: Mark Brown <broonie@linaro.org>
AuthorDate: Sun Jun 22 11:58:45 2014 +0100
Merge remote-tracking branches 'regulator/topic/88pm800', 'regulator/topic/ab8500', 'regulator/topic/bcm590xx' and 'regulator/topic/ltc3589' into regulator-next
The dmesg for the parent commit is attached, too, to help confirm whether this is noise.
+------------------------------------------------------------+------------+------------+------------+------------+------------+------------+------------------+
| | 3b9f3f33ad | ce44beb7c0 | 03187c72db | c969faadf0 | 53ddddcad5 | a920677c35 | v3.16-rc2_062302 |
+------------------------------------------------------------+------------+------------+------------+------------+------------+------------+------------------+
| boot_successes | 0 | 306 | 0 | 391 | 0 | 0 | 0 |
| boot_failures | 408 | 0 | 306 | 17 | 408 | 102 | 53 |
| genirq:Flags_mismatch_irq.(serial)vs.(goldfish_pdev_bus) | 408 | 0 | 306 | 0 | 408 | 96 | 48 |
| WARNING:CPU:PID:at_kernel/events/core.c:perf_swevent_add() | 0 | 0 | 0 | 16 | | | |
| WARNING:CPU:PID:at_mm/slab_common.c:kmalloc_slab() | 0 | 0 | 0 | 1 | | | |
| backtrace:netlink_setsockopt | 0 | 0 | 0 | 1 | | | |
| backtrace:SyS_setsockopt | 0 | 0 | 0 | 1 | | | |
| backtrace:SyS_socketcall | 0 | 0 | 0 | 1 | | | |
| BUG:kernel_early_hang_without_any_printk_output | 0 | 0 | 0 | 0 | 0 | 6 | 5 |
+------------------------------------------------------------+------------+------------+------------+------------+------------+------------+------------------+
BUG: kernel early hang without any printk output
Command line: hung_task_panic=1 earlyprintk=ttyS0,115200 debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100 panic=10 softlockup_panic=1 nmi_watchdog=panic prompt_ramdisk=0 console=ttyS0,115200 console=tty0 vga=normal root=/dev/ram0 rw link=/kbuild-tests/run-queue/kvm/i386-randconfig-c1-06230242/linux-devel:devel-hourly-2014062302:a920677c35627d1baac9776dfba0286e7a1e1f0f:bisect-linux4/.vmlinuz-a920677c35627d1baac9776dfba0286e7a1e1f0f-20140623053514-40-ivb42 branch=linux-devel/devel-hourly-2014062302 BOOT_IMAGE=/kernel/i386-randconfig-c1-06230242/a920677c35627d1baac9776dfba0286e7a1e1f0f/vmlinuz-3.16.0-rc1-00017-ga920677 drbd.minor_count=8
Early hang kernel: vmlinuz-3.16.0-rc1-00017-ga920677 3.16.0-rc1-00017-ga920677 #6
Elapsed time: 35
git bisect start 375e99ffd57ef530588f58288bc5fe39cf94cbae a497c3ba1d97fc69c1e78e7b96435ba8c2cb42ee --
git bisect good b3aa7a29807dac2dd7355ac97eb1462e97acf9e0 # 03:43 102+ 102 Merge 'rcu/dev.2014.06.19c' into devel-hourly-2014062302
git bisect bad 9e6424f1b56fc78818b92a5954fe8e37da497f32 # 04:05 0- 1 Merge 'regulator/for-next' into devel-hourly-2014062302
git bisect good e9ffadad61b2af6372be2e06802c0faeb5550250 # 04:26 102+ 102 Merge 'arm-jcooper/irqchip/urgent' into devel-hourly-2014062302
git bisect good 12318f8cc77eab9ca20188257664a2fc9f63a8a9 # 04:48 102+ 102 Merge 'arm-soc/for-next' into devel-hourly-2014062302
git bisect good 222980021ed94fe1201ad129ac7672cb7275ecec # 05:05 102+ 102 Merge 'spi/for-next' into devel-hourly-2014062302
git bisect good 3b9f3f33ad71b863673388f88cd2ba7df803e730 # 05:20 102+ 102 Merge remote-tracking branch 'regulator/topic/core' into regulator-next
git bisect bad a920677c35627d1baac9776dfba0286e7a1e1f0f # 05:48 0- 1 Merge remote-tracking branches 'regulator/topic/88pm800', 'regulator/topic/ab8500', 'regulator/topic/bcm590xx' and 'regulator/topic/ltc3589' into regulator-next
git bisect good c969faadf02076641a81197f2e8641c40d232edd # 06:07 102+ 3 regulator: bcm590xx: remove unnecessary OOM messages
git bisect good ce44beb7c0b12d33ce88b396aeaea0ba4ca1bbfc # 06:29 102+ 0 regulator: 88pm800: remove duplicate PM800_BUCK3 define
git bisect good 53ddddcad5a4fc41db3e2f16c22344d8da58cf74 # 06:44 102+ 102 regulator: ltc3589: Staticize ltc3589_reg_defaults
# first bad commit: [a920677c35627d1baac9776dfba0286e7a1e1f0f] Merge remote-tracking branches 'regulator/topic/88pm800', 'regulator/topic/ab8500', 'regulator/topic/bcm590xx' and 'regulator/topic/ltc3589' into regulator-next
git bisect good 3b9f3f33ad71b863673388f88cd2ba7df803e730 # 06:52 306+ 408 Merge remote-tracking branch 'regulator/topic/core' into regulator-next
git bisect good ce44beb7c0b12d33ce88b396aeaea0ba4ca1bbfc # 06:57 306+ 0 regulator: 88pm800: remove duplicate PM800_BUCK3 define
git bisect good 03187c72db60e20354aca6802bc5cc3e42c1d6e1 # 07:10 306+ 306 regulator: ab8500: Remove ab8500_regulator_debug_init/exit()
git bisect good c969faadf02076641a81197f2e8641c40d232edd # 07:16 306+ 17 regulator: bcm590xx: remove unnecessary OOM messages
git bisect good 53ddddcad5a4fc41db3e2f16c22344d8da58cf74 # 07:23 306+ 408 regulator: ltc3589: Staticize ltc3589_reg_defaults
git bisect bad 375e99ffd57ef530588f58288bc5fe39cf94cbae # 07:23 0- 53 0day head guard for 'devel-hourly-2014062302'
git bisect good a497c3ba1d97fc69c1e78e7b96435ba8c2cb42ee # 07:41 306+ 408 Linux 3.16-rc2
git bisect good 633594bb2d3890711a887897f2003f41735f0dfa # 08:08 306+ 306 Add linux-next specific files for 20140620
This script may reproduce the error.
-----------------------------------------------------------------------------
#!/bin/bash
kernel=$1
kvm=(
qemu-system-x86_64 -cpu kvm64 -enable-kvm
-kernel $kernel
-smp 2
-m 256M
-net nic,vlan=0,macaddr=00:00:00:00:00:00,model=virtio
-net user,vlan=0
-net nic,vlan=1,model=e1000
-net user,vlan=1
-boot order=nc
-no-reboot
-watchdog i6300esb
-serial stdio
-display none
-monitor null
)
append=(
debug
sched_debug
apic=debug
ignore_loglevel
sysrq_always_enabled
panic=10
prompt_ramdisk=0
earlyprintk=ttyS0,115200
console=ttyS0,115200
console=tty0
vga=normal
root=/dev/ram0
rw
)
"${kvm[@]}" --append "${append[*]}"
-----------------------------------------------------------------------------
Thanks,
Fengguang
[block, blk] BUG: unable to handle kernel NULL pointer dereference at 0000000000000028
by Jet Chen
Hi Tejun,
we noticed the below changes on
git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc.git review-mq-percpu_ref
commit c924ec35e72ce0d6c289b858d323f7eb3f5076a5 ("block, blk-mq: draining can't be skipped even if bypass_depth was non-zero")
+------------------------------------------------------+------------+------------+
| | ea854572ee | c924ec35e7 |
+------------------------------------------------------+------------+------------+
| boot_successes | 26 | 10 |
| boot_failures | 0 | 6 |
| BUG:unable_to_handle_kernel_NULL_pointer_dereference | 0 | 6 |
| Oops | 0 | 6 |
| RIP:blk_throtl_drain | 0 | 6 |
| kernel_BUG_at_arch/x86/mm/pageattr.c | 0 | 6 |
| invalid_opcode | 0 | 6 |
| RIP:change_page_attr_set_clr | 0 | 6 |
| Kernel_panic-not_syncing:Fatal_exception | 0 | 6 |
| backtrace:scsi_debug_exit | 0 | 6 |
| backtrace:SyS_delete_module | 0 | 6 |
+------------------------------------------------------+------------+------------+
[ 6254.898035] sda: unknown partition table
[ 6254.903049] sd 2:0:0:0: [sda] Attached SCSI disk
[ 6257.214012] sd 2:0:0:0: [sda] Synchronizing SCSI cache
[ 6257.216452] BUG: unable to handle kernel NULL pointer dereference at 0000000000000028
[ 6257.217194] IP: [<ffffffff813ceae0>] blk_throtl_drain+0x30/0x150
[ 6257.217194] PGD 0
[ 6257.217194] Oops: 0000 [#1] SMP
[ 6257.217194] Modules linked in: sd_mod scsi_debug(-) crct10dif_generic crc_t10dif crct10dif_common loop dm_mod fuse sg sr_mod cdrom ata_generic pata_acpi parport_pc snd_pcm floppy parport snd_timer snd cirrus syscopyarea sysfillrect soundcore sysimgblt ata_piix ttm drm_kms_helper pcspkr i2c_piix4 libata drm
[ 6257.217194] CPU: 2 PID: 28645 Comm: rmmod Not tainted 3.16.0-rc1-01107-ge1fff86 #1
[ 6257.217194] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 6257.217194] task: ffff8801156c0000 ti: ffff88006af74000 task.ti: ffff88006af74000
[ 6257.217194] RIP: 0010:[<ffffffff813ceae0>] [<ffffffff813ceae0>] blk_throtl_drain+0x30/0x150
[ 6257.217194] RSP: 0018:ffff88006af77b60 EFLAGS: 00010046
[ 6257.217194] RAX: 0000000000000000 RBX: ffff88006aec0000 RCX: ffff880052240620
[ 6257.217194] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
[ 6257.217194] RBP: ffff88006af77b78 R08: 0000000000000000 R09: 0000000000000046
[ 6257.240049] R10: ffff88006af77b78 R11: 0000000000000000 R12: ffff88006aec0000
[ 6257.240049] R13: ffff88007e093600 R14: ffff88006aec0658 R15: ffff88007eb8f120
[ 6257.240049] FS: 00007fbe3f39b700(0000) GS:ffff88011fd00000(0000) knlGS:0000000000000000
[ 6257.240049] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 6257.240049] CR2: 0000000000000028 CR3: 000000006aeaf000 CR4: 00000000000006e0
[ 6257.240049] Stack:
[ 6257.240049] ffff88006aec0000 0000000000000000 ffff88006aec0668 ffff88006af77b88
[ 6257.240049] ffffffff813cbb1e ffff88006af77bb8 ffffffff813b0c1c ffff88006aec0000
[ 6257.240049] ffffffff81cf7940 ffff88006aec0000 ffff88007eb8f000 ffff88006af77bd0
[ 6257.240049] Call Trace:
[ 6257.240049] [<ffffffff813cbb1e>] blkcg_drain_queue+0xe/0x10
[ 6257.240049] [<ffffffff813b0c1c>] __blk_drain_queue+0x7c/0x180
[ 6257.240049] [<ffffffff813b0dae>] blk_queue_bypass_start+0x8e/0xd0
[ 6257.240049] [<ffffffff813cacc8>] blkcg_deactivate_policy+0x38/0x140
[ 6257.240049] [<ffffffff813ced34>] blk_throtl_exit+0x34/0x50
[ 6257.240049] [<ffffffff813cbb68>] blkcg_exit_queue+0x48/0x70
[ 6257.240049] [<ffffffff813b4476>] blk_release_queue+0x26/0x100
[ 6257.240049] [<ffffffff813dcc47>] kobject_cleanup+0x77/0x1b0
[ 6257.240049] [<ffffffff813dcaf8>] kobject_put+0x28/0x60
[ 6257.240049] [<ffffffff813adab5>] blk_put_queue+0x15/0x20
[ 6257.240049] [<ffffffff8151d9cb>] scsi_device_dev_release_usercontext+0xbb/0x120
[ 6257.240049] [<ffffffff81087727>] execute_in_process_context+0x67/0x70
[ 6257.240049] [<ffffffff8151d90c>] scsi_device_dev_release+0x1c/0x20
[ 6257.240049] [<ffffffff814deda2>] device_release+0x32/0xa0
[ 6257.240049] [<ffffffff813dcc47>] kobject_cleanup+0x77/0x1b0
[ 6257.240049] [<ffffffff813dcaf8>] kobject_put+0x28/0x60
[ 6257.240049] [<ffffffff814df097>] put_device+0x17/0x20
[ 6257.240049] [<ffffffff8151e419>] __scsi_remove_device+0xa9/0xe0
[ 6257.240049] [<ffffffff8151c9c4>] scsi_forget_host+0x64/0x70
[ 6257.240049] [<ffffffff81510ec7>] scsi_remove_host+0x77/0x120
[ 6257.240049] [<ffffffffa01c25a9>] sdebug_driver_remove+0x29/0x90 [scsi_debug]
[ 6257.240049] [<ffffffff814e332f>] __device_release_driver+0x7f/0xf0
[ 6257.240049] [<ffffffff814e33c3>] device_release_driver+0x23/0x30
[ 6257.240049] [<ffffffff814e2cc8>] bus_remove_device+0x108/0x180
[ 6257.240049] [<ffffffff814df5c9>] device_del+0x129/0x1c0
[ 6257.240049] [<ffffffff814df67e>] device_unregister+0x1e/0x60
[ 6257.240049] [<ffffffffa01c1efc>] sdebug_remove_adapter+0x4c/0x70 [scsi_debug]
[ 6257.240049] [<ffffffffa01c652d>] scsi_debug_exit+0x19/0xaec [scsi_debug]
[ 6257.240049] [<ffffffff810ea52e>] SyS_delete_module+0x12e/0x1c0
[ 6257.240049] [<ffffffff81835162>] ? int_signal+0x12/0x17
[ 6257.240049] [<ffffffff81834ea9>] system_call_fastpath+0x16/0x1b
[ 6257.240049] Code: 55 65 ff 04 25 a0 c7 00 00 48 89 e5 41 55 41 54 49 89 fc 53 4c 8b af 40 07 00 00 49 8b 85 a0 00 00 00 31 ff 48 8b 80 c8 05 00 00 <48> 8b 70 28 e8 f7 8c d2 ff 48 85 c0 48 89 c3 74 61 0f 1f 80 00
[ 6257.240049] RIP [<ffffffff813ceae0>] blk_throtl_drain+0x30/0x150
[ 6257.240049] RSP <ffff88006af77b60>
[ 6257.240049] CR2: 0000000000000028
[ 6257.240049] ------------[ cut here ]------------
Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.
Thanks,
Jet
[memcontrol] WARNING: CPU: 0 PID: 1 at kernel/res_counter.c:28 res_counter_uncharge_locked()
by Fengguang Wu
Greetings,
0day kernel testing robot got the below dmesg and the first bad commit is
git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
commit ddc5bfec501f4be3f9e89084c2db270c0c45d1d6
Author: Johannes Weiner <hannes@cmpxchg.org>
AuthorDate: Fri Jun 20 10:27:58 2014 +1000
Commit: Stephen Rothwell <sfr@canb.auug.org.au>
CommitDate: Fri Jun 20 10:27:58 2014 +1000
mm: memcontrol: rewrite uncharge API
The memcg uncharging code that is involved towards the end of a page's
lifetime - truncation, reclaim, swapout, migration - is impressively
complicated and fragile.
Because anonymous and file pages were always charged before they had their
page->mapping established, uncharges had to happen when the page type
could still be known from the context; as in unmap for anonymous, page
cache removal for file and shmem pages, and swap cache truncation for swap
pages. However, these operations happen well before the page is actually
freed, and so a lot of synchronization is necessary:
- Charging, uncharging, page migration, and charge migration all need
to take a per-page bit spinlock as they could race with uncharging.
- Swap cache truncation happens during both swap-in and swap-out, and
possibly repeatedly before the page is actually freed. This means
that the memcg swapout code is called from many contexts that make
no sense and it has to figure out the direction from page state to
make sure memory and memory+swap are always correctly charged.
- On page migration, the old page might be unmapped but then reused,
so memcg code has to prevent untimely uncharging in that case.
Because this code - which should be a simple charge transfer - is so
special-cased, it is not reusable for replace_page_cache().
But now that charged pages always have a page->mapping, introduce
mem_cgroup_uncharge(), which is called after the final put_page(), when we
know for sure that nobody is looking at the page anymore.
For page migration, introduce mem_cgroup_migrate(), which is called after
the migration is successful and the new page is fully rmapped. Because
the old page is no longer uncharged after migration, prevent double
charges by decoupling the page's memcg association (PCG_USED and
pc->mem_cgroup) from the page holding an actual charge. The new bits
PCG_MEM and PCG_MEMSW represent the respective charges and are transferred
to the new page during migration.
mem_cgroup_migrate() is suitable for replace_page_cache() as well, which
gets rid of mem_cgroup_replace_page_cache().
Swap accounting is massively simplified: because the page is no longer
uncharged as early as swap cache deletion, a new mem_cgroup_swapout() can
transfer the page's memory+swap charge (PCG_MEMSW) to the swap entry
before the final put_page() in page reclaim.
Finally, page_cgroup changes are now protected by whatever protection the
page itself offers: anonymous pages are charged under the page table lock,
whereas page cache insertions, swapin, and migration hold the page lock.
Uncharging happens under full exclusion with no outstanding references.
Charging and uncharging also ensure that the page is off-LRU, which
serializes against charge migration. Remove the very costly page_cgroup
lock and set pc->flags non-atomically.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
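For orientation, a heavily condensed sketch of where the rewritten hooks
sit in a page's lifetime, per the description above (the call sites and
argument lists are illustrative, not the actual mm/ code):
-----------------------------------------------------------------------------
/* Reclaim/swapout: hand the memory+swap charge to the swap entry
 * before the final put_page(). */
mem_cgroup_swapout(page, entry);

/* Migration: transfer the charge only once the new page is fully
 * rmapped, so the old page is not uncharged prematurely. */
mem_cgroup_migrate(oldpage, newpage);

/* Truncation/unmap/exit: uncharge after the final put_page(), when
 * nobody can be looking at the page anymore. */
mem_cgroup_uncharge(page);
-----------------------------------------------------------------------------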
+-----------------------------------------------------------------------+------------+------------+---------------+
| | 5b647620c6 | ddc5bfec50 | next-20140620 |
+-----------------------------------------------------------------------+------------+------------+---------------+
| boot_successes | 60 | 0 | 0 |
| boot_failures | 0 | 20 | 13 |
| WARNING:CPU:PID:at_kernel/res_counter.c:res_counter_uncharge_locked() | 0 | 20 | 13 |
| backtrace:vm_munmap | 0 | 20 | 13 |
| backtrace:SyS_munmap | 0 | 20 | 13 |
| backtrace:do_sys_open | 0 | 20 | 13 |
| backtrace:SyS_open | 0 | 20 | 13 |
| backtrace:do_execve | 0 | 20 | 13 |
| backtrace:SyS_execve | 0 | 20 | 13 |
| backtrace:do_group_exit | 0 | 20 | 13 |
| backtrace:SyS_exit_group | 0 | 20 | 13 |
| backtrace:SYSC_renameat2 | 0 | 11 | 8 |
| backtrace:SyS_rename | 0 | 11 | 8 |
| backtrace:do_munmap | 0 | 11 | 8 |
| backtrace:SyS_brk | 0 | 11 | 8 |
| Out_of_memory:Kill_process | 0 | 1 | |
| backtrace:do_unlinkat | 0 | 9 | 5 |
| backtrace:SyS_unlink | 0 | 9 | 5 |
| backtrace:SYSC_umount | 0 | 9 | |
| backtrace:SyS_umount | 0 | 9 | |
| backtrace:cleanup_mnt_work | 0 | 0 | 5 |
+-----------------------------------------------------------------------+------------+------------+---------------+
[ 2.747397] debug: unmapping init [mem 0xffff880001a3a000-0xffff880001bfffff]
[ 2.748630] debug: unmapping init [mem 0xffff8800021ad000-0xffff8800021fffff]
[ 2.752857] ------------[ cut here ]------------
[ 2.753355] WARNING: CPU: 0 PID: 1 at kernel/res_counter.c:28 res_counter_uncharge_locked+0x48/0x74()
[ 2.753355] CPU: 0 PID: 1 Comm: init Not tainted 3.16.0-rc1-00238-gddc5bfe #1
[ 2.753355] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[ 2.753355] 0000000000000000 ffff880012073c50 ffffffff81a23b9d ffff880012073c88
[ 2.753355] ffffffff810bc765 ffffffff8111fac8 0000000000001000 ffff88001200fa50
[ 2.753355] 0000000000000001 ffff88001200fa01 ffff880012073c98 ffffffff810bc84b
[ 2.753355] Call Trace:
[ 2.753355] [<ffffffff81a23b9d>] dump_stack+0x19/0x1b
[ 2.753355] [<ffffffff810bc765>] warn_slowpath_common+0x73/0x8c
[ 2.753355] [<ffffffff8111fac8>] ? res_counter_uncharge_locked+0x48/0x74
[ 2.753355] [<ffffffff810bc84b>] warn_slowpath_null+0x1a/0x1c
[ 2.753355] [<ffffffff8111fac8>] res_counter_uncharge_locked+0x48/0x74
[ 2.753355] [<ffffffff8111fd02>] res_counter_uncharge_until+0x4e/0xa9
[ 2.753355] [<ffffffff8111fd70>] res_counter_uncharge+0x13/0x15
[ 2.753355] [<ffffffff8119499c>] mem_cgroup_uncharge_end+0x73/0x8d
[ 2.753355] [<ffffffff8115735e>] release_pages+0x1f2/0x20d
[ 2.753355] [<ffffffff8116cc3a>] tlb_flush_mmu_free+0x28/0x43
[ 2.753355] [<ffffffff8116d5e5>] tlb_flush_mmu+0x20/0x23
[ 2.753355] [<ffffffff8116d5fc>] tlb_finish_mmu+0x14/0x39
[ 2.753355] [<ffffffff811730c1>] unmap_region+0xcd/0xdf
[ 2.753355] [<ffffffff81172b0e>] ? vma_gap_callbacks_propagate+0x18/0x33
[ 2.753355] [<ffffffff81174bf1>] do_munmap+0x252/0x2e0
[ 2.753355] [<ffffffff81174cc3>] vm_munmap+0x44/0x5c
[ 2.753355] [<ffffffff81174cfe>] SyS_munmap+0x23/0x29
[ 2.753355] [<ffffffff81a31567>] system_call_fastpath+0x16/0x1b
[ 2.753355] ---[ end trace cfeb07101f6fbdfb ]---
[ 2.780913] ------------[ cut here ]------------
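kernel/res_counter.c:28 is the underflow check in
res_counter_uncharge_locked(): it warns when more is uncharged than is
currently recorded, i.e. a double uncharge, consistent with the uncharge
rewrite being bisected here. A reconstructed sketch (assuming the
3.16-era res_counter code; not a verbatim quote):
-----------------------------------------------------------------------------
void res_counter_uncharge_locked(struct res_counter *counter,
				 unsigned long val)
{
	if (WARN_ON(counter->usage < val))	/* res_counter.c:28 */
		val = counter->usage;		/* clamp to avoid underflow */

	counter->usage -= val;
}
-----------------------------------------------------------------------------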
git bisect start 633594bb2d3890711a887897f2003f41735f0dfa 71d273fa769ea21f2422a18482e002a07ab9f8f3 --
git bisect bad df2c04c68831d13d505c127b5aa172361a17c7e3 # 14:51 0- 4 Revert "mm, CMA: change cma_declare_contiguous() to obey coding convention"
git bisect bad dc8a26d69d2039a81985549b00fc7e7e2bd34dd4 # 14:58 0- 2 Merge branch 'akpm/master'
git bisect bad fe297b4d6987d04e8b3878b3ee47efd26b95114d # 15:16 0- 8 Merge branch 'akpm-current/current'
git bisect good 6b11d02e25c79a8961983a966b7fafcdc36c7a91 # 15:24 20+ 0 slab: do not keep free objects/slabs on dead memcg caches
git bisect bad 11709212b3a5479fcc63dda3160f4f4b0251f914 # 16:02 0- 4 mm/util.c: add kstrimdup()
git bisect good d070bd175fccaab0616d8aec75acbde480531fee # 16:11 20+ 0 mm: memcontrol: catch root bypass in move precharge
git bisect bad e77f4c327c7aa19d2c9ea28ebeb3a7166db418ad # 16:27 0- 12 m68k: call find_vma with the mmap_sem held in sys_cacheflush()
git bisect bad ddc5bfec501f4be3f9e89084c2db270c0c45d1d6 # 16:48 0- 1 mm: memcontrol: rewrite uncharge API
git bisect good 737f5b9367a254a3b3149b3abae65470f5ed941e # 17:10 20+ 0 mm: memcontrol: do not acquire page_cgroup lock for kmem pages
git bisect good 5b647620c6cae14cc27782c3491c2da0f1cf245c # 17:40 20+ 0 mm-memcontrol-rewrite-charge-api-fix
# first bad commit: [ddc5bfec501f4be3f9e89084c2db270c0c45d1d6] mm: memcontrol: rewrite uncharge API
git bisect good 5b647620c6cae14cc27782c3491c2da0f1cf245c # 17:43 60+ 0 mm-memcontrol-rewrite-charge-api-fix
git bisect bad 633594bb2d3890711a887897f2003f41735f0dfa # 17:43 0- 13 Add linux-next specific files for 20140620
git bisect good 3c8fb50445833b93f69b6b703a29aae3523cad0c # 18:06 60+ 0 Merge tag 'pm+acpi-3.16-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
git bisect bad 633594bb2d3890711a887897f2003f41735f0dfa # 18:06 0- 13 Add linux-next specific files for 20140620
This script may reproduce the error.
-----------------------------------------------------------------------------
#!/bin/bash
# usage: ./reproduce.sh <path-to-bzImage>
kernel=$1
initrd=quantal-core-x86_64.cgz
# fetch the raw initrd image; the github /blob/ page would return HTML
wget --no-clobber https://raw.githubusercontent.com/fengguang/reproduce-kernel-bug/master/initrd/$initrd
kvm=(
qemu-system-x86_64 -cpu kvm64 -enable-kvm
-kernel $kernel
-initrd $initrd
-smp 2
-m 256M
-net nic,vlan=0,macaddr=00:00:00:00:00:00,model=virtio
-net user,vlan=0
-net nic,vlan=1,model=e1000
-net user,vlan=1
-boot order=nc
-no-reboot
-watchdog i6300esb
-serial stdio
-display none
-monitor null
)
append=(
debug
sched_debug
apic=debug
ignore_loglevel
sysrq_always_enabled
panic=10
prompt_ramdisk=0
earlyprintk=ttyS0,115200
console=ttyS0,115200
console=tty0
vga=normal
root=/dev/ram0
rw
)
"${kvm[@]}" --append "${append[*]}"
-----------------------------------------------------------------------------
Thanks,
Fengguang