tree:
https://git.kernel.org/pub/scm/linux/kernel/git/jfern/linux.git coresched
head: 680ec43dead6f0f84db19ef07fc0f5ea44e9f2b3
commit: e7a440f33a800ee9fdf07defb1176932ab4a59b1 [14/39] sched: Simplify the core pick
loop for optimized case
config: x86_64-randconfig-m001-20201113 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
If you fix the issue, kindly add the following tags as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
smatch warnings:
kernel/sched/core.c:4708 pick_next_task() error: we previously assumed 'next'
could be null (see line 4704)
vim +/next +4708 kernel/sched/core.c
620989bb8ec95d Peter Zijlstra 2020-06-30 4634 static struct task_struct *
620989bb8ec95d Peter Zijlstra 2020-06-30 4635 pick_next_task(struct rq *rq,
struct task_struct *prev, struct rq_flags *rf)
620989bb8ec95d Peter Zijlstra 2020-06-30 4636 {
620989bb8ec95d Peter Zijlstra 2020-06-30 4637 struct task_struct *next, *max =
NULL;
620989bb8ec95d Peter Zijlstra 2020-06-30 4638 const struct sched_class
*class;
620989bb8ec95d Peter Zijlstra 2020-06-30 4639 const struct cpumask *smt_mask;
e4fe729c9bf5c1 Joel Fernandes (Google 2020-06-30 4640) bool fi_before = false;
620989bb8ec95d Peter Zijlstra 2020-06-30 4641 bool need_sync;
620989bb8ec95d Peter Zijlstra 2020-06-30 4642 int i, j, cpu;
620989bb8ec95d Peter Zijlstra 2020-06-30 4643
620989bb8ec95d Peter Zijlstra 2020-06-30 4644 if (!sched_core_enabled(rq))
620989bb8ec95d Peter Zijlstra 2020-06-30 4645 return __pick_next_task(rq,
prev, rf);
620989bb8ec95d Peter Zijlstra 2020-06-30 4646
620989bb8ec95d Peter Zijlstra 2020-06-30 4647 cpu = cpu_of(rq);
620989bb8ec95d Peter Zijlstra 2020-06-30 4648
620989bb8ec95d Peter Zijlstra 2020-06-30 4649 /* Stopper task is switching
into idle, no need core-wide selection. */
620989bb8ec95d Peter Zijlstra 2020-06-30 4650 if (cpu_is_offline(cpu)) {
620989bb8ec95d Peter Zijlstra 2020-06-30 4651 /*
620989bb8ec95d Peter Zijlstra 2020-06-30 4652 * Reset core_pick so that we
don't enter the fastpath when
620989bb8ec95d Peter Zijlstra 2020-06-30 4653 * coming online. core_pick
would already be migrated to
620989bb8ec95d Peter Zijlstra 2020-06-30 4654 * another cpu during offline.
620989bb8ec95d Peter Zijlstra 2020-06-30 4655 */
620989bb8ec95d Peter Zijlstra 2020-06-30 4656 rq->core_pick = NULL;
620989bb8ec95d Peter Zijlstra 2020-06-30 4657 return __pick_next_task(rq,
prev, rf);
620989bb8ec95d Peter Zijlstra 2020-06-30 4658 }
620989bb8ec95d Peter Zijlstra 2020-06-30 4659
620989bb8ec95d Peter Zijlstra 2020-06-30 4660 /*
620989bb8ec95d Peter Zijlstra 2020-06-30 4661 * If there were no
{en,de}queues since we picked (IOW, the task
620989bb8ec95d Peter Zijlstra 2020-06-30 4662 * pointers are all still
valid), and we haven't scheduled the last
620989bb8ec95d Peter Zijlstra 2020-06-30 4663 * pick yet, do so now.
620989bb8ec95d Peter Zijlstra 2020-06-30 4664 *
620989bb8ec95d Peter Zijlstra 2020-06-30 4665 * rq->core_pick can be NULL
if no selection was made for a CPU because
620989bb8ec95d Peter Zijlstra 2020-06-30 4666 * it was either offline or went
offline during a sibling's core-wide
620989bb8ec95d Peter Zijlstra 2020-06-30 4667 * selection. In this case, do a
core-wide selection.
620989bb8ec95d Peter Zijlstra 2020-06-30 4668 */
620989bb8ec95d Peter Zijlstra 2020-06-30 4669 if
(rq->core->core_pick_seq == rq->core->core_task_seq &&
620989bb8ec95d Peter Zijlstra 2020-06-30 4670
rq->core->core_pick_seq != rq->core_sched_seq &&
620989bb8ec95d Peter Zijlstra 2020-06-30 4671 rq->core_pick) {
620989bb8ec95d Peter Zijlstra 2020-06-30 4672
WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
620989bb8ec95d Peter Zijlstra 2020-06-30 4673
620989bb8ec95d Peter Zijlstra 2020-06-30 4674 next = rq->core_pick;
620989bb8ec95d Peter Zijlstra 2020-06-30 4675 if (next != prev) {
620989bb8ec95d Peter Zijlstra 2020-06-30 4676 put_prev_task(rq, prev);
620989bb8ec95d Peter Zijlstra 2020-06-30 4677 set_next_task(rq, next);
620989bb8ec95d Peter Zijlstra 2020-06-30 4678 }
620989bb8ec95d Peter Zijlstra 2020-06-30 4679
620989bb8ec95d Peter Zijlstra 2020-06-30 4680 rq->core_pick = NULL;
620989bb8ec95d Peter Zijlstra 2020-06-30 4681 return next;
620989bb8ec95d Peter Zijlstra 2020-06-30 4682 }
620989bb8ec95d Peter Zijlstra 2020-06-30 4683
620989bb8ec95d Peter Zijlstra 2020-06-30 4684 put_prev_task_balance(rq, prev,
rf);
620989bb8ec95d Peter Zijlstra 2020-06-30 4685
620989bb8ec95d Peter Zijlstra 2020-06-30 4686 smt_mask = cpu_smt_mask(cpu);
620989bb8ec95d Peter Zijlstra 2020-06-30 4687 need_sync =
!!rq->core->core_cookie;
620989bb8ec95d Peter Zijlstra 2020-06-30 4688
620989bb8ec95d Peter Zijlstra 2020-06-30 4689 /* reset state */
620989bb8ec95d Peter Zijlstra 2020-06-30 4690 rq->core->core_cookie =
0UL;
ba9df3989cb9de Vineeth Pillai 2020-08-28 4691 if
(rq->core->core_forceidle) {
ba9df3989cb9de Vineeth Pillai 2020-08-28 4692 need_sync = true;
e4fe729c9bf5c1 Joel Fernandes (Google 2020-06-30 4693) fi_before = true;
ba9df3989cb9de Vineeth Pillai 2020-08-28 4694 rq->core->core_forceidle
= false;
ba9df3989cb9de Vineeth Pillai 2020-08-28 4695 }
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4696)
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4697) /*
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4698) * Optimize for common case
where this CPU has no cookies
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4699) * and there are no cookied
tasks running on siblings.
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4700) */
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4701) if (!need_sync) {
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4702) for_each_class(class) {
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4703) next =
class->pick_task(rq);
e7a440f33a800e Joel Fernandes (Google 2020-11-05 @4704) if (next)
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4705) break;
^^^^^
If we exit the loop without hitting this break
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4706) }
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4707)
e7a440f33a800e Joel Fernandes (Google 2020-11-05 @4708) if (!next->core_cookie) {
^^^^^^
then this will crash.
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4709) rq->core_pick = NULL;
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4710) goto done;
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4711) }
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4712) need_sync = true;
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4713) }
e7a440f33a800e Joel Fernandes (Google 2020-11-05 4714)
620989bb8ec95d Peter Zijlstra 2020-06-30 4715 for_each_cpu(i, smt_mask) {
620989bb8ec95d Peter Zijlstra 2020-06-30 4716 struct rq *rq_i = cpu_rq(i);
620989bb8ec95d Peter Zijlstra 2020-06-30 4717
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org