tree:
git://git.ti.com/ti-linux-kernel/ti-linux-kernel.git ti-rt-linux-5.4.y
head: 39a31e930c9ef3b4f39306c49cbaa9c58c94e353
commit: 39a31e930c9ef3b4f39306c49cbaa9c58c94e353 [10/10] Merged TI feature
ti_linux_base_rt into ti-rt-linux-5.4.y
config: arm64-randconfig-r034-20210318 (attached as .config)
compiler: clang version 13.0.0 (
https://github.com/llvm/llvm-project
6db3ab2903f42712f44000afb5aa467efbd25f35)
reproduce (this is a W=1 build):
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# install arm64 cross compiling tool for clang build
# apt-get install binutils-aarch64-linux-gnu
git remote add ti
git://git.ti.com/ti-linux-kernel/ti-linux-kernel.git
git fetch --no-tags ti ti-rt-linux-5.4.y
git checkout 39a31e930c9ef3b4f39306c49cbaa9c58c94e353
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
All warnings (new ones prefixed by >>):
> kernel/rcu/srcutree.c:739:30: warning: unused variable
'sp_llock' [-Wunused-const-variable]
static
DEFINE_LOCAL_IRQ_LOCK(sp_llock);
^
1 warning generated.
vim +/sp_llock +739 kernel/rcu/srcutree.c
dad81a2026841b Paul E. McKenney 2017-03-25 738
8a5f2a03d28c15 Sebastian Andrzej Siewior 2017-10-12 @739 static
DEFINE_LOCAL_IRQ_LOCK(sp_llock);
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 740 /*
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 741 * If SRCU is likely idle,
return true, otherwise return false.
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 742 *
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 743 * Note that it is OK for
several current from-idle requests for a new
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 744 * grace period from idle to
specify expediting because they will all end
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 745 * up requesting the same grace
period anyhow. So no loss.
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 746 *
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 747 * Note also that if any CPU
(including the current one) is still invoking
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 748 * callbacks, this function will
nevertheless say "idle". This is not
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 749 * ideal, but the overhead of
checking all CPUs' callback lists is even
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 750 * less ideal, especially on
large systems. Furthermore, the wakeup
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 751 * can happen before the
callback is fully removed, so we have no choice
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 752 * but to accept this type of
error.
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 753 *
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 754 * This function is also subject
to counter-wrap errors, but let's face
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 755 * it, if this function was
preempted for enough time for the counters
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 756 * to wrap, it really
doesn't matter whether or not we expedite the grace
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 757 * period. The extra overhead
of a needlessly expedited grace period is
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 758 * negligible when amortized
over that time period, and the extra latency
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 759 * of a needlessly non-expedited
grace period is similarly negligible.
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 760 */
aacb5d91ab1bfb Paul E. McKenney 2018-10-28 761 static bool
srcu_might_be_idle(struct srcu_struct *ssp)
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 762 {
22607d66bbc3e8 Paul E. McKenney 2017-04-25 763 unsigned long curseq;
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 764 unsigned long flags;
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 765 struct srcu_data *sdp;
22607d66bbc3e8 Paul E. McKenney 2017-04-25 766 unsigned long t;
c71706a5ffff80 Paul E. McKenney 2019-11-04 767 unsigned long tlast;
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 768
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 769 /* If the local srcu_data
structure has callbacks, not idle. */
8a5f2a03d28c15 Sebastian Andrzej Siewior 2017-10-12 770 local_lock_irqsave(sp_llock,
flags);
aacb5d91ab1bfb Paul E. McKenney 2018-10-28 771 sdp =
this_cpu_ptr(ssp->sda);
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 772 if
(rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
8a5f2a03d28c15 Sebastian Andrzej Siewior 2017-10-12 773
local_unlock_irqrestore(sp_llock, flags);
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 774 return false; /* Callbacks
already present, so not idle. */
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 775 }
8a5f2a03d28c15 Sebastian Andrzej Siewior 2017-10-12 776
local_unlock_irqrestore(sp_llock, flags);
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 777
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 778 /*
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 779 * No local callbacks, so
probabilistically probe global state.
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 780 * Exact information would
require acquiring locks, which would
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 781 * kill scalability, hence the
probabilistic nature of the probe.
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 782 */
22607d66bbc3e8 Paul E. McKenney 2017-04-25 783
22607d66bbc3e8 Paul E. McKenney 2017-04-25 784 /* First, see if enough time
has passed since the last GP. */
22607d66bbc3e8 Paul E. McKenney 2017-04-25 785 t = ktime_get_mono_fast_ns();
c71706a5ffff80 Paul E. McKenney 2019-11-04 786 tlast =
READ_ONCE(ssp->srcu_last_gp_end);
22607d66bbc3e8 Paul E. McKenney 2017-04-25 787 if (exp_holdoff == 0 ||
c71706a5ffff80 Paul E. McKenney 2019-11-04 788 time_in_range_open(t,
tlast, tlast + exp_holdoff))
22607d66bbc3e8 Paul E. McKenney 2017-04-25 789 return false; /* Too soon
after last GP. */
22607d66bbc3e8 Paul E. McKenney 2017-04-25 790
22607d66bbc3e8 Paul E. McKenney 2017-04-25 791 /* Next, check for probable
idleness. */
aacb5d91ab1bfb Paul E. McKenney 2018-10-28 792 curseq =
rcu_seq_current(&ssp->srcu_gp_seq);
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 793 smp_mb(); /* Order
->srcu_gp_seq with ->srcu_gp_seq_needed. */
aacb5d91ab1bfb Paul E. McKenney 2018-10-28 794 if (ULONG_CMP_LT(curseq,
READ_ONCE(ssp->srcu_gp_seq_needed)))
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 795 return false; /* Grace period
in progress, so not idle. */
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 796 smp_mb(); /* Order
->srcu_gp_seq with prior access. */
aacb5d91ab1bfb Paul E. McKenney 2018-10-28 797 if (curseq !=
rcu_seq_current(&ssp->srcu_gp_seq))
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 798 return false; /* GP # changed,
so not idle. */
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 799 return true; /* With reasonable
probability, idle! */
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 800 }
2da4b2a7fd8de5 Paul E. McKenney 2017-04-25 801
:::::: The code at line 739 was first introduced by commit
:::::: 8a5f2a03d28c1523e49a5c501b74ccc567d321f7 srcu: replace local_irqsave() with a
locallock
:::::: TO: Sebastian Andrzej Siewior <bigeasy(a)linutronix.de>
:::::: CC: Sebastian Andrzej Siewior <bigeasy(a)linutronix.de>
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org