tree:
https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/wip
head: edd802a64c93706df4fae4a89f8e1a204bf705ea
commit: 1827ed53b4b8de9d5a215cf55d6b2c5315b14ff2 [9/13] irq_work: Provide
irq_work_queue_remote()
config: s390-allnoconfig (attached as .config)
compiler: s390-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
git checkout 1827ed53b4b8de9d5a215cf55d6b2c5315b14ff2
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=s390
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
All errors (new ones prefixed by >>):
kernel//rcu/tree.c: In function 'rcu_implicit_dynticks_qs':
> kernel//rcu/tree.c:1292:4: error: implicit declaration of
function 'irq_work_queue_on'; did you mean 'irq_work_queue_remote'?
[-Werror=implicit-function-declaration]
1292 |
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
| ^~~~~~~~~~~~~~~~~
| irq_work_queue_remote
cc1: some warnings being treated as errors
vim +1292 kernel//rcu/tree.c
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1184
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1185 /*
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1186 * Return true
if the specified CPU has passed through a quiescent
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1187 * state by
virtue of being in or having passed through an dynticks
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1188 * idle state
since the last call to dyntick_save_progress_counter()
a82dcc76021e22 kernel/rcutree.c Paul E. McKenney 2012-08-01 1189 * for this
same CPU, or by virtue of having been offline.
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1190 */
fe5ac724d81a3c kernel/rcu/tree.c Paul E. McKenney 2017-05-11 1191 static int
rcu_implicit_dynticks_qs(struct rcu_data *rdp)
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1192 {
3a19b46a5c17b1 kernel/rcu/tree.c Paul E. McKenney 2016-11-30 1193 unsigned long
jtsq;
0f9be8cabbc343 kernel/rcu/tree.c Paul E. McKenney 2017-01-27 1194 bool *rnhqp;
9226b10d78ffe7 kernel/rcu/tree.c Paul E. McKenney 2017-01-27 1195 bool *ruqp;
9b9500da815027 kernel/rcu/tree.c Paul E. McKenney 2017-08-17 1196 struct
rcu_node *rnp = rdp->mynode;
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1197
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1198 /*
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1199 * If the CPU
passed through or entered a dynticks idle phase with
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1200 * no active
irq/NMI handlers, then we can safely pretend that the CPU
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1201 * already
acknowledged the request to pass through a quiescent
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1202 * state.
Either way, that CPU cannot possibly be in an RCU
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1203 * read-side
critical section that started before the beginning
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1204 * of the
current RCU grace period.
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1205 */
dc5a4f2932f185 kernel/rcu/tree.c Paul E. McKenney 2018-08-03 1206 if
(rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
88d1bead858d88 kernel/rcu/tree.c Paul E. McKenney 2018-07-04 1207
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
9b9500da815027 kernel/rcu/tree.c Paul E. McKenney 2017-08-17 1208
rcu_gpnum_ovf(rnp, rdp);
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1209 return 1;
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1210 }
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1211
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1212 /* If waiting
too long on an offline CPU, complain. */
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1213 if
(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
88d1bead858d88 kernel/rcu/tree.c Paul E. McKenney 2018-07-04 1214
time_after(jiffies, rcu_state.gp_start + HZ)) {
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1215 bool onl;
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1216 struct
rcu_node *rnp1;
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1217
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1218 WARN_ON(1);
/* Offline CPUs are supposed to report QS! */
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1219
pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1220 __func__,
rnp->grplo, rnp->grphi, rnp->level,
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1221
(long)rnp->gp_seq, (long)rnp->completedqs);
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1222 for (rnp1 =
rnp; rnp1; rnp1 = rnp1->parent)
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1223
pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx
->rcu_gp_init_mask %#lx\n",
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1224 __func__,
rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit,
rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1225 onl =
!!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1226
pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1227 __func__,
rdp->cpu, ".o"[onl],
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1228
(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1229
(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1230 return 1; /*
Break things loose after complaining. */
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1231 }
f2e2df59786d7b kernel/rcu/tree.c Paul E. McKenney 2018-05-15 1232
65d798f0f9339a kernel/rcutree.c Paul E. McKenney 2013-04-12 1233 /*
4a81e8328d3791 kernel/rcu/tree.c Paul E. McKenney 2014-06-20 1234 * A CPU
running for an extended time within the kernel can
c06aed0e31008a kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1235 * delay RCU
grace periods: (1) At age jiffies_to_sched_qs,
c06aed0e31008a kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1236 * set
.rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1237 * both
.rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1238 *
unsynchronized assignments to the per-CPU rcu_need_heavy_qs
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1239 * variable
are safe because the assignments are repeated if this
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1240 * CPU failed
to pass through a quiescent state. This code
c06aed0e31008a kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1241 * also checks
.jiffies_resched in case jiffies_to_sched_qs
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1242 * is set way
high.
4a81e8328d3791 kernel/rcu/tree.c Paul E. McKenney 2014-06-20 1243 */
c06aed0e31008a kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1244 jtsq =
READ_ONCE(jiffies_to_sched_qs);
2dba13f0b6c2b2 kernel/rcu/tree.c Paul E. McKenney 2018-08-03 1245 ruqp =
per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
2dba13f0b6c2b2 kernel/rcu/tree.c Paul E. McKenney 2018-08-03 1246 rnhqp =
&per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
0f9be8cabbc343 kernel/rcu/tree.c Paul E. McKenney 2017-01-27 1247 if
(!READ_ONCE(*rnhqp) &&
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1248
(time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
b2b00ddf193bf8 kernel/rcu/tree.c Paul E. McKenney 2019-10-30 1249
time_after(jiffies, rcu_state.jiffies_resched) ||
b2b00ddf193bf8 kernel/rcu/tree.c Paul E. McKenney 2019-10-30 1250
rcu_state.cbovld)) {
0f9be8cabbc343 kernel/rcu/tree.c Paul E. McKenney 2017-01-27 1251
WRITE_ONCE(*rnhqp, true);
9226b10d78ffe7 kernel/rcu/tree.c Paul E. McKenney 2017-01-27 1252 /* Store
rcu_need_heavy_qs before rcu_urgent_qs. */
9226b10d78ffe7 kernel/rcu/tree.c Paul E. McKenney 2017-01-27 1253
smp_store_release(ruqp, true);
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1254 } else if
(time_after(jiffies, rcu_state.gp_start + jtsq)) {
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1255
WRITE_ONCE(*ruqp, true);
6193c76aba8ec3 kernel/rcu/tree.c Paul E. McKenney 2013-09-23 1256 }
6193c76aba8ec3 kernel/rcu/tree.c Paul E. McKenney 2013-09-23 1257
28053bc72c0e58 kernel/rcu/tree.c Paul E. McKenney 2016-12-01 1258 /*
c98cac603f1ce7 kernel/rcu/tree.c Paul E. McKenney 2018-11-21 1259 * NO_HZ_FULL
CPUs can run in-kernel without rcu_sched_clock_irq!
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1260 * The above
code handles this, but only for straight cond_resched().
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1261 * And some
in-kernel loops check need_resched() before calling
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1262 *
cond_resched(), which defeats the above code for CPUs that are
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1263 * running
in-kernel with scheduling-clock interrupts disabled.
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1264 * So hit them
over the head with the resched_cpu() hammer!
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1265 */
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1266 if
(tick_nohz_full_cpu(rdp->cpu) &&
b2b00ddf193bf8 kernel/rcu/tree.c Paul E. McKenney 2019-10-30 1267
(time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
b2b00ddf193bf8 kernel/rcu/tree.c Paul E. McKenney 2019-10-30 1268
rcu_state.cbovld)) {
05ef9e9eb3dade kernel/rcu/tree.c Joel Fernandes (Google 2019-08-15 1269)
WRITE_ONCE(*ruqp, true);
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1270
resched_cpu(rdp->cpu);
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1271
WRITE_ONCE(rdp->last_fqs_resched, jiffies);
6193c76aba8ec3 kernel/rcu/tree.c Paul E. McKenney 2013-09-23 1272 }
6193c76aba8ec3 kernel/rcu/tree.c Paul E. McKenney 2013-09-23 1273
28053bc72c0e58 kernel/rcu/tree.c Paul E. McKenney 2016-12-01 1274 /*
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1275 * If more
than halfway to RCU CPU stall-warning time, invoke
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1276 *
resched_cpu() more frequently to try to loosen things up a bit.
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1277 * Also check
to see if the CPU is getting hammered with interrupts,
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1278 * but only
once per grace period, just to keep the IPIs down to
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1279 * a dull
roar.
28053bc72c0e58 kernel/rcu/tree.c Paul E. McKenney 2016-12-01 1280 */
7e28c5af4ef6b5 kernel/rcu/tree.c Paul E. McKenney 2018-07-11 1281 if
(time_after(jiffies, rcu_state.jiffies_resched)) {
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1282 if
(time_after(jiffies,
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1283
READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
28053bc72c0e58 kernel/rcu/tree.c Paul E. McKenney 2016-12-01 1284
resched_cpu(rdp->cpu);
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1285
WRITE_ONCE(rdp->last_fqs_resched, jiffies);
d3052109c0bc9e kernel/rcu/tree.c Paul E. McKenney 2018-07-25 1286 }
9b9500da815027 kernel/rcu/tree.c Paul E. McKenney 2017-08-17 1287 if
(IS_ENABLED(CONFIG_IRQ_WORK) &&
8aa670cdacc182 kernel/rcu/tree.c Paul E. McKenney 2018-04-28 1288
!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
9b9500da815027 kernel/rcu/tree.c Paul E. McKenney 2017-08-17 1289
(rnp->ffmask & rdp->grpmask)) {
9b9500da815027 kernel/rcu/tree.c Paul E. McKenney 2017-08-17 1290
rdp->rcu_iw_pending = true;
8aa670cdacc182 kernel/rcu/tree.c Paul E. McKenney 2018-04-28 1291
rdp->rcu_iw_gp_seq = rnp->gp_seq;
9b9500da815027 kernel/rcu/tree.c Paul E. McKenney 2017-08-17 @1292
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
9b9500da815027 kernel/rcu/tree.c Paul E. McKenney 2017-08-17 1293 }
9b9500da815027 kernel/rcu/tree.c Paul E. McKenney 2017-08-17 1294 }
4914950aaa12de kernel/rcu/tree.c Paul E. McKenney 2015-12-11 1295
a82dcc76021e22 kernel/rcutree.c Paul E. McKenney 2012-08-01 1296 return 0;
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1297 }
64db4cfff99c04 kernel/rcutree.c Paul E. McKenney 2008-12-18 1298
:::::: The code at line 1292 was first introduced by commit
:::::: 9b9500da81502738efa1b485a8835f174ff7be6d rcu: Make RCU CPU stall warnings check for
irq-disabled CPUs
:::::: TO: Paul E. McKenney <paulmck(a)linux.vnet.ibm.com>
:::::: CC: Paul E. McKenney <paulmck(a)linux.vnet.ibm.com>
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org