tree:
https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/cleanup
head: 22e4e54ea0beed95dd3f31efe9f2e24ecf2bf26a
commit: 22e4e54ea0beed95dd3f31efe9f2e24ecf2bf26a [8/8] rcu/tree: Use
irq_work_queue_remote()
config: m68k-randconfig-r033-20201119 (attached as .config)
compiler: m68k-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
#
https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git/commit/?...
git remote add peterz-queue https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git
git fetch --no-tags peterz-queue sched/cleanup
git checkout 22e4e54ea0beed95dd3f31efe9f2e24ecf2bf26a
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=m68k
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
All errors (new ones prefixed by >>):
In file included from include/linux/kernel.h:11,
from kernel/rcu/tree.c:21:
include/linux/scatterlist.h: In function 'sg_set_buf':
arch/m68k/include/asm/page_no.h:33:50: warning: ordered comparison of pointer with null pointer [-Wextra]
33 | #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
| ^~
include/linux/compiler.h:78:42: note: in definition of macro 'unlikely'
78 | # define unlikely(x) __builtin_expect(!!(x), 0)
| ^
include/linux/scatterlist.h:143:2: note: in expansion of macro 'BUG_ON'
143 | BUG_ON(!virt_addr_valid(buf));
| ^~~~~~
include/linux/scatterlist.h:143:10: note: in expansion of macro 'virt_addr_valid'
143 | BUG_ON(!virt_addr_valid(buf));
| ^~~~~~~~~~~~~~~
kernel/rcu/tree.c: In function 'rcu_implicit_dynticks_qs':
> kernel/rcu/tree.c:1318:5: error: implicit declaration of function 'irq_work_queue_remote'; did you mean 'irq_work_queue_on'? [-Werror=implicit-function-declaration]
1318 | irq_work_queue_remote(rdp->cpu, &rdp->rcu_iw);
| ^~~~~~~~~~~~~~~~~~~~~
| irq_work_queue_on
cc1: some warnings being treated as errors
vim +1318 kernel/rcu/tree.c
1193
1194 /*
1195 * Return true if the specified CPU has passed through a quiescent
1196 * state by virtue of being in or having passed through an dynticks
1197 * idle state since the last call to dyntick_save_progress_counter()
1198 * for this same CPU, or by virtue of having been offline.
1199 */
1200 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1201 {
1202 unsigned long jtsq;
1203 bool *rnhqp;
1204 bool *ruqp;
1205 struct rcu_node *rnp = rdp->mynode;
1206
1207 raw_lockdep_assert_held_rcu_node(rnp);
1208
1209 /*
1210 * If the CPU passed through or entered a dynticks idle phase with
1211 * no active irq/NMI handlers, then we can safely pretend that the CPU
1212 * already acknowledged the request to pass through a quiescent
1213 * state. Either way, that CPU cannot possibly be in an RCU
1214 * read-side critical section that started before the beginning
1215 * of the current RCU grace period.
1216 */
1217 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1218 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1219 rcu_gpnum_ovf(rnp, rdp);
1220 return 1;
1221 }
1222
1223 /*
1224 * Complain if a CPU that is considered to be offline from RCU's
1225 * perspective has not yet reported a quiescent state. After all,
1226 * the offline CPU should have reported a quiescent state during
1227 * the CPU-offline process, or, failing that, by rcu_gp_init()
1228 * if it ran concurrently with either the CPU going offline or the
1229 * last task on a leaf rcu_node structure exiting its RCU read-side
1230 * critical section while all CPUs corresponding to that structure
1231 * are offline. This added warning detects bugs in any of these
1232 * code paths.
1233 *
1234 * The rcu_node structure's ->lock is held here, which excludes
1235 * the relevant portions the CPU-hotplug code, the grace-period
1236 * initialization code, and the rcu_read_unlock() code paths.
1237 *
1238 * For more detail, please refer to the "Hotplug CPU" section
1239 * of RCU's Requirements documentation.
1240 */
1241 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1242 bool onl;
1243 struct rcu_node *rnp1;
1244
1245 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1246 __func__, rnp->grplo, rnp->grphi, rnp->level,
1247 (long)rnp->gp_seq, (long)rnp->completedqs);
1248 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1249 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1250 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1251 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1252 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1253 __func__, rdp->cpu, ".o"[onl],
1254 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1255 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1256 return 1; /* Break things loose after complaining. */
1257 }
1258
1259 /*
1260 * A CPU running for an extended time within the kernel can
1261 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1262 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1263 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
1264 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1265 * variable are safe because the assignments are repeated if this
1266 * CPU failed to pass through a quiescent state. This code
1267 * also checks .jiffies_resched in case jiffies_to_sched_qs
1268 * is set way high.
1269 */
1270 jtsq = READ_ONCE(jiffies_to_sched_qs);
1271 ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1272 rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1273 if (!READ_ONCE(*rnhqp) &&
1274 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1275 time_after(jiffies, rcu_state.jiffies_resched) ||
1276 rcu_state.cbovld)) {
1277 WRITE_ONCE(*rnhqp, true);
1278 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1279 smp_store_release(ruqp, true);
1280 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1281 WRITE_ONCE(*ruqp, true);
1282 }
1283
1284 /*
1285 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1286 * The above code handles this, but only for straight cond_resched().
1287 * And some in-kernel loops check need_resched() before calling
1288 * cond_resched(), which defeats the above code for CPUs that are
1289 * running in-kernel with scheduling-clock interrupts disabled.
1290 * So hit them over the head with the resched_cpu() hammer!
1291 */
1292 if (tick_nohz_full_cpu(rdp->cpu) &&
1293 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1294 rcu_state.cbovld)) {
1295 WRITE_ONCE(*ruqp, true);
1296 resched_cpu(rdp->cpu);
1297 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1298 }
1299
1300 /*
1301 * If more than halfway to RCU CPU stall-warning time, invoke
1302 * resched_cpu() more frequently to try to loosen things up a bit.
1303 * Also check to see if the CPU is getting hammered with interrupts,
1304 * but only once per grace period, just to keep the IPIs down to
1305 * a dull roar.
1306 */
1307 if (time_after(jiffies, rcu_state.jiffies_resched)) {
1308 if (time_after(jiffies,
1309 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1310 resched_cpu(rdp->cpu);
1311 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1312 }
1313 if (!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1314 (rnp->ffmask & rdp->grpmask)) {
1315 rdp->rcu_iw_gp_seq = rnp->gp_seq;
1316 if (likely(rdp->cpu != smp_processor_id())) {
1317 rdp->rcu_iw_pending = true;
1318 irq_work_queue_remote(rdp->cpu, &rdp->rcu_iw);
1319 }
1320 }
1321 }
1322
1323 return 0;
1324 }
1325
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org