tree:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head: f40ddce88593482919761f74910f42f4b84c004b
commit: dee081bf8f824cabeb7c7495367d5dad0a444eb1 READ_ONCE: Drop pointer qualifiers when
reading from scalar types
date: 10 months ago
config: alpha-randconfig-s032-20210218 (attached as .config)
compiler: alpha-linux-gcc (GCC) 9.3.0
reproduce:
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.3-215-g0fb77bb6-dirty
#
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=dee081bf8f824cabeb7c7495367d5dad0a444eb1
git remote add linus
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
git fetch --no-tags linus master
git checkout dee081bf8f824cabeb7c7495367d5dad0a444eb1
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross C=1
CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=alpha
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
"sparse warnings: (new ones prefixed by >>)"
> mm/hmm.c:333:15: sparse: sparse: cast to non-scalar
> mm/hmm.c:333:15: sparse: sparse: cast from non-scalar
vim +333 mm/hmm.c
53f5c3f489ecdd Jérôme Glisse 2018-04-10 318
53f5c3f489ecdd Jérôme Glisse 2018-04-10 319 static int hmm_vma_walk_pmd(pmd_t *pmdp,
53f5c3f489ecdd Jérôme Glisse 2018-04-10 320 unsigned long start,
53f5c3f489ecdd Jérôme Glisse 2018-04-10 321 unsigned long end,
53f5c3f489ecdd Jérôme Glisse 2018-04-10 322 struct mm_walk *walk)
53f5c3f489ecdd Jérôme Glisse 2018-04-10 323 {
53f5c3f489ecdd Jérôme Glisse 2018-04-10 324 struct hmm_vma_walk *hmm_vma_walk =
walk->private;
53f5c3f489ecdd Jérôme Glisse 2018-04-10 325 struct hmm_range *range =
hmm_vma_walk->range;
2288a9a68175ce Jason Gunthorpe 2020-03-05 326 uint64_t *pfns =
&range->pfns[(start - range->start) >> PAGE_SHIFT];
2288a9a68175ce Jason Gunthorpe 2020-03-05 327 unsigned long npages = (end - start)
>> PAGE_SHIFT;
2288a9a68175ce Jason Gunthorpe 2020-03-05 328 unsigned long addr = start;
53f5c3f489ecdd Jérôme Glisse 2018-04-10 329 pte_t *ptep;
d08faca018c461 Jérôme Glisse 2018-10-30 330 pmd_t pmd;
53f5c3f489ecdd Jérôme Glisse 2018-04-10 331
53f5c3f489ecdd Jérôme Glisse 2018-04-10 332 again:
d08faca018c461 Jérôme Glisse 2018-10-30 @333 pmd = READ_ONCE(*pmdp);
d08faca018c461 Jérôme Glisse 2018-10-30 334 if (pmd_none(pmd))
b7a16c7ad790d0 Steven Price 2020-02-03 335 return hmm_vma_walk_hole(start, end, -1,
walk);
53f5c3f489ecdd Jérôme Glisse 2018-04-10 336
d08faca018c461 Jérôme Glisse 2018-10-30 337 if (thp_migration_supported() &&
is_pmd_migration_entry(pmd)) {
a3eb13c1579ba9 Jason Gunthorpe 2020-03-27 338 if (hmm_range_need_fault(hmm_vma_walk,
pfns, npages, 0)) {
d08faca018c461 Jérôme Glisse 2018-10-30 339 hmm_vma_walk->last = addr;
d2e8d551165ccb Ralph Campbell 2019-07-25 340 pmd_migration_entry_wait(walk->mm,
pmdp);
73231612dc7c90 Jérôme Glisse 2019-05-13 341 return -EBUSY;
d08faca018c461 Jérôme Glisse 2018-10-30 342 }
7d082987e5e562 Jason Gunthorpe 2020-03-04 343 return hmm_pfns_fill(start, end, range,
HMM_PFN_NONE);
2288a9a68175ce Jason Gunthorpe 2020-03-05 344 }
2288a9a68175ce Jason Gunthorpe 2020-03-05 345
2288a9a68175ce Jason Gunthorpe 2020-03-05 346 if (!pmd_present(pmd)) {
a3eb13c1579ba9 Jason Gunthorpe 2020-03-27 347 if (hmm_range_need_fault(hmm_vma_walk,
pfns, npages, 0))
2288a9a68175ce Jason Gunthorpe 2020-03-05 348 return -EFAULT;
d28c2c9a487708 Ralph Campbell 2019-11-04 349 return hmm_pfns_fill(start, end, range,
HMM_PFN_ERROR);
2288a9a68175ce Jason Gunthorpe 2020-03-05 350 }
d08faca018c461 Jérôme Glisse 2018-10-30 351
d08faca018c461 Jérôme Glisse 2018-10-30 352 if (pmd_devmap(pmd) ||
pmd_trans_huge(pmd)) {
53f5c3f489ecdd Jérôme Glisse 2018-04-10 353 /*
d2e8d551165ccb Ralph Campbell 2019-07-25 354 * No need to take pmd_lock here, even
if some other thread
53f5c3f489ecdd Jérôme Glisse 2018-04-10 355 * is splitting the huge pmd we will get
that event through
53f5c3f489ecdd Jérôme Glisse 2018-04-10 356 * mmu_notifier callback.
53f5c3f489ecdd Jérôme Glisse 2018-04-10 357 *
d2e8d551165ccb Ralph Campbell 2019-07-25 358 * So just read pmd value and check
again it's a transparent
53f5c3f489ecdd Jérôme Glisse 2018-04-10 359 * huge or device mapping one and
compute corresponding pfn
53f5c3f489ecdd Jérôme Glisse 2018-04-10 360 * values.
53f5c3f489ecdd Jérôme Glisse 2018-04-10 361 */
53f5c3f489ecdd Jérôme Glisse 2018-04-10 362 pmd = pmd_read_atomic(pmdp);
53f5c3f489ecdd Jérôme Glisse 2018-04-10 363 barrier();
53f5c3f489ecdd Jérôme Glisse 2018-04-10 364 if (!pmd_devmap(pmd) &&
!pmd_trans_huge(pmd))
53f5c3f489ecdd Jérôme Glisse 2018-04-10 365 goto again;
53f5c3f489ecdd Jérôme Glisse 2018-04-10 366
2288a9a68175ce Jason Gunthorpe 2020-03-05 367 return hmm_vma_handle_pmd(walk, addr,
end, pfns, pmd);
53f5c3f489ecdd Jérôme Glisse 2018-04-10 368 }
53f5c3f489ecdd Jérôme Glisse 2018-04-10 369
d08faca018c461 Jérôme Glisse 2018-10-30 370 /*
d2e8d551165ccb Ralph Campbell 2019-07-25 371 * We have handled all the valid cases
above ie either none, migration,
d08faca018c461 Jérôme Glisse 2018-10-30 372 * huge or transparent huge. At this
point either it is a valid pmd
d08faca018c461 Jérôme Glisse 2018-10-30 373 * entry pointing to pte directory or it
is a bad pmd that will not
d08faca018c461 Jérôme Glisse 2018-10-30 374 * recover.
d08faca018c461 Jérôme Glisse 2018-10-30 375 */
2288a9a68175ce Jason Gunthorpe 2020-03-05 376 if (pmd_bad(pmd)) {
a3eb13c1579ba9 Jason Gunthorpe 2020-03-27 377 if (hmm_range_need_fault(hmm_vma_walk,
pfns, npages, 0))
2288a9a68175ce Jason Gunthorpe 2020-03-05 378 return -EFAULT;
d28c2c9a487708 Ralph Campbell 2019-11-04 379 return hmm_pfns_fill(start, end, range,
HMM_PFN_ERROR);
2288a9a68175ce Jason Gunthorpe 2020-03-05 380 }
53f5c3f489ecdd Jérôme Glisse 2018-04-10 381
53f5c3f489ecdd Jérôme Glisse 2018-04-10 382 ptep = pte_offset_map(pmdp, addr);
2288a9a68175ce Jason Gunthorpe 2020-03-05 383 for (; addr < end; addr += PAGE_SIZE,
ptep++, pfns++) {
53f5c3f489ecdd Jérôme Glisse 2018-04-10 384 int r;
53f5c3f489ecdd Jérôme Glisse 2018-04-10 385
2288a9a68175ce Jason Gunthorpe 2020-03-05 386 r = hmm_vma_handle_pte(walk, addr, end,
pmdp, ptep, pfns);
53f5c3f489ecdd Jérôme Glisse 2018-04-10 387 if (r) {
dfdc22078f3f06 Jason Gunthorpe 2020-02-28 388 /* hmm_vma_handle_pte() did pte_unmap()
*/
53f5c3f489ecdd Jérôme Glisse 2018-04-10 389 hmm_vma_walk->last = addr;
53f5c3f489ecdd Jérôme Glisse 2018-04-10 390 return r;
53f5c3f489ecdd Jérôme Glisse 2018-04-10 391 }
da4c3c735ea4dc Jérôme Glisse 2017-09-08 392 }
da4c3c735ea4dc Jérôme Glisse 2017-09-08 393 pte_unmap(ptep - 1);
da4c3c735ea4dc Jérôme Glisse 2017-09-08 394
53f5c3f489ecdd Jérôme Glisse 2018-04-10 395 hmm_vma_walk->last = addr;
da4c3c735ea4dc Jérôme Glisse 2017-09-08 396 return 0;
da4c3c735ea4dc Jérôme Glisse 2017-09-08 397 }
da4c3c735ea4dc Jérôme Glisse 2017-09-08 398
:::::: The code at line 333 was first introduced by commit
:::::: d08faca018c4618068e54dfef4f1d71230feff38 mm/hmm: properly handle migration pmd
:::::: TO: Jérôme Glisse <jglisse(a)redhat.com>
:::::: CC: Linus Torvalds <torvalds(a)linux-foundation.org>
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org