tree:
https://git.kernel.org/pub/scm/linux/kernel/git/sashal/linux-stable.git
stable-next
head: 71e7fa7873ec31c6633355eccb3b3049436a0fcc
commit: e0a184f834acf511b7a59105a27e450d69ef3d0d [104/157] shmem: pin the file in
shmem_fault() if mmap_sem is dropped
config: i386-defconfig (attached as .config)
compiler: gcc-7 (Debian 7.4.0-13) 7.4.0
reproduce:
git checkout e0a184f834acf511b7a59105a27e450d69ef3d0d
# save the attached .config to linux build tree
make ARCH=i386
If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
mm/shmem.c: In function 'shmem_fault':
> mm/shmem.c:2030:11: error: implicit declaration of function
'maybe_unlock_mmap_for_io' [-Werror=implicit-function-declaration]
fpin = maybe_unlock_mmap_for_io(vmf, NULL);
^~~~~~~~~~~~~~~~~~~~~~~~
mm/shmem.c:2030:9: warning: assignment makes pointer from integer without a cast
[-Wint-conversion]
fpin = maybe_unlock_mmap_for_io(vmf, NULL);
^
cc1: some warnings being treated as errors
vim +/maybe_unlock_mmap_for_io +2030 mm/shmem.c
1989
1990 static vm_fault_t shmem_fault(struct vm_fault *vmf)
1991 {
1992 struct vm_area_struct *vma = vmf->vma;
1993 struct inode *inode = file_inode(vma->vm_file);
1994 gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1995 enum sgp_type sgp;
1996 int err;
1997 vm_fault_t ret = VM_FAULT_LOCKED;
1998
1999 /*
2000 * Trinity finds that probing a hole which tmpfs is punching can
2001 * prevent the hole-punch from ever completing: which in turn
2002 * locks writers out with its hold on i_mutex. So refrain from
2003 * faulting pages into the hole while it's being punched. Although
2004 * shmem_undo_range() does remove the additions, it may be unable to
2005 * keep up, as each new page needs its own unmap_mapping_range() call,
2006 * and the i_mmap tree grows ever slower to scan if new vmas are added.
2007 *
2008 * It does not matter if we sometimes reach this check just before the
2009 * hole-punch begins, so that one fault then races with the punch:
2010 * we just need to make racing faults a rare case.
2011 *
2012 * The implementation below would be much simpler if we just used a
2013 * standard mutex or completion: but we cannot take i_mutex in fault,
2014 * and bloating every shmem inode for this unlikely case would be sad.
2015 */
2016 if (unlikely(inode->i_private)) {
2017 struct shmem_falloc *shmem_falloc;
2018
2019 spin_lock(&inode->i_lock);
2020 shmem_falloc = inode->i_private;
2021 if (shmem_falloc &&
2022 shmem_falloc->waitq &&
2023 vmf->pgoff >= shmem_falloc->start &&
2024 vmf->pgoff < shmem_falloc->next) {
2025 struct file *fpin;
2026 wait_queue_head_t *shmem_falloc_waitq;
2027 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
2028
2029 ret = VM_FAULT_NOPAGE;
2030 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2031 if (fpin)
2032 ret = VM_FAULT_RETRY;
2033
2034 shmem_falloc_waitq = shmem_falloc->waitq;
2035 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
2036 TASK_UNINTERRUPTIBLE);
2037 spin_unlock(&inode->i_lock);
2038 schedule();
2039
2040 /*
2041 * shmem_falloc_waitq points into the shmem_fallocate()
2042 * stack of the hole-punching task: shmem_falloc_waitq
2043 * is usually invalid by the time we reach here, but
2044 * finish_wait() does not dereference it in that case;
2045 * though i_lock needed lest racing with wake_up_all().
2046 */
2047 spin_lock(&inode->i_lock);
2048 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
2049 spin_unlock(&inode->i_lock);
2050
2051 if (fpin)
2052 fput(fpin);
2053 return ret;
2054 }
2055 spin_unlock(&inode->i_lock);
2056 }
2057
2058 sgp = SGP_CACHE;
2059
2060 if ((vma->vm_flags & VM_NOHUGEPAGE) ||
2061 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
2062 sgp = SGP_NOHUGE;
2063 else if (vma->vm_flags & VM_HUGEPAGE)
2064 sgp = SGP_HUGE;
2065
2066 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2067 gfp, vma, vmf, &ret);
2068 if (err)
2069 return vmf_error(err);
2070 return ret;
2071 }
2072
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation