tree:
https://android.googlesource.com/kernel/common android12-5.4
head: 62ec51519fa16401c33a867135ad9489af22ff3a
commit: 62ec51519fa16401c33a867135ad9489af22ff3a [1/1] UPSTREAM: iommu: Add gfp parameter
to iommu_ops::map
config: x86_64-randconfig-r002-20210112 (attached as .config)
compiler: clang version 12.0.0 (
https://github.com/llvm/llvm-project
68ff52ffead2ba25cca442778ab19286000daad7)
reproduce (this is a W=1 build):
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# install x86_64 cross compiling tool for clang build
# apt-get install binutils-x86-64-linux-gnu
git remote add android-common
https://android.googlesource.com/kernel/common
git fetch --no-tags android-common android12-5.4
git checkout 62ec51519fa16401c33a867135ad9489af22ff3a
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
drivers/iommu/iommu.c:309:5: warning: no previous prototype for function 'iommu_insert_resv_region' [-Wmissing-prototypes]
int iommu_insert_resv_region(struct iommu_resv_region *new,
^
drivers/iommu/iommu.c:309:1: note: declare 'static' if the function is not
intended to be used outside of this translation unit
int iommu_insert_resv_region(struct iommu_resv_region *new,
^
static
>> drivers/iommu/iommu.c:1885:5: warning: no previous prototype for function '__iommu_map' [-Wmissing-prototypes]
int __iommu_map(struct iommu_domain *domain, unsigned long iova,
^
drivers/iommu/iommu.c:1885:1: note: declare 'static' if the function is not
intended to be used outside of this translation unit
int __iommu_map(struct iommu_domain *domain, unsigned long iova,
^
static
>> drivers/iommu/iommu.c:2036:8: warning: no previous prototype for function '__iommu_map_sg' [-Wmissing-prototypes]
size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
^
drivers/iommu/iommu.c:2036:1: note: declare 'static' if the function is not
intended to be used outside of this translation unit
size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
^
static
3 warnings generated.
vim +/__iommu_map +1885 drivers/iommu/iommu.c
1884
1885 int __iommu_map(struct iommu_domain *domain, unsigned long iova,
1886 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1887 {
1888 const struct iommu_ops *ops = domain->ops;
1889 unsigned long orig_iova = iova;
1890 unsigned int min_pagesz;
1891 size_t orig_size = size;
1892 phys_addr_t orig_paddr = paddr;
1893 int ret = 0;
1894
1895 if (unlikely(ops->map == NULL ||
1896 domain->pgsize_bitmap == 0UL))
1897 return -ENODEV;
1898
1899 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1900 return -EINVAL;
1901
1902 /* find out the minimum page size supported */
1903 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1904
1905 /*
1906 * both the virtual address and the physical one, as well as
1907 * the size of the mapping, must be aligned (at least) to the
1908 * size of the smallest page supported by the hardware
1909 */
1910 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1911 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1912 iova, &paddr, size, min_pagesz);
1913 return -EINVAL;
1914 }
1915
1916 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1917
1918 while (size) {
1919 size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1920
1921 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1922 iova, &paddr, pgsize);
1923 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
1924
1925 if (ret)
1926 break;
1927
1928 iova += pgsize;
1929 paddr += pgsize;
1930 size -= pgsize;
1931 }
1932
1933 if (ops->iotlb_sync_map)
1934 ops->iotlb_sync_map(domain);
1935
1936 /* unroll mapping in case something went wrong */
1937 if (ret)
1938 iommu_unmap(domain, orig_iova, orig_size - size);
1939 else
1940 trace_map(orig_iova, orig_paddr, orig_size);
1941
1942 return ret;
1943 }
1944
1945 int iommu_map(struct iommu_domain *domain, unsigned long iova,
1946 phys_addr_t paddr, size_t size, int prot)
1947 {
1948 might_sleep();
1949 return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
1950 }
1951 EXPORT_SYMBOL_GPL(iommu_map);
1952
1953 int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
1954 phys_addr_t paddr, size_t size, int prot)
1955 {
1956 return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
1957 }
1958 EXPORT_SYMBOL_GPL(iommu_map_atomic);
1959
1960 static size_t __iommu_unmap(struct iommu_domain *domain,
1961 unsigned long iova, size_t size,
1962 struct iommu_iotlb_gather *iotlb_gather)
1963 {
1964 const struct iommu_ops *ops = domain->ops;
1965 size_t unmapped_page, unmapped = 0;
1966 unsigned long orig_iova = iova;
1967 unsigned int min_pagesz;
1968
1969 if (unlikely(ops->unmap == NULL ||
1970 domain->pgsize_bitmap == 0UL))
1971 return 0;
1972
1973 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1974 return 0;
1975
1976 /* find out the minimum page size supported */
1977 min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1978
1979 /*
1980 * The virtual address, as well as the size of the mapping, must be
1981 * aligned (at least) to the size of the smallest page supported
1982 * by the hardware
1983 */
1984 if (!IS_ALIGNED(iova | size, min_pagesz)) {
1985 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
1986 iova, size, min_pagesz);
1987 return 0;
1988 }
1989
1990 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
1991
1992 /*
1993 * Keep iterating until we either unmap 'size' bytes (or more)
1994 * or we hit an area that isn't mapped.
1995 */
1996 while (unmapped < size) {
1997 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
1998
1999 unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2000 if (!unmapped_page)
2001 break;
2002
2003 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2004 iova, unmapped_page);
2005
2006 iova += unmapped_page;
2007 unmapped += unmapped_page;
2008 }
2009
2010 trace_unmap(orig_iova, size, unmapped);
2011 return unmapped;
2012 }
2013
2014 size_t iommu_unmap(struct iommu_domain *domain,
2015 unsigned long iova, size_t size)
2016 {
2017 struct iommu_iotlb_gather iotlb_gather;
2018 size_t ret;
2019
2020 iommu_iotlb_gather_init(&iotlb_gather);
2021 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2022 iommu_tlb_sync(domain, &iotlb_gather);
2023
2024 return ret;
2025 }
2026 EXPORT_SYMBOL_GPL(iommu_unmap);
2027
2028 size_t iommu_unmap_fast(struct iommu_domain *domain,
2029 unsigned long iova, size_t size,
2030 struct iommu_iotlb_gather *iotlb_gather)
2031 {
2032 return __iommu_unmap(domain, iova, size, iotlb_gather);
2033 }
2034 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2035
2036 size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2037 struct scatterlist *sg, unsigned int nents, int prot,
2038 gfp_t gfp)
2039 {
2040 size_t len = 0, mapped = 0;
2041 phys_addr_t start;
2042 unsigned int i = 0;
2043 int ret;
2044
2045 while (i <= nents) {
2046 phys_addr_t s_phys = sg_phys(sg);
2047
2048 if (len && s_phys != start + len) {
2049 ret = __iommu_map(domain, iova + mapped, start,
2050 len, prot, gfp);
2051
2052 if (ret)
2053 goto out_err;
2054
2055 mapped += len;
2056 len = 0;
2057 }
2058
2059 if (len) {
2060 len += sg->length;
2061 } else {
2062 len = sg->length;
2063 start = s_phys;
2064 }
2065
2066 if (++i < nents)
2067 sg = sg_next(sg);
2068 }
2069
2070 return mapped;
2071
2072 out_err:
2073 /* undo mappings already done */
2074 iommu_unmap(domain, iova, mapped);
2075
2076 return 0;
2077
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org