Hi,
I love your patch! Perhaps something to improve:
[auto build test WARNING on linus/master]
[also build test WARNING on v5.17-rc1 next-20220124]
[cannot apply to arm64/for-next/core rostedt-trace/for-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:
https://github.com/0day-ci/linux/commits/andrey-konovalov-linux-dev/kasan...
base:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
dd81e1c7d5fb126e5fbc5c9e334d7b3ec29a16a0
config: microblaze-randconfig-s032-20220124
(
https://download.01.org/0day-ci/archive/20220125/202201251102.bV52975D-lk...)
compiler: microblaze-linux-gcc (GCC) 11.2.0
reproduce:
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.4-dirty
#
https://github.com/0day-ci/linux/commit/04d94d9a5279576fc7d5b6dd381a6d4a8...
git remote add linux-review
https://github.com/0day-ci/linux
git fetch --no-tags linux-review
andrey-konovalov-linux-dev/kasan-vmalloc-arm64-add-vmalloc-tagging-support-for-SW-HW_TAGS/20220125-021005
git checkout 04d94d9a5279576fc7d5b6dd381a6d4a86edfac2
# save the config file to linux build tree
mkdir build_dir
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross C=1
CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=microblaze
SHELL=/bin/bash
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
mm/vmalloc.c:3156:21: sparse: sparse: incorrect type in assignment (different base
types) @@ expected restricted kasan_vmalloc_flags_t [usertype] kasan_flags @@ got
unsigned int @@
mm/vmalloc.c:3156:21: sparse: expected restricted kasan_vmalloc_flags_t [usertype]
kasan_flags
mm/vmalloc.c:3156:21: sparse: got unsigned int
> mm/vmalloc.c:3158:29: sparse: sparse: invalid assignment: |=
> mm/vmalloc.c:3158:29: sparse: left side has type restricted kasan_vmalloc_flags_t
> mm/vmalloc.c:3158:29: sparse: right side has type unsigned int
vim +3158 mm/vmalloc.c
3038
3039 /**
3040 * __vmalloc_node_range - allocate virtually contiguous memory
3041 * @size: allocation size
3042 * @align: desired alignment
3043 * @start: vm area range start
3044 * @end: vm area range end
3045 * @gfp_mask: flags for the page level allocator
3046 * @prot: protection mask for the allocated pages
3047 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3048 * @node: node to use for allocation or NUMA_NO_NODE
3049 * @caller: caller's return address
3050 *
3051 * Allocate enough pages to cover @size from the page level
3052 * allocator with @gfp_mask flags. Please note that the full set of gfp
3053 * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
3054 * supported.
3055 * Zone modifiers are not supported. From the reclaim modifiers
3056 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3057 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3058 * __GFP_RETRY_MAYFAIL are not supported).
3059 *
3060 * __GFP_NOWARN can be used to suppress failures messages.
3061 *
3062 * Map them into contiguous kernel virtual space, using a pagetable
3063 * protection of @prot.
3064 *
3065 * Return: the address of the area or %NULL on failure
3066 */
3067 void *__vmalloc_node_range(unsigned long size, unsigned long align,
3068 unsigned long start, unsigned long end, gfp_t gfp_mask,
3069 pgprot_t prot, unsigned long vm_flags, int node,
3070 const void *caller)
3071 {
3072 struct vm_struct *area;
3073 void *ret;
3074 kasan_vmalloc_flags_t kasan_flags;
3075 unsigned long real_size = size;
3076 unsigned long real_align = align;
3077 unsigned int shift = PAGE_SHIFT;
3078
3079 if (WARN_ON_ONCE(!size))
3080 return NULL;
3081
3082 if ((size >> PAGE_SHIFT) > totalram_pages()) {
3083 warn_alloc(gfp_mask, NULL,
3084 "vmalloc error: size %lu, exceeds total pages",
3085 real_size);
3086 return NULL;
3087 }
3088
3089 if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
3090 unsigned long size_per_node;
3091
3092 /*
3093 * Try huge pages. Only try for PAGE_KERNEL allocations,
3094 * others like modules don't yet expect huge pages in
3095 * their allocations due to apply_to_page_range not
3096 * supporting them.
3097 */
3098
3099 size_per_node = size;
3100 if (node == NUMA_NO_NODE)
3101 size_per_node /= num_online_nodes();
3102 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3103 shift = PMD_SHIFT;
3104 else
3105 shift = arch_vmap_pte_supported_shift(size_per_node);
3106
3107 align = max(real_align, 1UL << shift);
3108 size = ALIGN(real_size, 1UL << shift);
3109 }
3110
3111 again:
3112 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3113 VM_UNINITIALIZED | vm_flags, start, end, node,
3114 gfp_mask, caller);
3115 if (!area) {
3116 bool nofail = gfp_mask & __GFP_NOFAIL;
3117 warn_alloc(gfp_mask, NULL,
3118 "vmalloc error: size %lu, vm_struct allocation failed%s",
3119 real_size, (nofail) ? ". Retrying." : "");
3120 if (nofail) {
3121 schedule_timeout_uninterruptible(1);
3122 goto again;
3123 }
3124 goto fail;
3125 }
3126
3127 /* Prepare arguments for __vmalloc_area_node(). */
3128 if (kasan_hw_tags_enabled() &&
3129 pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3130 /*
3131 * Modify protection bits to allow tagging.
3132 * This must be done before mapping in __vmalloc_area_node().
3133 */
3134 prot = arch_vmap_pgprot_tagged(prot);
3135
3136 /*
3137 * Skip page_alloc poisoning and zeroing for physical pages
3138 * backing VM_ALLOC mapping. Memory is instead poisoned and
3139 * zeroed by kasan_unpoison_vmalloc().
3140 */
3141 gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
3142 }
3143
3144 /* Allocate physical pages and map them into vmalloc space. */
3145 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3146 if (!ret)
3147 goto fail;
3148
3149 /*
3150 * Mark the pages as accessible, now that they are mapped.
3151 * The init condition should match the one in post_alloc_hook()
3152 * (except for the should_skip_init() check) to make sure that memory
3153 * is initialized under the same conditions regardless of the enabled
3154 * KASAN mode.
3155 */
3156 kasan_flags = KASAN_VMALLOC_VM_ALLOC;
3157 if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
3158 kasan_flags |= KASAN_VMALLOC_INIT;
3159 area->addr
= kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3160
3161 /*
3162 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
3163 * flag. It means that vm_struct is not fully initialized.
3164 * Now, it is fully initialized, so remove this flag here.
3165 */
3166 clear_vm_uninitialized_flag(area);
3167
3168 size = PAGE_ALIGN(size);
3169 if (!(vm_flags & VM_DEFER_KMEMLEAK))
3170 kmemleak_vmalloc(area, size, gfp_mask);
3171
3172 return area->addr;
3173
3174 fail:
3175 if (shift > PAGE_SHIFT) {
3176 shift = PAGE_SHIFT;
3177 align = real_align;
3178 size = real_size;
3179 goto again;
3180 }
3181
3182 return NULL;
3183 }
3184
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org