tree:
https://github.com/intel/tdx.git guest
head: 224dd4925275ef73ef78f1412d6f9d03564294eb
commit: 90e5df51a2da1f8175059239595bfa7c8086b5c3 [29/76] x86/tdx: Make pages shared in
ioremap()
config: x86_64-randconfig-b001-20210615 (attached as .config)
compiler: clang version 13.0.0 (
https://github.com/llvm/llvm-project
64720f57bea6a6bf033feef4a5751ab9c0c3b401)
reproduce (this is a W=1 build):
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# install x86_64 cross compiling tool for clang build
# apt-get install binutils-x86-64-linux-gnu
#
https://github.com/intel/tdx/commit/90e5df51a2da1f8175059239595bfa7c8086b5c3
git remote add intel-tdx
https://github.com/intel/tdx.git
git fetch --no-tags intel-tdx guest
git checkout 90e5df51a2da1f8175059239595bfa7c8086b5c3
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
> arch/x86/mm/ioremap.c:249:10: error: implicit declaration of
function 'tdg_shared_mask' [-Werror,-Wimplicit-function-declaration]
prot = pgprot_protected_guest(prot);
^
arch/x86/include/asm/pgtable.h:26:12: note: expanded from macro
'pgprot_protected_guest'
tdg_shared_mask())
^
arch/x86/mm/ioremap.c:721:17: warning: no previous prototype for function
'early_memremap_pgprot_adjust' [-Wmissing-prototypes]
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
^
arch/x86/mm/ioremap.c:721:1: note: declare 'static' if the function is not
intended to be used outside of this translation unit
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
^
static
1 warning and 1 error generated.
vim +/tdg_shared_mask +249 arch/x86/mm/ioremap.c
161
162 /*
163 * Remap an arbitrary physical address space into the kernel virtual
164 * address space. It transparently creates kernel huge I/O mapping when
165 * the physical address is aligned by a huge page size (1GB or 2MB) and
166 * the requested size is at least the huge page size.
167 *
168 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
169 * Therefore, the mapping code falls back to use a smaller page toward 4KB
170 * when a mapping range is covered by non-WB type of MTRRs.
171 *
172 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
173 * have to convert them into an offset in a page-aligned mapping, but the
174 * caller shouldn't need to know that small detail.
175 */
176 static void __iomem *
177 __ioremap_caller(resource_size_t phys_addr, unsigned long size,
178 enum page_cache_mode pcm, void *caller, bool encrypted)
179 {
180 unsigned long offset, vaddr;
181 resource_size_t last_addr;
182 const resource_size_t unaligned_phys_addr = phys_addr;
183 const unsigned long unaligned_size = size;
184 struct ioremap_desc io_desc;
185 struct vm_struct *area;
186 enum page_cache_mode new_pcm;
187 pgprot_t prot;
188 int retval;
189 void __iomem *ret_addr;
190
191 /* Don't allow wraparound or zero size */
192 last_addr = phys_addr + size - 1;
193 if (!size || last_addr < phys_addr)
194 return NULL;
195
196 if (!phys_addr_valid(phys_addr)) {
197 printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
198 (unsigned long long)phys_addr);
199 WARN_ON_ONCE(1);
200 return NULL;
201 }
202
203 __ioremap_check_mem(phys_addr, size, &io_desc);
204
205 /*
206 * Don't allow anybody to remap normal RAM that we're using..
207 */
208 if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
209 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
210 &phys_addr, &last_addr);
211 return NULL;
212 }
213
214 /*
215 * Mappings have to be page-aligned
216 */
217 offset = phys_addr & ~PAGE_MASK;
218 phys_addr &= PHYSICAL_PAGE_MASK;
219 size = PAGE_ALIGN(last_addr+1) - phys_addr;
220
221 retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
222 pcm, &new_pcm);
223 if (retval) {
224 printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
225 return NULL;
226 }
227
228 if (pcm != new_pcm) {
229 if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
230 printk(KERN_ERR
231 "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
232 (unsigned long long)phys_addr,
233 (unsigned long long)(phys_addr + size),
234 pcm, new_pcm);
235 goto err_free_memtype;
236 }
237 pcm = new_pcm;
238 }
239
240 /*
241 * If the page being mapped is in memory and SEV is active then
242 * make sure the memory encryption attribute is enabled in the
243 * resulting mapping.
244 */
245 prot = PAGE_KERNEL_IO;
246 if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
247 prot = pgprot_encrypted(prot);
248 else if (prot_guest_has(PR_GUEST_SHARED_MAPPING_INIT))
249 prot = pgprot_protected_guest(prot);
250
251 switch (pcm) {
252 case _PAGE_CACHE_MODE_UC:
253 default:
254 prot = __pgprot(pgprot_val(prot) |
255 cachemode2protval(_PAGE_CACHE_MODE_UC));
256 break;
257 case _PAGE_CACHE_MODE_UC_MINUS:
258 prot = __pgprot(pgprot_val(prot) |
259 cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
260 break;
261 case _PAGE_CACHE_MODE_WC:
262 prot = __pgprot(pgprot_val(prot) |
263 cachemode2protval(_PAGE_CACHE_MODE_WC));
264 break;
265 case _PAGE_CACHE_MODE_WT:
266 prot = __pgprot(pgprot_val(prot) |
267 cachemode2protval(_PAGE_CACHE_MODE_WT));
268 break;
269 case _PAGE_CACHE_MODE_WB:
270 break;
271 }
272
273 /*
274 * Ok, go for it..
275 */
276 area = get_vm_area_caller(size, VM_IOREMAP, caller);
277 if (!area)
278 goto err_free_memtype;
279 area->phys_addr = phys_addr;
280 vaddr = (unsigned long) area->addr;
281
282 if (memtype_kernel_map_sync(phys_addr, size, pcm))
283 goto err_free_area;
284
285 if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
286 goto err_free_area;
287
288 ret_addr = (void __iomem *) (vaddr + offset);
289 mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
290
291 /*
292 * Check if the request spans more than any BAR in the iomem resource
293 * tree.
294 */
295 if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
296 pr_warn("caller %pS mapping multiple BARs\n", caller);
297
298 return ret_addr;
299 err_free_area:
300 free_vm_area(area);
301 err_free_memtype:
302 memtype_free(phys_addr, phys_addr + size);
303 return NULL;
304 }
305
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org