tree:
https://github.com/intel/linux-intel-lts.git 5.4/yocto
head: eeb611e5394c56d45c5cc8f7dc484c9f19e93143
commit: ed60485d05462b4c58e219b174c859bc66741e91 [2/1142] keembay-ocs: Add support for
Keem Bay OCS
config: ia64-randconfig-s031-20201111 (attached as .config)
compiler: ia64-linux-gcc (GCC) 9.3.0
reproduce:
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.3-106-gd020cf33-dirty
#
https://github.com/intel/linux-intel-lts/commit/ed60485d05462b4c58e219b174c859bc66741e91
git remote add intel-linux-intel-lts
https://github.com/intel/linux-intel-lts.git
git fetch --no-tags intel-linux-intel-lts 5.4/yocto
git checkout ed60485d05462b4c58e219b174c859bc66741e91
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross C=1
CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=ia64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
"sparse warnings: (new ones prefixed by >>)"
> drivers/crypto/keembay/ocs-aes.c:1379:43: sparse: sparse: Using
plain integer as NULL pointer
vim +1379 drivers/crypto/keembay/ocs-aes.c
1290
1291 void ocs_create_linked_list_from_sg(struct ocs_aes_dev *aes_dev,
1292 struct scatterlist *sgl, u32 num_sgl_entries,
1293 u8 **aad_buf, dma_addr_t *aad_descriptor,
1294 u32 aad_size, u32 *aad_desc_size,
1295 u8 **data_buf, dma_addr_t *data_descriptor,
1296 u32 data_size, u32 *data_desc_size)
1297 {
1298 struct ocs_dma_linked_list *ll = NULL;
1299 u32 data_offset = 0;
1300 struct scatterlist *sg;
1301 int num_aad_ents, i;
1302
1303 if (num_sgl_entries == 0)
1304 goto ret_err;
1305
1306 sg = sgl;
1307
1308 if (aad_size) {
1309 num_aad_ents = sg_nents_for_len(sgl, aad_size);
1310 if (num_aad_ents < 0)
1311 goto ret_err;
1312
1313 *aad_desc_size = sizeof(struct ocs_dma_linked_list) *
1314 num_aad_ents;
1315
1316 /* HW requirement: descriptor must be 8 byte aligned */
1317 *aad_buf = kmalloc(*aad_desc_size, GFP_KERNEL | GFP_DMA);
1318 if (!*aad_buf)
1319 goto ret_err;
1320
1321 ll = (struct ocs_dma_linked_list *)(*aad_buf);
1322
1323 *aad_descriptor = dma_map_single(aes_dev->dev, *aad_buf,
1324 *aad_desc_size, DMA_TO_DEVICE);
1325 if (dma_mapping_error(aes_dev->dev, *aad_descriptor)) {
1326 dev_err(aes_dev->dev, "DMA mapping error\n");
1327 *aad_descriptor = 0;
1328 goto ret_err;
1329 }
1330
1331 dma_sync_single_for_cpu(aes_dev->dev, *aad_descriptor,
1332 *aad_desc_size, DMA_TO_DEVICE);
1333
1334 i = 0;
1335 while (true) {
1336 ll[i].address = sg_dma_address(sg);
1337 ll[i].byte_count = (sg_dma_len(sg) < aad_size) ?
1338 sg_dma_len(sg) : aad_size;
1339 aad_size -= ll[i].byte_count;
1340 ll[i].freeze = 0;
1341 ll[i].next = *aad_descriptor +
1342 (sizeof(struct ocs_dma_linked_list) * (i+1));
1343 ll[i].reserved = 0;
1344 ll[i].terminate = 0;
1345 i++;
1346 if (aad_size && (i < num_aad_ents))
1347 sg = sg_next(sg);
1348 else
1349 break;
1350 }
1351 ll[i-1].next = 0;
1352 ll[i-1].terminate = 1;
1353 data_offset = ll[i-1].byte_count;
1354
1355 dma_sync_single_for_device(aes_dev->dev, *aad_descriptor,
1356 *aad_desc_size, DMA_TO_DEVICE);
1357 } else {
1358 num_aad_ents = 0;
1359 }
1360
1361 if (data_size) {
1362 /* +1 for case where aad and data overlap in one sgl node */
1363 num_sgl_entries = num_sgl_entries - num_aad_ents + 1;
1364
1365 *data_desc_size = sizeof(struct ocs_dma_linked_list) *
1366 num_sgl_entries;
1367
1368 /* HW requirement: descriptor must be 8 byte aligned */
1369 *data_buf = kmalloc(*data_desc_size, GFP_KERNEL | GFP_DMA);
1370 if (!*data_buf)
1371 goto ret_err;
1372
1373 ll = (struct ocs_dma_linked_list *)(*data_buf);
1374
1375 *data_descriptor = dma_map_single(aes_dev->dev, *data_buf,
1376 *data_desc_size, DMA_TO_DEVICE);
1377 if (dma_mapping_error(aes_dev->dev, *data_descriptor)) {
1378 dev_err(aes_dev->dev, "DMA mapping error\n");
1379 data_descriptor = 0;
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org