Hi Kashyap,
I love your patch! Perhaps something to improve:
[auto build test WARNING on next-20201013]
[cannot apply to scsi/for-next mkp-scsi/for-next v5.9 v5.9-rc8 v5.9-rc7 v5.9]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:
https://github.com/0day-ci/linux/commits/Kashyap-Desai/add-io_uring-with-...
base: f2fb1afc57304f9dd68c20a08270e287470af2eb
config: xtensa-allyesconfig (attached as .config)
compiler: xtensa-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
#
https://github.com/0day-ci/linux/commit/a3173d0d1c2ca8a45007fa994f2641aa7...
git remote add linux-review
https://github.com/0day-ci/linux
git fetch --no-tags linux-review
Kashyap-Desai/add-io_uring-with-IOPOLL-support-in-scsi-layer/20201014-202916
git checkout a3173d0d1c2ca8a45007fa994f2641aa7262719c
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=xtensa
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
All warnings (new ones prefixed by >>):
drivers/scsi/scsi_debug.c: In function 'schedule_resp':
> drivers/scsi/scsi_debug.c:5442:3: warning: 'return' with
no value, in function returning non-void [-Wreturn-type]
5442 | return;
| ^~~~~~
drivers/scsi/scsi_debug.c:5359:12: note: declared here
5359 | static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info
*devip,
| ^~~~~~~~~~~~~
drivers/scsi/scsi_debug.c: At top level:
> drivers/scsi/scsi_debug.c:7246:5: warning: no previous prototype
for 'sdebug_blk_mq_poll' [-Wmissing-prototypes]
7246 | int
sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
| ^~~~~~~~~~~~~~~~~~
vim +/return +5442 drivers/scsi/scsi_debug.c
5353
5354 /* Complete the processing of the thread that queued a SCSI command to this
5355 * driver. It either completes the command by calling cmnd_done() or
5356 * schedules a hr timer or work queue then returns 0. Returns
5357 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5358 */
5359 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5360 int scsi_result,
5361 int (*pfp)(struct scsi_cmnd *,
5362 struct sdebug_dev_info *),
5363 int delta_jiff, int ndelay)
5364 {
5365 bool new_sd_dp;
5366 bool inject = false;
5367 int k, num_in_q, qdepth;
5368 unsigned long iflags;
5369 u64 ns_from_boot = 0;
5370 struct sdebug_queue *sqp;
5371 struct sdebug_queued_cmd *sqcp;
5372 struct scsi_device *sdp;
5373 struct sdebug_defer *sd_dp;
5374
5375 if (unlikely(devip == NULL)) {
5376 if (scsi_result == 0)
5377 scsi_result = DID_NO_CONNECT << 16;
5378 goto respond_in_thread;
5379 }
5380 sdp = cmnd->device;
5381
5382 if (delta_jiff == 0)
5383 goto respond_in_thread;
5384
5385 sqp = get_queue(cmnd);
5386 spin_lock_irqsave(&sqp->qc_lock, iflags);
5387 if (unlikely(atomic_read(&sqp->blocked))) {
5388 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5389 return SCSI_MLQUEUE_HOST_BUSY;
5390 }
5391 num_in_q = atomic_read(&devip->num_in_q);
5392 qdepth = cmnd->device->queue_depth;
5393 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5394 if (scsi_result) {
5395 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5396 goto respond_in_thread;
5397 } else
5398 scsi_result = device_qfull_result;
5399 } else if (unlikely(sdebug_every_nth &&
5400 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5401 (scsi_result == 0))) {
5402 if ((num_in_q == (qdepth - 1)) &&
5403 (atomic_inc_return(&sdebug_a_tsf) >=
5404 abs(sdebug_every_nth))) {
5405 atomic_set(&sdebug_a_tsf, 0);
5406 inject = true;
5407 scsi_result = device_qfull_result;
5408 }
5409 }
5410
5411 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5412 if (unlikely(k >= sdebug_max_queue)) {
5413 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5414 if (scsi_result)
5415 goto respond_in_thread;
5416 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5417 scsi_result = device_qfull_result;
5418 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5419 sdev_printk(KERN_INFO, sdp,
5420 "%s: max_queue=%d exceeded, %s\n",
5421 __func__, sdebug_max_queue,
5422 (scsi_result ? "status: TASK SET FULL" :
5423 "report: host busy"));
5424 if (scsi_result)
5425 goto respond_in_thread;
5426 else
5427 return SCSI_MLQUEUE_HOST_BUSY;
5428 }
5429 set_bit(k, sqp->in_use_bm);
5430 atomic_inc(&devip->num_in_q);
5431 sqcp = &sqp->qc_arr[k];
5432 sqcp->a_cmnd = cmnd;
5433 cmnd->host_scribble = (unsigned char *)sqcp;
5434 sd_dp = sqcp->sd_dp;
5435 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5436
5437 /* Do not complete IO from default completion path.
5438 * Let it to be on queue.
5439 * Completion should happen from mq_poll interface.
5440 */
5441 if ((sqp - sdebug_q_arr) >= (submit_queues - poll_queues))
5442 return;
5443
5444 if (!sd_dp) {
5445 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5446 if (!sd_dp) {
5447 atomic_dec(&devip->num_in_q);
5448 clear_bit(k, sqp->in_use_bm);
5449 return SCSI_MLQUEUE_HOST_BUSY;
5450 }
5451 new_sd_dp = true;
5452 } else {
5453 new_sd_dp = false;
5454 }
5455
5456 /* Set the hostwide tag */
5457 if (sdebug_host_max_queue)
5458 sd_dp->hc_idx = get_tag(cmnd);
5459
5460 if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
5461 ns_from_boot = ktime_get_boottime_ns();
5462
5463 /* one of the resp_*() response functions is called here */
5464 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5465 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5466 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5467 delta_jiff = ndelay = 0;
5468 }
5469 if (cmnd->result == 0 && scsi_result != 0)
5470 cmnd->result = scsi_result;
5471 if (cmnd->result == 0 && unlikely(sdebug_opts &
SDEBUG_OPT_TRANSPORT_ERR)) {
5472 if (atomic_read(&sdeb_inject_pending)) {
5473 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5474 atomic_set(&sdeb_inject_pending, 0);
5475 cmnd->result = check_condition_result;
5476 }
5477 }
5478
5479 if (unlikely(sdebug_verbose && cmnd->result))
5480 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5481 __func__, cmnd->result);
5482
5483 if (delta_jiff > 0 || ndelay > 0) {
5484 ktime_t kt;
5485
5486 if (delta_jiff > 0) {
5487 u64 ns = jiffies_to_nsecs(delta_jiff);
5488
5489 if (sdebug_random && ns < U32_MAX) {
5490 ns = prandom_u32_max((u32)ns);
5491 } else if (sdebug_random) {
5492 ns >>= 12; /* scale to 4 usec precision */
5493 if (ns < U32_MAX) /* over 4 hours max */
5494 ns = prandom_u32_max((u32)ns);
5495 ns <<= 12;
5496 }
5497 kt = ns_to_ktime(ns);
5498 } else { /* ndelay has a 4.2 second max */
5499 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5500 (u32)ndelay;
5501 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5502 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5503
5504 if (kt <= d) { /* elapsed duration >= kt */
5505 spin_lock_irqsave(&sqp->qc_lock, iflags);
5506 sqcp->a_cmnd = NULL;
5507 atomic_dec(&devip->num_in_q);
5508 clear_bit(k, sqp->in_use_bm);
5509 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5510 if (new_sd_dp)
5511 kfree(sd_dp);
5512 /* call scsi_done() from this thread */
5513 cmnd->scsi_done(cmnd);
5514 return 0;
5515 }
5516 /* otherwise reduce kt by elapsed time */
5517 kt -= d;
5518 }
5519 }
5520 if (!sd_dp->init_hrt) {
5521 sd_dp->init_hrt = true;
5522 sqcp->sd_dp = sd_dp;
5523 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5524 HRTIMER_MODE_REL_PINNED);
5525 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5526 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5527 sd_dp->qc_idx = k;
5528 }
5529 if (sdebug_statistics)
5530 sd_dp->issuing_cpu = raw_smp_processor_id();
5531 sd_dp->defer_t = SDEB_DEFER_HRT;
5532 /* schedule the invocation of scsi_done() for a later time */
5533 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5534 } else { /* jdelay < 0, use work queue */
5535 if (!sd_dp->init_wq) {
5536 sd_dp->init_wq = true;
5537 sqcp->sd_dp = sd_dp;
5538 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5539 sd_dp->qc_idx = k;
5540 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5541 }
5542 if (sdebug_statistics)
5543 sd_dp->issuing_cpu = raw_smp_processor_id();
5544 sd_dp->defer_t = SDEB_DEFER_WQ;
5545 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5546 atomic_read(&sdeb_inject_pending)))
5547 sd_dp->aborted = true;
5548 schedule_work(&sd_dp->ew.work);
5549 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5550 atomic_read(&sdeb_inject_pending))) {
5551 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
cmnd->request->tag);
5552 blk_abort_request(cmnd->request);
5553 atomic_set(&sdeb_inject_pending, 0);
5554 }
5555 }
5556 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result ==
device_qfull_result))
5557 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5558 num_in_q, (inject ? "<inject> " : ""),
"status: TASK SET FULL");
5559 return 0;
5560
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org