Hi Can,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on scsi/for-next]
[also build test ERROR on mkp-scsi/for-next v5.8-rc5 next-20200713]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:
https://github.com/0day-ci/linux/commits/Can-Guo/Fix-up-and-simplify-erro...
base:
https://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git for-next
config: alpha-allyesconfig (attached as .config)
compiler: alpha-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=alpha
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
drivers/scsi/ufs/ufshcd.c: In function 'ufshcd_err_handler':
>> drivers/scsi/ufs/ufshcd.c:5755:10: error: 'struct request_queue' has no member named 'dev'
    5755 |    if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
         |          ^~
>> drivers/scsi/ufs/ufshcd.c:5755:21: error: 'struct request_queue' has no member named 'rpm_status'
    5755 |    if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
         |                      ^~
   drivers/scsi/ufs/ufshcd.c:5756:14: error: 'struct request_queue' has no member named 'rpm_status'
    5756 |        q->rpm_status == RPM_SUSPENDING))
         |          ^~
   drivers/scsi/ufs/ufshcd.c:5757:25: error: 'struct request_queue' has no member named 'dev'
    5757 |     pm_request_resume(q->dev);
         |                         ^~
vim +5755 drivers/scsi/ufs/ufshcd.c
5579
5580 /**
5581 * ufshcd_err_handler - handle UFS errors that require s/w attention
5582 * @work: pointer to work structure
5583 */
5584 static void ufshcd_err_handler(struct work_struct *work)
5585 {
5586 struct ufs_hba *hba;
5587 struct Scsi_Host *shost;
5588 struct scsi_device *sdev;
5589 unsigned long flags;
5590 u32 err_xfer = 0;
5591 u32 err_tm = 0;
5592 int reset_err = -1;
5593 int tag;
5594 bool needs_reset = false;
5595
5596 hba = container_of(work, struct ufs_hba, eh_work);
5597 shost = hba->host;
5598
5599 spin_lock_irqsave(hba->host->host_lock, flags);
5600 if ((hba->ufshcd_state == UFSHCD_STATE_ERROR) ||
5601 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset)
&&
5602 !ufshcd_is_link_broken(hba))) {
5603 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
5604 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5605 spin_unlock_irqrestore(hba->host->host_lock, flags);
5606 return;
5607 }
5608 ufshcd_set_eh_in_progress(hba);
5609 spin_unlock_irqrestore(hba->host->host_lock, flags);
5610 pm_runtime_get_sync(hba->dev);
5611 /*
5612 * Don't assume anything of pm_runtime_get_sync(), if resume fails,
5613 * irq and clocks can be OFF, and powers can be OFF or in LPM.
5614 */
5615 ufshcd_setup_vreg(hba, true);
5616 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5617 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5618 ufshcd_setup_hba_vreg(hba, true);
5619 ufshcd_enable_irq(hba);
5620
5621 ufshcd_hold(hba, false);
5622 if (!ufshcd_is_clkgating_allowed(hba))
5623 ufshcd_setup_clocks(hba, true);
5624
5625 if (ufshcd_is_clkscaling_supported(hba)) {
5626 cancel_work_sync(&hba->clk_scaling.suspend_work);
5627 cancel_work_sync(&hba->clk_scaling.resume_work);
5628 ufshcd_suspend_clkscaling(hba);
5629 }
5630
5631 spin_lock_irqsave(hba->host->host_lock, flags);
5632 ufshcd_scsi_block_requests(hba);
5633 hba->ufshcd_state = UFSHCD_STATE_RESET;
5634
5635 /* Complete requests that have door-bell cleared by h/w */
5636 ufshcd_complete_requests(hba);
5637
5638 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5639 bool ret;
5640
5641 spin_unlock_irqrestore(hba->host->host_lock, flags);
5642 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5643 ret = ufshcd_quirk_dl_nac_errors(hba);
5644 spin_lock_irqsave(hba->host->host_lock, flags);
5645 if (!ret && !hba->force_reset && ufshcd_is_link_active(hba))
5646 goto skip_err_handling;
5647 }
5648
5649 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5650 ufshcd_is_saved_err_fatal(hba) ||
5651 ((hba->saved_err & UIC_ERROR) &&
5652 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5653 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5654 needs_reset = true;
5655
5656 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR |
5657 UFSHCD_UIC_HIBERN8_MASK)) {
5658 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5659 __func__, hba->saved_err, hba->saved_uic_err);
5660 spin_unlock_irqrestore(hba->host->host_lock, flags);
5661 ufshcd_print_host_state(hba);
5662 ufshcd_print_pwr_info(hba);
5663 ufshcd_print_host_regs(hba);
5664 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5665 spin_lock_irqsave(hba->host->host_lock, flags);
5666 }
5667
5668 /*
5669 * if host reset is required then skip clearing the pending
5670 * transfers forcefully because they will get cleared during
5671 * host reset and restore
5672 */
5673 if (needs_reset)
5674 goto skip_pending_xfer_clear;
5675
5676 /* release lock as clear command might sleep */
5677 spin_unlock_irqrestore(hba->host->host_lock, flags);
5678 /* Clear pending transfer requests */
5679 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5680 if (ufshcd_clear_cmd(hba, tag)) {
5681 err_xfer = true;
5682 goto lock_skip_pending_xfer_clear;
5683 }
5684 }
5685
5686 /* Clear pending task management requests */
5687 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5688 if (ufshcd_clear_tm_cmd(hba, tag)) {
5689 err_tm = true;
5690 goto lock_skip_pending_xfer_clear;
5691 }
5692 }
5693
5694 lock_skip_pending_xfer_clear:
5695 spin_lock_irqsave(hba->host->host_lock, flags);
5696
5697 /* Complete the requests that are cleared by s/w */
5698 ufshcd_complete_requests(hba);
5699
5700 if (err_xfer || err_tm)
5701 needs_reset = true;
5702
5703 skip_pending_xfer_clear:
5704 /* Fatal errors need reset */
5705 if (needs_reset) {
5706 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5707
5708 /*
5709 * ufshcd_reset_and_restore() does the link reinitialization
5710 * which will need atleast one empty doorbell slot to send the
5711 * device management commands (NOP and query commands).
5712 * If there is no slot empty at this moment then free up last
5713 * slot forcefully.
5714 */
5715 if (hba->outstanding_reqs == max_doorbells)
5716 __ufshcd_transfer_req_compl(hba,
5717 (1UL << (hba->nutrs - 1)));
5718
5719 hba->force_reset = false;
5720 spin_unlock_irqrestore(hba->host->host_lock, flags);
5721 reset_err = ufshcd_reset_and_restore(hba);
5722 spin_lock_irqsave(hba->host->host_lock, flags);
5723 if (reset_err)
5724 dev_err(hba->dev, "%s: reset and restore failed\n",
5725 __func__);
5726 }
5727
5728 skip_err_handling:
5729 if (!needs_reset) {
5730 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5731 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5732 if (hba->saved_err || hba->saved_uic_err)
5733 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err
0x%x",
5734 __func__, hba->saved_err, hba->saved_uic_err);
5735 }
5736
5737 if (!reset_err) {
5738 int ret;
5739 struct request_queue *q;
5740
5741 spin_unlock_irqrestore(hba->host->host_lock, flags);
5742 /*
5743 * Set RPM status of hba device to RPM_ACTIVE,
5744 * this also clears its runtime error.
5745 */
5746 ret = pm_runtime_set_active(hba->dev);
5747 /*
5748 * If hba device had runtime error, explicitly resume
5749 * its scsi devices so that block layer can wake up
5750 * those waiting in blk_queue_enter().
5751 */
5752 if (!ret) {
5753 list_for_each_entry(sdev, &shost->__devices, siblings) {
5754 q = sdev->request_queue;
5755 if (q->dev && (q->rpm_status == RPM_SUSPENDED
||
5756 q->rpm_status == RPM_SUSPENDING))
5757 pm_request_resume(q->dev);
5758 }
5759 }
5760 spin_lock_irqsave(hba->host->host_lock, flags);
5761 }
5762
5763 /* If clk_gating is held by pm ops, release it */
5764 if (pm_runtime_active(hba->dev) && hba->clk_gating.held_by_pm) {
5765 hba->clk_gating.held_by_pm = false;
5766 __ufshcd_release(hba);
5767 }
5768
5769 ufshcd_clear_eh_in_progress(hba);
5770 spin_unlock_irqrestore(hba->host->host_lock, flags);
5771 ufshcd_scsi_unblock_requests(hba);
5772 ufshcd_release(hba);
5773 if (ufshcd_is_clkscaling_supported(hba))
5774 ufshcd_resume_clkscaling(hba);
5775 pm_runtime_put_noidle(hba->dev);
5776 }
5777
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org