[ammarfaizi2-block:bpf/bpf-next/master 212/245] arch/powerpc/net/bpf_jit_comp.c:250:59: error: 'struct bpf_binary_header' has no member named 'pages'
by kernel test robot
tree: https://github.com/ammarfaizi2/linux-block bpf/bpf-next/master
head: e5313968c41ba890a91344773a0474d0246d20a3
commit: ed2d9e1a26cca963ff5ed3b76326d70f7d8201a9 [212/245] bpf: Use size instead of pages in bpf_binary_header
config: powerpc-allyesconfig (https://download.01.org/0day-ci/archive/20220210/202202101503.p1WYvmmS-lk...)
compiler: powerpc-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/ammarfaizi2/linux-block/commit/ed2d9e1a26cca963ff5ed3b...
git remote add ammarfaizi2-block https://github.com/ammarfaizi2/linux-block
git fetch --no-tags ammarfaizi2-block bpf/bpf-next/master
git checkout ed2d9e1a26cca963ff5ed3b76326d70f7d8201a9
# save the config file to linux build tree
mkdir build_dir
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=powerpc SHELL=/bin/bash
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
Note: the ammarfaizi2-block/bpf/bpf-next/master HEAD e5313968c41ba890a91344773a0474d0246d20a3 builds fine.
It only hurts bisectability.
All errors (new ones prefixed by >>):
arch/powerpc/net/bpf_jit_comp.c: In function 'bpf_int_jit_compile':
>> arch/powerpc/net/bpf_jit_comp.c:250:59: error: 'struct bpf_binary_header' has no member named 'pages'
250 | bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
| ^~
vim +250 arch/powerpc/net/bpf_jit_comp.c
4ea76e90a97d22 Christophe Leroy 2021-03-22 245
4ea76e90a97d22 Christophe Leroy 2021-03-22 246 fp->bpf_func = (void *)image;
4ea76e90a97d22 Christophe Leroy 2021-03-22 247 fp->jited = 1;
983bdc0245a29c Ravi Bangoria 2021-10-12 248 fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
4ea76e90a97d22 Christophe Leroy 2021-03-22 249
4ea76e90a97d22 Christophe Leroy 2021-03-22 @250 bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
4ea76e90a97d22 Christophe Leroy 2021-03-22 251 if (!fp->is_func || extra_pass) {
44a8214de96baf Hari Bathini 2021-10-25 252 bpf_jit_binary_lock_ro(bpf_hdr);
4ea76e90a97d22 Christophe Leroy 2021-03-22 253 bpf_prog_fill_jited_linfo(fp, addrs);
4ea76e90a97d22 Christophe Leroy 2021-03-22 254 out_addrs:
4ea76e90a97d22 Christophe Leroy 2021-03-22 255 kfree(addrs);
4ea76e90a97d22 Christophe Leroy 2021-03-22 256 kfree(jit_data);
4ea76e90a97d22 Christophe Leroy 2021-03-22 257 fp->aux->jit_data = NULL;
4ea76e90a97d22 Christophe Leroy 2021-03-22 258 } else {
4ea76e90a97d22 Christophe Leroy 2021-03-22 259 jit_data->addrs = addrs;
4ea76e90a97d22 Christophe Leroy 2021-03-22 260 jit_data->ctx = cgctx;
4ea76e90a97d22 Christophe Leroy 2021-03-22 261 jit_data->proglen = proglen;
4ea76e90a97d22 Christophe Leroy 2021-03-22 262 jit_data->image = image;
4ea76e90a97d22 Christophe Leroy 2021-03-22 263 jit_data->header = bpf_hdr;
4ea76e90a97d22 Christophe Leroy 2021-03-22 264 }
4ea76e90a97d22 Christophe Leroy 2021-03-22 265
4ea76e90a97d22 Christophe Leroy 2021-03-22 266 out:
4ea76e90a97d22 Christophe Leroy 2021-03-22 267 if (bpf_blinded)
4ea76e90a97d22 Christophe Leroy 2021-03-22 268 bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
4ea76e90a97d22 Christophe Leroy 2021-03-22 269
4ea76e90a97d22 Christophe Leroy 2021-03-22 270 return fp;
4ea76e90a97d22 Christophe Leroy 2021-03-22 271 }
983bdc0245a29c Ravi Bangoria 2021-10-12 272
:::::: The code at line 250 was first introduced by commit
:::::: 4ea76e90a97d22f86adbb10044d29d919e620f2e powerpc/bpf: Move common functions into bpf_jit_comp.c
:::::: TO: Christophe Leroy <christophe.leroy(a)csgroup.eu>
:::::: CC: Michael Ellerman <mpe(a)ellerman.id.au>
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
7 months, 1 week
Re: [PATCH v2 06/10] nvme: add copy support
by kernel test robot
Hi Nitesh,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on axboe-block/for-next]
[also build test WARNING on linus/master v5.17-rc3 next-20220209]
[cannot apply to device-mapper-dm/for-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting the patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Nitesh-Shetty/block-make-bio_map...
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: arm64-randconfig-s032-20220207 (https://download.01.org/0day-ci/archive/20220210/202202101447.DEcXWmHU-lk...)
compiler: aarch64-linux-gcc (GCC) 11.2.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.4-dirty
# https://github.com/0day-ci/linux/commit/22cbc1d3df11aaadd02b27ce5dcb702f9...
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Nitesh-Shetty/block-make-bio_map_kern-non-static/20220207-231407
git checkout 22cbc1d3df11aaadd02b27ce5dcb702f9a8f4272
# save the config file to linux build tree
mkdir build_dir
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=arm64 SHELL=/bin/bash drivers/nvme/host/ drivers/nvme/target/
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
sparse warnings: (new ones prefixed by >>)
>> drivers/nvme/host/core.c:1793:42: sparse: sparse: cast to restricted __le64
drivers/nvme/host/core.c:1793:42: sparse: sparse: cast from restricted __le32
>> drivers/nvme/host/core.c:1795:48: sparse: sparse: cast to restricted __le32
>> drivers/nvme/host/core.c:1795:48: sparse: sparse: cast from restricted __le16
>> drivers/nvme/host/core.c:903:26: sparse: sparse: incorrect type in assignment (different base types) @@ expected restricted __le16 [usertype] dspec @@ got restricted __le32 [usertype] @@
drivers/nvme/host/core.c:903:26: sparse: expected restricted __le16 [usertype] dspec
drivers/nvme/host/core.c:903:26: sparse: got restricted __le32 [usertype]
vim +1793 drivers/nvme/host/core.c
1774
1775 static void nvme_config_copy(struct gendisk *disk, struct nvme_ns *ns,
1776 struct nvme_id_ns *id)
1777 {
1778 struct nvme_ctrl *ctrl = ns->ctrl;
1779 struct request_queue *queue = disk->queue;
1780
1781 if (!(ctrl->oncs & NVME_CTRL_ONCS_COPY)) {
1782 queue->limits.copy_offload = 0;
1783 queue->limits.max_copy_sectors = 0;
1784 queue->limits.max_copy_range_sectors = 0;
1785 queue->limits.max_copy_nr_ranges = 0;
1786 blk_queue_flag_clear(QUEUE_FLAG_COPY, queue);
1787 return;
1788 }
1789
1790 /* setting copy limits */
1791 blk_queue_flag_test_and_set(QUEUE_FLAG_COPY, queue);
1792 queue->limits.copy_offload = 0;
> 1793 queue->limits.max_copy_sectors = le64_to_cpu(id->mcl) *
1794 (1 << (ns->lba_shift - 9));
> 1795 queue->limits.max_copy_range_sectors = le32_to_cpu(id->mssrl) *
1796 (1 << (ns->lba_shift - 9));
1797 queue->limits.max_copy_nr_ranges = id->msrc + 1;
1798 }
1799
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
7 months, 1 week
[drm-misc:drm-misc-next 3/11] htmldocs: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:5038: warning: expecting prototype for amdgpu_device_gpu_recover(). Prototype was for amdgpu_device_gpu_recover_imp() instead
by kernel test robot
tree: git://anongit.freedesktop.org/drm/drm-misc drm-misc-next
head: 3675c2f26f33ab4928859fb8950a4697a16be5c9
commit: 54f329cc7a7a7ea265c45b206d45e3d09192aba7 [3/11] drm/amdgpu: Serialize non TDR gpu recovery with TDRs
reproduce: make htmldocs
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
All warnings (new ones prefixed by >>):
>> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:5038: warning: expecting prototype for amdgpu_device_gpu_recover(). Prototype was for amdgpu_device_gpu_recover_imp() instead
vim +5038 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
e6c6338f393b74a Jack Zhang 2021-03-08 5024
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5025 /**
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5026 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5027 *
982a820bac1b643 Mauro Carvalho Chehab 2020-10-21 5028 * @adev: amdgpu_device pointer
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5029 * @job: which job trigger hang
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5030 *
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5031 * Attempt to reset the GPU if it has hung (all asics).
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5032 * Attempt to do soft-reset or full-reset and reinitialize Asic
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5033 * Returns 0 for success or an error on failure.
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5034 */
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5035
54f329cc7a7a7ea Andrey Grodzovsky 2021-12-17 5036 int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5037 struct amdgpu_job *job)
26bc534094ed45f Andrey Grodzovsky 2018-11-22 @5038 {
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5039 struct list_head device_list, *device_list_handle = NULL;
7dd8c205eaedfa3 Evan Quan 2020-04-16 5040 bool job_signaled = false;
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5041 struct amdgpu_hive_info *hive = NULL;
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5042 struct amdgpu_device *tmp_adev = NULL;
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5043 int i, r = 0;
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5044 bool need_emergency_restart = false;
3f12acc8d6d4b2e Evan Quan 2020-04-21 5045 bool audio_suspended = false;
e6c6338f393b74a Jack Zhang 2021-03-08 5046 int tmp_vram_lost_counter;
04442bf70debb19 Lijo Lazar 2021-03-16 5047 struct amdgpu_reset_context reset_context;
04442bf70debb19 Lijo Lazar 2021-03-16 5048
04442bf70debb19 Lijo Lazar 2021-03-16 5049 memset(&reset_context, 0, sizeof(reset_context));
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5050
6e3cd2a9a6ac322 Mauro Carvalho Chehab 2020-10-23 5051 /*
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5052 * Special case: RAS triggered and full reset isn't supported
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5053 */
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5054 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5055
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5056 /*
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5057 * Flush RAM to disk so that after reboot
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5058 * the user can read log and see why the system rebooted.
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5059 */
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5060 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5061 DRM_WARN("Emergency reboot.");
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5062
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5063 ksys_sync_helper();
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5064 emergency_restart();
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5065 }
d5ea093eebf022e Andrey Grodzovsky 2019-08-22 5066
b823821f2244add Le Ma 2019-11-27 5067 dev_info(adev->dev, "GPU %s begin!\n",
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5068 need_emergency_restart ? "jobs stop":"reset");
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5069
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5070 /*
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5071 * Here we trylock to avoid chain of resets executing from
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5072 * either trigger by jobs on different adevs in XGMI hive or jobs on
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5073 * different schedulers for same device while this TO handler is running.
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5074 * We always reset all schedulers for device and all devices for XGMI
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5075 * hive so that should take care of them too.
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5076 */
175ac6ec6bd8db6 Zhigang Luo 2021-11-26 5077 if (!amdgpu_sriov_vf(adev))
d95e8e97e2d522b Dennis Li 2020-08-18 5078 hive = amdgpu_get_xgmi_hive(adev);
53b3f8f40e6cff3 Dennis Li 2020-08-19 5079 if (hive) {
53b3f8f40e6cff3 Dennis Li 2020-08-19 5080 if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5081 DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
0b2d2c2eecf27f4 Andrey Grodzovsky 2019-08-27 5082 job ? job->base.id : -1, hive->hive_id);
d95e8e97e2d522b Dennis Li 2020-08-18 5083 amdgpu_put_xgmi_hive(hive);
ff99849b00fef59 Jingwen Chen 2021-07-20 5084 if (job && job->vm)
91fb309d8294be5 Horace Chen 2021-01-20 5085 drm_sched_increase_karma(&job->base);
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5086 return 0;
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5087 }
53b3f8f40e6cff3 Dennis Li 2020-08-19 5088 mutex_lock(&hive->hive_lock);
53b3f8f40e6cff3 Dennis Li 2020-08-19 5089 }
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5090
04442bf70debb19 Lijo Lazar 2021-03-16 5091 reset_context.method = AMD_RESET_METHOD_NONE;
04442bf70debb19 Lijo Lazar 2021-03-16 5092 reset_context.reset_req_dev = adev;
04442bf70debb19 Lijo Lazar 2021-03-16 5093 reset_context.job = job;
04442bf70debb19 Lijo Lazar 2021-03-16 5094 reset_context.hive = hive;
04442bf70debb19 Lijo Lazar 2021-03-16 5095 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
04442bf70debb19 Lijo Lazar 2021-03-16 5096
91fb309d8294be5 Horace Chen 2021-01-20 5097 /*
91fb309d8294be5 Horace Chen 2021-01-20 5098 * lock the device before we try to operate the linked list
91fb309d8294be5 Horace Chen 2021-01-20 5099 * if didn't get the device lock, don't touch the linked list since
91fb309d8294be5 Horace Chen 2021-01-20 5100 * others may iterating it.
91fb309d8294be5 Horace Chen 2021-01-20 5101 */
91fb309d8294be5 Horace Chen 2021-01-20 5102 r = amdgpu_device_lock_hive_adev(adev, hive);
91fb309d8294be5 Horace Chen 2021-01-20 5103 if (r) {
91fb309d8294be5 Horace Chen 2021-01-20 5104 dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
91fb309d8294be5 Horace Chen 2021-01-20 5105 job ? job->base.id : -1);
91fb309d8294be5 Horace Chen 2021-01-20 5106
91fb309d8294be5 Horace Chen 2021-01-20 5107 /* even we skipped this reset, still need to set the job to guilty */
ff99849b00fef59 Jingwen Chen 2021-07-20 5108 if (job && job->vm)
91fb309d8294be5 Horace Chen 2021-01-20 5109 drm_sched_increase_karma(&job->base);
91fb309d8294be5 Horace Chen 2021-01-20 5110 goto skip_recovery;
91fb309d8294be5 Horace Chen 2021-01-20 5111 }
91fb309d8294be5 Horace Chen 2021-01-20 5112
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5113 /*
9e94d22c0085858 Evan Quan 2020-04-16 5114 * Build list of devices to reset.
9e94d22c0085858 Evan Quan 2020-04-16 5115 * In case we are in XGMI hive mode, resort the device list
9e94d22c0085858 Evan Quan 2020-04-16 5116 * to put adev in the 1st position.
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5117 */
9e94d22c0085858 Evan Quan 2020-04-16 5118 INIT_LIST_HEAD(&device_list);
175ac6ec6bd8db6 Zhigang Luo 2021-11-26 5119 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
655ce9cb13b5967 shaoyunl 2021-03-04 5120 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
655ce9cb13b5967 shaoyunl 2021-03-04 5121 list_add_tail(&tmp_adev->reset_list, &device_list);
655ce9cb13b5967 shaoyunl 2021-03-04 5122 if (!list_is_first(&adev->reset_list, &device_list))
655ce9cb13b5967 shaoyunl 2021-03-04 5123 list_rotate_to_front(&adev->reset_list, &device_list);
655ce9cb13b5967 shaoyunl 2021-03-04 5124 device_list_handle = &device_list;
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5125 } else {
655ce9cb13b5967 shaoyunl 2021-03-04 5126 list_add_tail(&adev->reset_list, &device_list);
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5127 device_list_handle = &device_list;
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5128 }
26bc534094ed45f Andrey Grodzovsky 2018-11-22 5129
12ffa55da60f835 Andrey Grodzovsky 2019-08-30 5130 /* block all schedulers and reset given job's ring */
655ce9cb13b5967 shaoyunl 2021-03-04 5131 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
3f12acc8d6d4b2e Evan Quan 2020-04-21 5132 /*
3f12acc8d6d4b2e Evan Quan 2020-04-21 5133 * Try to put the audio codec into suspend state
3f12acc8d6d4b2e Evan Quan 2020-04-21 5134 * before gpu reset started.
3f12acc8d6d4b2e Evan Quan 2020-04-21 5135 *
3f12acc8d6d4b2e Evan Quan 2020-04-21 5136 * Due to the power domain of the graphics device
3f12acc8d6d4b2e Evan Quan 2020-04-21 5137 * is shared with AZ power domain. Without this,
3f12acc8d6d4b2e Evan Quan 2020-04-21 5138 * we may change the audio hardware from behind
3f12acc8d6d4b2e Evan Quan 2020-04-21 5139 * the audio driver's back. That will trigger
3f12acc8d6d4b2e Evan Quan 2020-04-21 5140 * some audio codec errors.
3f12acc8d6d4b2e Evan Quan 2020-04-21 5141 */
3f12acc8d6d4b2e Evan Quan 2020-04-21 5142 if (!amdgpu_device_suspend_display_audio(tmp_adev))
3f12acc8d6d4b2e Evan Quan 2020-04-21 5143 audio_suspended = true;
3f12acc8d6d4b2e Evan Quan 2020-04-21 5144
9e94d22c0085858 Evan Quan 2020-04-16 5145 amdgpu_ras_set_error_query_ready(tmp_adev, false);
9e94d22c0085858 Evan Quan 2020-04-16 5146
52fb44cf30fc6b1 Evan Quan 2020-04-16 5147 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
52fb44cf30fc6b1 Evan Quan 2020-04-16 5148
428890a3fec1315 shaoyunl 2021-11-29 5149 if (!amdgpu_sriov_vf(tmp_adev))
9e94d22c0085858 Evan Quan 2020-04-16 5150 amdgpu_amdkfd_pre_reset(tmp_adev);
9e94d22c0085858 Evan Quan 2020-04-16 5151
fdafb3597a2cc46 Evan Quan 2019-06-26 5152 /*
fdafb3597a2cc46 Evan Quan 2019-06-26 5153 * Mark these ASICs to be reseted as untracked first
fdafb3597a2cc46 Evan Quan 2019-06-26 5154 * And add them back after reset completed
fdafb3597a2cc46 Evan Quan 2019-06-26 5155 */
fdafb3597a2cc46 Evan Quan 2019-06-26 5156 amdgpu_unregister_gpu_instance(tmp_adev);
fdafb3597a2cc46 Evan Quan 2019-06-26 5157
087451f372bf76d Evan Quan 2021-10-19 5158 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
565d1941557756a Evan Quan 2020-03-11 5159
f1c1314be429718 xinhui pan 2019-07-04 5160 /* disable ras on ALL IPs */
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5161 if (!need_emergency_restart &&
b823821f2244add Le Ma 2019-11-27 5162 amdgpu_device_ip_need_full_reset(tmp_adev))
f1c1314be429718 xinhui pan 2019-07-04 5163 amdgpu_ras_suspend(tmp_adev);
f1c1314be429718 xinhui pan 2019-07-04 5164
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5165 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5166 struct amdgpu_ring *ring = tmp_adev->rings[i];
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5167
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5168 if (!ring || !ring->sched.thread)
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5169 continue;
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5170
0b2d2c2eecf27f4 Andrey Grodzovsky 2019-08-27 5171 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
7c6e68c777f1094 Andrey Grodzovsky 2019-09-13 5172
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5173 if (need_emergency_restart)
7c6e68c777f1094 Andrey Grodzovsky 2019-09-13 5174 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5175 }
8f8c80f43009672 Jingwen Chen 2021-02-25 5176 atomic_inc(&tmp_adev->gpu_reset_counter);
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5177 }
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5178
bb5c7235eaafb4e Wenhui Sheng 2020-07-13 5179 if (need_emergency_restart)
7c6e68c777f1094 Andrey Grodzovsky 2019-09-13 5180 goto skip_sched_resume;
7c6e68c777f1094 Andrey Grodzovsky 2019-09-13 5181
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5182 /*
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5183 * Must check guilty signal here since after this point all old
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5184 * HW fences are force signaled.
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5185 *
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5186 * job->base holds a reference to parent fence
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5187 */
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5188 if (job && job->base.s_fence->parent &&
7dd8c205eaedfa3 Evan Quan 2020-04-16 5189 dma_fence_is_signaled(job->base.s_fence->parent)) {
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5190 job_signaled = true;
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5191 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5192 goto skip_hw_reset;
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5193 }
1d721ed679db188 Andrey Grodzovsky 2019-04-18 5194
:::::: The code at line 5038 was first introduced by commit
:::::: 26bc534094ed45fdedef6b4ce8b96030340c5ce7 drm/amdgpu: Refactor GPU reset for XGMI hive case
:::::: TO: Andrey Grodzovsky <andrey.grodzovsky(a)amd.com>
:::::: CC: Alex Deucher <alexander.deucher(a)amd.com>
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
7 months, 1 week
Re: [PATCH v2 03/10] block: Add copy offload support infrastructure
by Dan Carpenter
Hi Nitesh,
url: https://github.com/0day-ci/linux/commits/Nitesh-Shetty/block-make-bio_map...
base: https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: i386-randconfig-m021-20220207 (https://download.01.org/0day-ci/archive/20220209/202202090703.U5riBMIn-lk...)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
Reported-by: Dan Carpenter <dan.carpenter(a)oracle.com>
smatch warnings:
block/blk-lib.c:272 blk_copy_offload() warn: possible memory leak of 'ctx'
vim +/ctx +272 block/blk-lib.c
12a9801a7301f1 Nitesh Shetty 2022-02-07 185 int blk_copy_offload(struct block_device *src_bdev, int nr_srcs,
12a9801a7301f1 Nitesh Shetty 2022-02-07 186 struct range_entry *rlist, struct block_device *dst_bdev, gfp_t gfp_mask)
12a9801a7301f1 Nitesh Shetty 2022-02-07 187 {
12a9801a7301f1 Nitesh Shetty 2022-02-07 188 struct request_queue *sq = bdev_get_queue(src_bdev);
12a9801a7301f1 Nitesh Shetty 2022-02-07 189 struct request_queue *dq = bdev_get_queue(dst_bdev);
12a9801a7301f1 Nitesh Shetty 2022-02-07 190 struct bio *read_bio, *write_bio;
12a9801a7301f1 Nitesh Shetty 2022-02-07 191 struct copy_ctx *ctx;
12a9801a7301f1 Nitesh Shetty 2022-02-07 192 struct cio *cio;
12a9801a7301f1 Nitesh Shetty 2022-02-07 193 struct page *token;
12a9801a7301f1 Nitesh Shetty 2022-02-07 194 sector_t src_blk, copy_len, dst_blk;
12a9801a7301f1 Nitesh Shetty 2022-02-07 195 sector_t remaining, max_copy_len = LONG_MAX;
12a9801a7301f1 Nitesh Shetty 2022-02-07 196 int ri = 0, ret = 0;
12a9801a7301f1 Nitesh Shetty 2022-02-07 197
12a9801a7301f1 Nitesh Shetty 2022-02-07 198 cio = kzalloc(sizeof(struct cio), GFP_KERNEL);
12a9801a7301f1 Nitesh Shetty 2022-02-07 199 if (!cio)
12a9801a7301f1 Nitesh Shetty 2022-02-07 200 return -ENOMEM;
12a9801a7301f1 Nitesh Shetty 2022-02-07 201 atomic_set(&cio->refcount, 0);
12a9801a7301f1 Nitesh Shetty 2022-02-07 202 cio->rlist = rlist;
12a9801a7301f1 Nitesh Shetty 2022-02-07 203
12a9801a7301f1 Nitesh Shetty 2022-02-07 204 max_copy_len = min3(max_copy_len, (sector_t)sq->limits.max_copy_sectors,
12a9801a7301f1 Nitesh Shetty 2022-02-07 205 (sector_t)dq->limits.max_copy_sectors);
12a9801a7301f1 Nitesh Shetty 2022-02-07 206 max_copy_len = min3(max_copy_len, (sector_t)sq->limits.max_copy_range_sectors,
12a9801a7301f1 Nitesh Shetty 2022-02-07 207 (sector_t)dq->limits.max_copy_range_sectors) << SECTOR_SHIFT;
12a9801a7301f1 Nitesh Shetty 2022-02-07 208
12a9801a7301f1 Nitesh Shetty 2022-02-07 209 for (ri = 0; ri < nr_srcs; ri++) {
12a9801a7301f1 Nitesh Shetty 2022-02-07 210 cio->rlist[ri].comp_len = rlist[ri].len;
12a9801a7301f1 Nitesh Shetty 2022-02-07 211 for (remaining = rlist[ri].len, src_blk = rlist[ri].src, dst_blk = rlist[ri].dst;
12a9801a7301f1 Nitesh Shetty 2022-02-07 212 remaining > 0;
12a9801a7301f1 Nitesh Shetty 2022-02-07 213 remaining -= copy_len, src_blk += copy_len, dst_blk += copy_len) {
12a9801a7301f1 Nitesh Shetty 2022-02-07 214 copy_len = min(remaining, max_copy_len);
12a9801a7301f1 Nitesh Shetty 2022-02-07 215
12a9801a7301f1 Nitesh Shetty 2022-02-07 216 token = alloc_page(gfp_mask);
12a9801a7301f1 Nitesh Shetty 2022-02-07 217 if (unlikely(!token)) {
12a9801a7301f1 Nitesh Shetty 2022-02-07 218 ret = -ENOMEM;
12a9801a7301f1 Nitesh Shetty 2022-02-07 219 goto err_token;
12a9801a7301f1 Nitesh Shetty 2022-02-07 220 }
12a9801a7301f1 Nitesh Shetty 2022-02-07 221
12a9801a7301f1 Nitesh Shetty 2022-02-07 222 read_bio = bio_alloc(src_bdev, 1, REQ_OP_READ | REQ_COPY | REQ_NOMERGE,
12a9801a7301f1 Nitesh Shetty 2022-02-07 223 gfp_mask);
12a9801a7301f1 Nitesh Shetty 2022-02-07 224 if (!read_bio) {
12a9801a7301f1 Nitesh Shetty 2022-02-07 225 ret = -ENOMEM;
12a9801a7301f1 Nitesh Shetty 2022-02-07 226 goto err_read_bio;
12a9801a7301f1 Nitesh Shetty 2022-02-07 227 }
12a9801a7301f1 Nitesh Shetty 2022-02-07 228 read_bio->bi_iter.bi_sector = src_blk >> SECTOR_SHIFT;
12a9801a7301f1 Nitesh Shetty 2022-02-07 229 read_bio->bi_iter.bi_size = copy_len;
12a9801a7301f1 Nitesh Shetty 2022-02-07 230 __bio_add_page(read_bio, token, PAGE_SIZE, 0);
12a9801a7301f1 Nitesh Shetty 2022-02-07 231 ret = submit_bio_wait(read_bio);
12a9801a7301f1 Nitesh Shetty 2022-02-07 232 if (ret) {
12a9801a7301f1 Nitesh Shetty 2022-02-07 233 bio_put(read_bio);
12a9801a7301f1 Nitesh Shetty 2022-02-07 234 goto err_read_bio;
12a9801a7301f1 Nitesh Shetty 2022-02-07 235 }
12a9801a7301f1 Nitesh Shetty 2022-02-07 236 bio_put(read_bio);
12a9801a7301f1 Nitesh Shetty 2022-02-07 237 ctx = kzalloc(sizeof(struct copy_ctx), gfp_mask);
12a9801a7301f1 Nitesh Shetty 2022-02-07 238 if (!ctx) {
12a9801a7301f1 Nitesh Shetty 2022-02-07 239 ret = -ENOMEM;
12a9801a7301f1 Nitesh Shetty 2022-02-07 240 goto err_read_bio;
12a9801a7301f1 Nitesh Shetty 2022-02-07 241 }
12a9801a7301f1 Nitesh Shetty 2022-02-07 242 ctx->cio = cio;
12a9801a7301f1 Nitesh Shetty 2022-02-07 243 ctx->range_idx = ri;
12a9801a7301f1 Nitesh Shetty 2022-02-07 244 ctx->start_sec = rlist[ri].src;
12a9801a7301f1 Nitesh Shetty 2022-02-07 245
12a9801a7301f1 Nitesh Shetty 2022-02-07 246 write_bio = bio_alloc(dst_bdev, 1, REQ_OP_WRITE | REQ_COPY | REQ_NOMERGE,
12a9801a7301f1 Nitesh Shetty 2022-02-07 247 gfp_mask);
12a9801a7301f1 Nitesh Shetty 2022-02-07 248 if (!write_bio) {
Please call kfree(ctx) before the goto.
12a9801a7301f1 Nitesh Shetty 2022-02-07 249 ret = -ENOMEM;
12a9801a7301f1 Nitesh Shetty 2022-02-07 250 goto err_read_bio;
12a9801a7301f1 Nitesh Shetty 2022-02-07 251 }
12a9801a7301f1 Nitesh Shetty 2022-02-07 252
12a9801a7301f1 Nitesh Shetty 2022-02-07 253 write_bio->bi_iter.bi_sector = dst_blk >> SECTOR_SHIFT;
12a9801a7301f1 Nitesh Shetty 2022-02-07 254 write_bio->bi_iter.bi_size = copy_len;
12a9801a7301f1 Nitesh Shetty 2022-02-07 255 __bio_add_page(write_bio, token, PAGE_SIZE, 0);
12a9801a7301f1 Nitesh Shetty 2022-02-07 256 write_bio->bi_end_io = bio_copy_end_io;
12a9801a7301f1 Nitesh Shetty 2022-02-07 257 write_bio->bi_private = ctx;
12a9801a7301f1 Nitesh Shetty 2022-02-07 258 atomic_inc(&cio->refcount);
12a9801a7301f1 Nitesh Shetty 2022-02-07 259 submit_bio(write_bio);
12a9801a7301f1 Nitesh Shetty 2022-02-07 260 }
12a9801a7301f1 Nitesh Shetty 2022-02-07 261 }
12a9801a7301f1 Nitesh Shetty 2022-02-07 262
12a9801a7301f1 Nitesh Shetty 2022-02-07 263 /* Wait for completion of all IO's*/
12a9801a7301f1 Nitesh Shetty 2022-02-07 264 return cio_await_completion(cio);
12a9801a7301f1 Nitesh Shetty 2022-02-07 265
12a9801a7301f1 Nitesh Shetty 2022-02-07 266 err_read_bio:
12a9801a7301f1 Nitesh Shetty 2022-02-07 267 __free_page(token);
12a9801a7301f1 Nitesh Shetty 2022-02-07 268 err_token:
12a9801a7301f1 Nitesh Shetty 2022-02-07 269 rlist[ri].comp_len = min_t(sector_t, rlist[ri].comp_len, (rlist[ri].len - remaining));
12a9801a7301f1 Nitesh Shetty 2022-02-07 270
12a9801a7301f1 Nitesh Shetty 2022-02-07 271 cio->io_err = ret;
12a9801a7301f1 Nitesh Shetty 2022-02-07 @272 return cio_await_completion(cio);
12a9801a7301f1 Nitesh Shetty 2022-02-07 273 }
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
7 months, 1 week