Hi Vladimir,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on drm/drm-next]
[also build test ERROR on v5.16-rc5]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:
https://github.com/0day-ci/linux/commits/Vladimir-Lypak/drm-msm-a5xx-Add-...
base:
git://anongit.freedesktop.org/drm/drm drm-next
config: riscv-allyesconfig
(
https://download.01.org/0day-ci/archive/20211213/202112131430.xn9Z7DDL-lk...)
compiler: riscv64-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
#
https://github.com/0day-ci/linux/commit/8b3d7b1aee7cbef05b8df99a0d17e1da5...
git remote add linux-review
https://github.com/0day-ci/linux
git fetch --no-tags linux-review
Vladimir-Lypak/drm-msm-a5xx-Add-support-for-Adreno-506-GPU/20211213-004110
git checkout 8b3d7b1aee7cbef05b8df99a0d17e1da59b23752
# save the config file to linux build tree
mkdir build_dir
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir
ARCH=riscv SHELL=/bin/bash
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp(a)intel.com>
Note: the
linux-review/Vladimir-Lypak/drm-msm-a5xx-Add-support-for-Adreno-506-GPU/20211213-004110
HEAD 9035efa0597803896493d62ee3a5e1d34e0ab080 builds fine.
It only hurts bisectability.
All errors (new ones prefixed by >>):
drivers/gpu/drm/msm/adreno/a5xx_gpu.c: In function 'a5xx_hw_init':
> drivers/gpu/drm/msm/adreno/a5xx_gpu.c:863:28: error:
'adreno_gou' undeclared (first use in this function); did you mean
'adreno_gpu'?
863 | if (adreno_is_a506(adreno_gou) ||
adreno_is_a508(adreno_gpu) ||
| ^~~~~~~~~~
| adreno_gpu
drivers/gpu/drm/msm/adreno/a5xx_gpu.c:863:28: note: each undeclared identifier is
reported only once for each function it appears in
vim +863 drivers/gpu/drm/msm/adreno/a5xx_gpu.c
658
659 #define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
660 A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
661 A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
662 A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
663 A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
664 A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
665 A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
666 A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
667 A5XX_RBBM_INT_0_MASK_CP_SW | \
668 A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
669 A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
670 A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
671
672 static int a5xx_hw_init(struct msm_gpu *gpu)
673 {
674 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
675 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
676 u32 regbit;
677 int ret;
678
679 gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
680
681 if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
682 adreno_is_a540(adreno_gpu))
683 gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
684
685 /* Make all blocks contribute to the GPU BUSY perf counter */
686 gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
687
688 /* Enable RBBM error reporting bits */
689 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
690
691 if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
692 /*
693 * Mask out the activity signals from RB1-3 to avoid false
694 * positives
695 */
696
697 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
698 0xF0000000);
699 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
700 0xFFFFFFFF);
701 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
702 0xFFFFFFFF);
703 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
704 0xFFFFFFFF);
705 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
706 0xFFFFFFFF);
707 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
708 0xFFFFFFFF);
709 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
710 0xFFFFFFFF);
711 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
712 0xFFFFFFFF);
713 }
714
715 /* Enable fault detection */
716 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
717 (1 << 30) | 0xFFFF);
718
719 /* Turn on performance counters */
720 gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
721
722 /* Select CP0 to always count cycles */
723 gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
724
725 /* Select RBBM0 to countable 6 to get the busy status for devfreq */
726 gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
727
728 /* Increase VFD cache access so LRZ and other data gets evicted less */
729 gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
730
731 /* Disable L2 bypass in the UCHE */
732 gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
733 gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
734 gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
735 gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
736
737 /* Set the GMEM VA range (0 to gpu->gmem) */
738 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
739 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
740 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
741 0x00100000 + adreno_gpu->gmem - 1);
742 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
743
744 if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
745 adreno_is_a510(adreno_gpu)) {
746 gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
747 if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
748 gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
749 else
750 gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
751 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
752 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
753 } else {
754 gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
755 if (adreno_is_a530(adreno_gpu))
756 gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
757 else
758 gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
759 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
760 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
761 }
762
763 if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
764 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
765 (0x100 << 11 | 0x100 << 22));
766 else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
767 adreno_is_a512(adreno_gpu))
768 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
769 (0x200 << 11 | 0x200 << 22));
770 else
771 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
772 (0x400 << 11 | 0x300 << 22));
773
774 if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
775 gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
776
777 /*
778 * Disable the RB sampler datapath DP2 clock gating optimization
779 * for 1-SP GPUs, as it is enabled by default.
780 */
781 if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
782 adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
783 gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
784
785 /* Disable UCHE global filter as SP can invalidate/flush independently */
786 gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
787
788 /* Enable USE_RETENTION_FLOPS */
789 gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
790
791 /* Enable ME/PFP split notification */
792 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
793
794 /*
795 * In A5x, CCU can send context_done event of a particular context to
796 * UCHE which ultimately reaches CP even when there is valid
797 * transaction of that context inside CCU. This can let CP to program
798 * config registers, which will make the "valid transaction" inside
799 * CCU to be interpreted differently. This can cause gpu fault. This
800 * bug is fixed in latest A510 revision. To enable this bug fix -
801 * bit[11] of RB_DBG_ECO_CNTL need to be set to 0, default is 1
802 * (disable). For older A510 version this bit is unused.
803 */
804 if (adreno_is_a510(adreno_gpu))
805 gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
806
807 /* Enable HWCG */
808 a5xx_set_hwcg(gpu, true);
809
810 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
811
812 /* Set the highest bank bit */
813 if (adreno_is_a540(adreno_gpu))
814 regbit = 2;
815 else
816 regbit = 1;
817
818 gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
819 gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
820
821 if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
822 adreno_is_a540(adreno_gpu))
823 gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
824
825 /* Disable All flat shading optimization (ALLFLATOPTDIS) */
826 gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
827
828 /* Protect registers from the CP */
829 gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
830
831 /* RBBM */
832 gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
833 gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
834 gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
835 gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
836 gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
837 gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
838
839 /* Content protect */
840 gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
841 ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
842 16));
843 gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
844 ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
845
846 /* CP */
847 gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
848 gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
849 gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
850 gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
851
852 /* RB */
853 gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
854 gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
855
856 /* VPC */
857 gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
858 gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
859
860 /* UCHE */
861 gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
862
863 if (adreno_is_a506(adreno_gou) || adreno_is_a508(adreno_gpu) ||
864 adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
865 adreno_is_a512(adreno_gpu) || adreno_is_a530(adreno_gpu))
866 gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
867 ADRENO_PROTECT_RW(0x10000, 0x8000));
868
869 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
870 /*
871 * Disable the trusted memory range - we don't actually supported secure
872 * memory rendering at this point in time and we don't want to block off
873 * part of the virtual memory space.
874 */
875 gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
876 REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
877 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
878
879 /* Put the GPU into 64 bit by default */
880 gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
881 gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
882 gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
883 gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
884 gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
885 gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
886 gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
887 gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
888 gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
889 gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
890 gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
891 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
892
893 /*
894 * VPC corner case with local memory load kill leads to corrupt
895 * internal state. Normal Disable does not work for all a5x chips.
896 * So do the following setting to disable it.
897 */
898 if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
899 gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
900 gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
901 }
902
903 ret = adreno_hw_init(gpu);
904 if (ret)
905 return ret;
906
907 if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
908 a5xx_gpmu_ucode_init(gpu);
909
910 ret = a5xx_ucode_init(gpu);
911 if (ret)
912 return ret;
913
914 /* Set the ringbuffer address */
915 gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
916 gpu->rb[0]->iova);
917
918 /*
919 * If the microcode supports the WHERE_AM_I opcode then we can use that
920 * in lieu of the RPTR shadow and enable preemption. Otherwise, we
921 * can't safely use the RPTR shadow or preemption. In either case, the
922 * RPTR shadow should be disabled in hardware.
923 */
924 gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
925 MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
926
927 /* Create a privileged buffer for the RPTR shadow */
928 if (a5xx_gpu->has_whereami) {
929 if (!a5xx_gpu->shadow_bo) {
930 a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
931 sizeof(u32) * gpu->nr_rings,
932 MSM_BO_WC | MSM_BO_MAP_PRIV,
933 gpu->aspace, &a5xx_gpu->shadow_bo,
934 &a5xx_gpu->shadow_iova);
935
936 if (IS_ERR(a5xx_gpu->shadow))
937 return PTR_ERR(a5xx_gpu->shadow);
938 }
939
940 gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
941 REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
942 } else if (gpu->nr_rings > 1) {
943 /* Disable preemption if WHERE_AM_I isn't available */
944 a5xx_preempt_fini(gpu);
945 gpu->nr_rings = 1;
946 }
947
948 a5xx_preempt_hw_init(gpu);
949
950 /* Disable the interrupts through the initial bringup stage */
951 gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
952
953 /* Clear ME_HALT to start the micro engine */
954 gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
955 ret = a5xx_me_init(gpu);
956 if (ret)
957 return ret;
958
959 ret = a5xx_power_init(gpu);
960 if (ret)
961 return ret;
962
963 /*
964 * Send a pipeline event stat to get misbehaving counters to start
965 * ticking correctly
966 */
967 if (adreno_is_a530(adreno_gpu)) {
968 OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
969 OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
970
971 a5xx_flush(gpu, gpu->rb[0], true);
972 if (!a5xx_idle(gpu, gpu->rb[0]))
973 return -EINVAL;
974 }
975
976 /*
977 * If the chip that we are using does support loading one, then
978 * try to load a zap shader into the secure world. If successful
979 * we can use the CP to switch out of secure mode. If not then we
980 * have no resource but to try to switch ourselves out manually. If we
981 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
982 * be blocked and a permissions violation will soon follow.
983 */
984 ret = a5xx_zap_shader_init(gpu);
985 if (!ret) {
986 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
987 OUT_RING(gpu->rb[0], 0x00000000);
988
989 a5xx_flush(gpu, gpu->rb[0], true);
990 if (!a5xx_idle(gpu, gpu->rb[0]))
991 return -EINVAL;
992 } else if (ret == -ENODEV) {
993 /*
994 * This device does not use zap shader (but print a warning
995 * just in case someone got their dt wrong.. hopefully they
996 * have a debug UART to realize the error of their ways...
997 * if you mess this up you are about to crash horribly)
998 */
999 dev_warn_once(gpu->dev->dev,
1000 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
1001 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
1002 } else {
1003 return ret;
1004 }
1005
1006 /* Last step - yield the ringbuffer */
1007 a5xx_preempt_start(gpu);
1008
1009 return 0;
1010 }
1011
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org