tree:
https://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git opp/linux-next
head: b9fd171ae7781c5eb2ecc5f3a59cb41b33c05be0
commit: 47ff5793e4c13946e3b604b6f2a57fa45b5b8f70 [6/9] OPP: Update the bandwidth on OPP frequency changes
config: x86_64-randconfig-a002-20200513 (attached as .config)
compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
reproduce:
git checkout 47ff5793e4c13946e3b604b6f2a57fa45b5b8f70
# save the attached .config to linux build tree
make ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kbuild test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
ld: drivers/opp/core.o: in function `_opp_table_kref_release':
drivers/opp/core.c:1087: undefined reference to `icc_put'
ld: drivers/opp/core.o: in function `dev_pm_opp_set_rate':
> drivers/opp/core.c:914: undefined reference to `icc_set_bw'
ld: drivers/opp/of.o: in function `dev_pm_opp_of_find_icc_paths':
drivers/opp/of.c:364: undefined reference to `of_icc_get_by_index'
ld: drivers/opp/of.c:383: undefined reference to `icc_put'
vim +914 drivers/opp/core.c
793
794 /**
795 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
796 * @dev: device for which we do this operation
797 * @target_freq: frequency to achieve
798 *
799 * This configures the power-supplies to the levels specified by the OPP
800 * corresponding to the target_freq, and programs the clock to a value <=
801 * target_freq, as rounded by clk_round_rate(). Device wanting to run at fmax
802 * provided by the opp, should have already rounded to the target OPP's
803 * frequency.
804 */
805 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
806 {
807 struct opp_table *opp_table;
808 unsigned long freq, old_freq, temp_freq;
809 struct dev_pm_opp *old_opp, *opp;
810 struct clk *clk;
811 int ret, i;
812
813 opp_table = _find_opp_table(dev);
814 if (IS_ERR(opp_table)) {
815 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
816 return PTR_ERR(opp_table);
817 }
818
819 if (unlikely(!target_freq)) {
820 if (opp_table->required_opp_tables) {
821 ret = _set_required_opps(dev, opp_table, NULL);
822 } else if (!_get_opp_count(opp_table)) {
823 return 0;
824 } else {
825 dev_err(dev, "target frequency can't be 0\n");
826 ret = -EINVAL;
827 }
828
829 goto put_opp_table;
830 }
831
832 clk = opp_table->clk;
833 if (IS_ERR(clk)) {
834 dev_err(dev, "%s: No clock available for the device\n",
835 __func__);
836 ret = PTR_ERR(clk);
837 goto put_opp_table;
838 }
839
840 freq = clk_round_rate(clk, target_freq);
841 if ((long)freq <= 0)
842 freq = target_freq;
843
844 old_freq = clk_get_rate(clk);
845
846 /* Return early if nothing to do */
847 if (old_freq == freq) {
848 		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
849 __func__, freq);
850 ret = 0;
851 goto put_opp_table;
852 }
853
854 /*
855 * For IO devices which require an OPP on some platforms/SoCs
856 * while just needing to scale the clock on some others
857 * we look for empty OPP tables with just a clock handle and
858 * scale only the clk. This makes dev_pm_opp_set_rate()
859 * equivalent to a clk_set_rate()
860 */
861 if (!_get_opp_count(opp_table)) {
862 ret = _generic_set_opp_clk_only(dev, clk, freq);
863 goto put_opp_table;
864 }
865
866 temp_freq = old_freq;
867 old_opp = _find_freq_ceil(opp_table, &temp_freq);
868 if (IS_ERR(old_opp)) {
869 dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
870 __func__, old_freq, PTR_ERR(old_opp));
871 }
872
873 temp_freq = freq;
874 opp = _find_freq_ceil(opp_table, &temp_freq);
875 if (IS_ERR(opp)) {
876 ret = PTR_ERR(opp);
877 dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
878 __func__, freq, ret);
879 goto put_old_opp;
880 }
881
882 dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
883 old_freq, freq);
884
885 /* Scaling up? Configure required OPPs before frequency */
886 if (freq >= old_freq) {
887 ret = _set_required_opps(dev, opp_table, opp);
888 if (ret)
889 goto put_opp;
890 }
891
892 if (opp_table->set_opp) {
893 ret = _set_opp_custom(opp_table, dev, old_freq, freq,
894 IS_ERR(old_opp) ? NULL : old_opp->supplies,
895 opp->supplies);
896 } else if (opp_table->regulators) {
897 ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
898 IS_ERR(old_opp) ? NULL : old_opp->supplies,
899 opp->supplies);
900 } else {
901 /* Only frequency scaling */
902 ret = _generic_set_opp_clk_only(dev, clk, freq);
903 }
904
905 /* Scaling down? Configure required OPPs after frequency */
906 if (!ret && freq < old_freq) {
907 ret = _set_required_opps(dev, opp_table, opp);
908 if (ret)
909 dev_err(dev, "Failed to set required opps: %d\n", ret);
910 }
911
912 if (!ret && opp_table->paths) {
913 for (i = 0; i < opp_table->path_count; i++) {
914 ret = icc_set_bw(opp_table->paths[i],
915 				 opp->bandwidth[i].avg,
916 				 opp->bandwidth[i].peak);
917 if (ret)
918 dev_err(dev, "Failed to set bandwidth[%d]: %d\n",
919 i, ret);
920 }
921 }
922
923 put_opp:
924 dev_pm_opp_put(opp);
925 put_old_opp:
926 if (!IS_ERR(old_opp))
927 dev_pm_opp_put(old_opp);
928 put_opp_table:
929 dev_pm_opp_put_opp_table(opp_table);
930 return ret;
931 }
932 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
933
934 /* OPP-dev Helpers */
935 static void _remove_opp_dev(struct opp_device *opp_dev,
936 struct opp_table *opp_table)
937 {
938 opp_debug_unregister(opp_dev, opp_table);
939 list_del(&opp_dev->node);
940 kfree(opp_dev);
941 }
942
943 static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
944 struct opp_table *opp_table)
945 {
946 struct opp_device *opp_dev;
947
948 opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
949 if (!opp_dev)
950 return NULL;
951
952 /* Initialize opp-dev */
953 opp_dev->dev = dev;
954
955 list_add(&opp_dev->node, &opp_table->dev_list);
956
957 /* Create debugfs entries for the opp_table */
958 opp_debug_register(opp_dev, opp_table);
959
960 return opp_dev;
961 }
962
963 struct opp_device *_add_opp_dev(const struct device *dev,
964 struct opp_table *opp_table)
965 {
966 struct opp_device *opp_dev;
967
968 mutex_lock(&opp_table->lock);
969 opp_dev = _add_opp_dev_unlocked(dev, opp_table);
970 mutex_unlock(&opp_table->lock);
971
972 return opp_dev;
973 }
974
975 static struct opp_table *_allocate_opp_table(struct device *dev, int index)
976 {
977 struct opp_table *opp_table;
978 struct opp_device *opp_dev;
979 int ret;
980
981 /*
982 * Allocate a new OPP table. In the infrequent case where a new
983 * device is needed to be added, we pay this penalty.
984 */
985 opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
986 if (!opp_table)
987 return NULL;
988
989 mutex_init(&opp_table->lock);
990 mutex_init(&opp_table->genpd_virt_dev_lock);
991 INIT_LIST_HEAD(&opp_table->dev_list);
992
993 /* Mark regulator count uninitialized */
994 opp_table->regulator_count = -1;
995
996 opp_dev = _add_opp_dev(dev, opp_table);
997 if (!opp_dev) {
998 kfree(opp_table);
999 return NULL;
1000 }
1001
1002 _of_init_opp_table(opp_table, dev, index);
1003
1004 /* Find clk for the device */
1005 opp_table->clk = clk_get(dev, NULL);
1006 if (IS_ERR(opp_table->clk)) {
1007 ret = PTR_ERR(opp_table->clk);
1008 if (ret != -EPROBE_DEFER)
1009 dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
1010 ret);
1011 }
1012
1013 /* Find interconnect path(s) for the device */
1014 ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
1015 if (ret)
1016 dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
1017 __func__, ret);
1018
1019 BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
1020 INIT_LIST_HEAD(&opp_table->opp_list);
1021 kref_init(&opp_table->kref);
1022
1023 /* Secure the device table modification */
1024 list_add(&opp_table->node, &opp_tables);
1025 return opp_table;
1026 }
1027
1028 void _get_opp_table_kref(struct opp_table *opp_table)
1029 {
1030 kref_get(&opp_table->kref);
1031 }
1032
1033 static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
1034 {
1035 struct opp_table *opp_table;
1036
1037 /* Hold our table modification lock here */
1038 mutex_lock(&opp_table_lock);
1039
1040 opp_table = _find_opp_table_unlocked(dev);
1041 if (!IS_ERR(opp_table))
1042 goto unlock;
1043
1044 opp_table = _managed_opp(dev, index);
1045 if (opp_table) {
1046 if (!_add_opp_dev_unlocked(dev, opp_table)) {
1047 dev_pm_opp_put_opp_table(opp_table);
1048 opp_table = NULL;
1049 }
1050 goto unlock;
1051 }
1052
1053 opp_table = _allocate_opp_table(dev, index);
1054
1055 unlock:
1056 mutex_unlock(&opp_table_lock);
1057
1058 return opp_table;
1059 }
1060
1061 struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
1062 {
1063 return _opp_get_opp_table(dev, 0);
1064 }
1065 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
1066
1067 struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
1068 int index)
1069 {
1070 return _opp_get_opp_table(dev, index);
1071 }
1072
1073 static void _opp_table_kref_release(struct kref *kref)
1074 {
1075 struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
1076 struct opp_device *opp_dev, *temp;
1077 int i;
1078
1079 _of_clear_opp_table(opp_table);
1080
1081 /* Release clk */
1082 if (!IS_ERR(opp_table->clk))
1083 clk_put(opp_table->clk);
1084
1085 if (opp_table->paths) {
1086 for (i = 0; i < opp_table->path_count; i++)
1087 icc_put(opp_table->paths[i]);
1088 kfree(opp_table->paths);
1089 }
1090
1091 WARN_ON(!list_empty(&opp_table->opp_list));
1092
1093 list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
1094 /*
1095 * The OPP table is getting removed, drop the performance state
1096 * constraints.
1097 */
1098 if (opp_table->genpd_performance_state)
1099 dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);
1100
1101 _remove_opp_dev(opp_dev, opp_table);
1102 }
1103
1104 mutex_destroy(&opp_table->genpd_virt_dev_lock);
1105 mutex_destroy(&opp_table->lock);
1106 list_del(&opp_table->node);
1107 kfree(opp_table);
1108
1109 mutex_unlock(&opp_table_lock);
1110 }
1111
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org