Hi Jonathan,
[FYI, it's a private test report for your RFC patch.]
[auto build test ERROR on tip/sched/core]
[also build test ERROR on next-20210125]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:
https://github.com/0day-ci/linux/commits/Jonathan-Schwender/sched-rt-Fix-...
base:
https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
7a976f77bb962ce9486e09eb839aa135619b54f3
config: h8300-randconfig-r022-20210201 (attached as .config)
compiler: h8300-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
#
https://github.com/0day-ci/linux/commit/f2f23b24036429e0d47deb121f16367a2...
git remote add linux-review
https://github.com/0day-ci/linux
git fetch --no-tags linux-review
Jonathan-Schwender/sched-rt-Fix-RT-group-throttling-with-nohz_full/20210201-173818
git checkout f2f23b24036429e0d47deb121f16367a2444247e
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=h8300
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp(a)intel.com>
All errors (new ones prefixed by >>):
kernel/sched/rt.c:669:6: warning: no previous prototype for
'sched_rt_bandwidth_account' [-Wmissing-prototypes]
669 | bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
kernel/sched/rt.c: In function 'do_sched_rt_period_timer':
> kernel/sched/rt.c:876:33: error: 'struct rq' has no
member named 'cpu'
876 | &&
housekeeping_cpu(this_rq()->cpu, HK_FLAG_TIMER))) {
| ^~
vim +876 kernel/sched/rt.c
853
854 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
855 {
856 int i, idle = 1, throttled = 0;
857 const struct cpumask *span;
858
859 span = sched_rt_period_mask();
860 #ifdef CONFIG_RT_GROUP_SCHED
861 /*
862 * FIXME: isolated CPUs should really leave the root task group,
863 * whether they are isolcpus or were isolated via cpusets, lest
864 * the timer run on a CPU which does not service all runqueues,
865 * potentially leaving other CPUs indefinitely throttled. If
866 * isolation is really required, the user will turn the throttle
867 * off to kill the perturbations it causes anyway. Meanwhile,
868 * this maintains functionality for boot and/or troubleshooting.
869 * If nohz_full is active and the timer was offloaded to a
870 * housekeeping CPU, sched_rt_period_mask() will not contain
871 * the isolated CPU. To prevent indefinite throttling of tasks
872 * on isolated CPUs, housekeeping CPUs service all online CPUs.
873 */
874 if (rt_b == &root_task_group.rt_bandwidth
875 || (housekeeping_enabled(HK_FLAG_TIMER)
876 && housekeeping_cpu(this_rq()->cpu,
HK_FLAG_TIMER))) {
877 span = cpu_online_mask;
878 }
879 #endif
880 for_each_cpu(i, span) {
881 int enqueue = 0;
882 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
883 struct rq *rq = rq_of_rt_rq(rt_rq);
884 int skip;
885
886 /*
887 * When span == cpu_online_mask, taking each rq->lock
888 * can be time-consuming. Try to avoid it when possible.
889 */
890 raw_spin_lock(&rt_rq->rt_runtime_lock);
891 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime !=
RUNTIME_INF)
892 rt_rq->rt_runtime = rt_b->rt_runtime;
893 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
894 raw_spin_unlock(&rt_rq->rt_runtime_lock);
895 if (skip)
896 continue;
897
898 raw_spin_lock(&rq->lock);
899 update_rq_clock(rq);
900
901 if (rt_rq->rt_time) {
902 u64 runtime;
903
904 raw_spin_lock(&rt_rq->rt_runtime_lock);
905 if (rt_rq->rt_throttled)
906 balance_runtime(rt_rq);
907 runtime = rt_rq->rt_runtime;
908 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
909 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
910 rt_rq->rt_throttled = 0;
911 enqueue = 1;
912
913 /*
914 * When we're idle and a woken (rt) task is
915 * throttled check_preempt_curr() will set
916 * skip_update and the time between the wakeup
917 * and this unthrottle will get accounted as
918 * 'runtime'.
919 */
920 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
921 rq_clock_cancel_skipupdate(rq);
922 }
923 if (rt_rq->rt_time || rt_rq->rt_nr_running)
924 idle = 0;
925 raw_spin_unlock(&rt_rq->rt_runtime_lock);
926 } else if (rt_rq->rt_nr_running) {
927 idle = 0;
928 if (!rt_rq_throttled(rt_rq))
929 enqueue = 1;
930 }
931 if (rt_rq->rt_throttled)
932 throttled = 1;
933
934 if (enqueue)
935 sched_rt_rq_enqueue(rt_rq);
936 raw_spin_unlock(&rq->lock);
937 }
938
939 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime ==
RUNTIME_INF))
940 return 1;
941
942 return idle;
943 }
944
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org