Hi Ravi,
FYI, the error/warning still remains.
tree:
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head: fb57b1fabcb28f358901b2df90abd2b48abc1ca8
commit: 29da4f91c0c1fbda12b8a31be0d564930208c92e [10157/10701] powerpc/watchpoint:
Don't allow concurrent perf and ptrace events
config: arm64-randconfig-r026-20200519 (attached as .config)
compiler: clang version 11.0.0 (
https://github.com/llvm/llvm-project
135b877874fae96b4372c8a3fbfaa8ff44ff86e3)
reproduce:
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# install arm64 cross compiling tool for clang build
# apt-get install binutils-aarch64-linux-gnu
git checkout 29da4f91c0c1fbda12b8a31be0d564930208c92e
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kbuild test robot <lkp@intel.com>
All warnings (new ones prefixed by >>, old ones prefixed by <<):
kernel/events/hw_breakpoint.c:71:12: warning: no previous prototype for function
'hw_breakpoint_weight' [-Wmissing-prototypes]
__weak int hw_breakpoint_weight(struct perf_event *bp)
^
kernel/events/hw_breakpoint.c:71:8: note: declare 'static' if the function is not
intended to be used outside of this translation unit
__weak int hw_breakpoint_weight(struct perf_event *bp)
^
static
> kernel/events/hw_breakpoint.c:216:12: warning: no previous
prototype for function 'arch_reserve_bp_slot' [-Wmissing-prototypes]
__weak
int arch_reserve_bp_slot(struct perf_event *bp)
^
kernel/events/hw_breakpoint.c:216:8: note: declare 'static' if the function is not
intended to be used outside of this translation unit
__weak int arch_reserve_bp_slot(struct perf_event *bp)
^
static
> kernel/events/hw_breakpoint.c:221:13: warning: no previous
prototype for function 'arch_release_bp_slot' [-Wmissing-prototypes]
__weak
void arch_release_bp_slot(struct perf_event *bp)
^
kernel/events/hw_breakpoint.c:221:8: note: declare 'static' if the function is not
intended to be used outside of this translation unit
__weak void arch_release_bp_slot(struct perf_event *bp)
^
static
kernel/events/hw_breakpoint.c:228:13: warning: no previous prototype for function
'arch_unregister_hw_breakpoint' [-Wmissing-prototypes]
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
^
kernel/events/hw_breakpoint.c:228:8: note: declare 'static' if the function is not
intended to be used outside of this translation unit
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
^
static
4 warnings generated.
vim +/arch_reserve_bp_slot +216 kernel/events/hw_breakpoint.c
70
71 __weak int hw_breakpoint_weight(struct perf_event *bp)
72 {
73 return 1;
74 }
75
76 static inline enum bp_type_idx find_slot_idx(u64 bp_type)
77 {
78 if (bp_type & HW_BREAKPOINT_RW)
79 return TYPE_DATA;
80
81 return TYPE_INST;
82 }
83
84 /*
85 * Report the maximum number of pinned breakpoints a task
86 * have in this cpu
87 */
88 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
89 {
90 unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
91 int i;
92
93 for (i = nr_slots[type] - 1; i >= 0; i--) {
94 if (tsk_pinned[i] > 0)
95 return i + 1;
96 }
97
98 return 0;
99 }
100
101 /*
102 * Count the number of breakpoints of the same type and same task.
103 * The given event must be not on the list.
104 */
105 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
106 {
107 struct task_struct *tsk = bp->hw.target;
108 struct perf_event *iter;
109 int count = 0;
110
111 list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
112 if (iter->hw.target == tsk &&
113 find_slot_idx(iter->attr.bp_type) == type &&
114 (iter->cpu < 0 || cpu == iter->cpu))
115 count += hw_breakpoint_weight(iter);
116 }
117
118 return count;
119 }
120
121 static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
122 {
123 if (bp->cpu >= 0)
124 return cpumask_of(bp->cpu);
125 return cpu_possible_mask;
126 }
127
128 /*
129 * Report the number of pinned/un-pinned breakpoints we have in
130 * a given cpu (cpu > -1) or in all of them (cpu = -1).
131 */
132 static void
133 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
134 enum bp_type_idx type)
135 {
136 const struct cpumask *cpumask = cpumask_of_bp(bp);
137 int cpu;
138
139 for_each_cpu(cpu, cpumask) {
140 struct bp_cpuinfo *info = get_bp_info(cpu, type);
141 int nr;
142
143 nr = info->cpu_pinned;
144 if (!bp->hw.target)
145 nr += max_task_bp_pinned(cpu, type);
146 else
147 nr += task_bp_pinned(cpu, bp, type);
148
149 if (nr > slots->pinned)
150 slots->pinned = nr;
151
152 nr = info->flexible;
153 if (nr > slots->flexible)
154 slots->flexible = nr;
155 }
156 }
157
158 /*
159 * For now, continue to consider flexible as pinned, until we can
160 * ensure no flexible event can ever be scheduled before a pinned event
161 * in a same cpu.
162 */
163 static void
164 fetch_this_slot(struct bp_busy_slots *slots, int weight)
165 {
166 slots->pinned += weight;
167 }
168
169 /*
170 * Add a pinned breakpoint for the given task in our constraint table
171 */
172 static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
173 enum bp_type_idx type, int weight)
174 {
175 unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
176 int old_idx, new_idx;
177
178 old_idx = task_bp_pinned(cpu, bp, type) - 1;
179 new_idx = old_idx + weight;
180
181 if (old_idx >= 0)
182 tsk_pinned[old_idx]--;
183 if (new_idx >= 0)
184 tsk_pinned[new_idx]++;
185 }
186
187 /*
188 * Add/remove the given breakpoint in our constraint table
189 */
190 static void
191 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
192 int weight)
193 {
194 const struct cpumask *cpumask = cpumask_of_bp(bp);
195 int cpu;
196
197 if (!enable)
198 weight = -weight;
199
200 /* Pinned counter cpu profiling */
201 if (!bp->hw.target) {
202 get_bp_info(bp->cpu, type)->cpu_pinned += weight;
203 return;
204 }
205
206 /* Pinned counter task profiling */
207 for_each_cpu(cpu, cpumask)
208 toggle_bp_task_slot(bp, cpu, type, weight);
209
210 if (enable)
211 list_add_tail(&bp->hw.bp_list, &bp_task_head);
212 else
213 list_del(&bp->hw.bp_list);
214 }
215
216 __weak int arch_reserve_bp_slot(struct perf_event *bp)
217 {
218 return 0;
219 }
220
221 __weak void arch_release_bp_slot(struct perf_event *bp)
222 {
223 }
224
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org