Hi Jiang,
[FYI, it's a private test report for your RFC patch.]
[auto build test WARNING on vhost/linux-next]
[also build test WARNING on tip/perf/core linus/master v5.13-rc6]
[cannot apply to next-20210615]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:
https://github.com/0day-ci/linux/commits/Jiang-Wang/virtio-vsock-introduc...
base:
https://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git linux-next
config: arm64-randconfig-s031-20210615 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce:
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# apt-get install sparse
# sparse version: v0.6.3-341-g8af24329-dirty
#
https://github.com/0day-ci/linux/commit/0d43b802cb4112ba50c616916364ada91...
git remote add linux-review
https://github.com/0day-ci/linux
git fetch --no-tags linux-review
Jiang-Wang/virtio-vsock-introduce-SOCK_DGRAM-support/20210616-120056
git checkout 0d43b802cb4112ba50c616916364ada91c24a7bb
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross C=1
CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' W=1 ARCH=arm64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
>> drivers/vhost/vsock.c:150:29: sparse: sparse: restricted __le16 degrades to integer
   drivers/vhost/vsock.c:345:21: sparse: sparse: restricted __le16 degrades to integer
   drivers/vhost/vsock.c:349:28: sparse: sparse: restricted __le16 degrades to integer
   drivers/vhost/vsock.c:364:21: sparse: sparse: restricted __le16 degrades to integer
vim +150 drivers/vhost/vsock.c
96
97 static void
98 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
99 struct vhost_virtqueue *vq)
100 {
101 struct vhost_virtqueue *tx_vq;
102 int pkts = 0, total_len = 0;
103 bool added = false;
104 bool restart_tx = false;
105 spinlock_t *lock;
106 struct list_head *send_pkt_list;
107
108 if (vq == &vsock->vqs[VSOCK_VQ_RX]) {
109 tx_vq = &vsock->vqs[VSOCK_VQ_TX];
110 lock = &vsock->send_pkt_list_lock;
111 send_pkt_list = &vsock->send_pkt_list;
112 } else {
113 tx_vq = &vsock->vqs[VSOCK_VQ_DGRAM_TX];
114 lock = &vsock->dgram_send_pkt_list_lock;
115 send_pkt_list = &vsock->dgram_send_pkt_list;
116 }
117
118 mutex_lock(&vq->mutex);
119
120 if (!vhost_vq_get_backend(vq))
121 goto out;
122
123 if (!vq_meta_prefetch(vq))
124 goto out;
125
126 /* Avoid further vmexits, we're already processing the virtqueue */
127 vhost_disable_notify(&vsock->dev, vq);
128
129 do {
130 struct virtio_vsock_pkt *pkt;
131 struct iov_iter iov_iter;
132 unsigned out, in;
133 size_t nbytes;
134 size_t iov_len, payload_len;
135 int head;
136 bool is_dgram = false;
137
138 spin_lock_bh(lock);
139 if (list_empty(send_pkt_list)) {
140 spin_unlock_bh(lock);
141 vhost_enable_notify(&vsock->dev, vq);
142 break;
143 }
144
145 pkt = list_first_entry(send_pkt_list,
146 struct virtio_vsock_pkt, list);
147 list_del_init(&pkt->list);
148 spin_unlock_bh(lock);
149
150 if (pkt->hdr.type == VIRTIO_VSOCK_TYPE_DGRAM)
151 is_dgram = true;
152
153 head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
154 &out, &in, NULL, NULL);
155 if (head < 0) {
156 spin_lock_bh(lock);
157 list_add(&pkt->list, send_pkt_list);
158 spin_unlock_bh(lock);
159 break;
160 }
161
162 if (head == vq->num) {
163 if (is_dgram) {
164 virtio_transport_free_pkt(pkt);
165 vq_err(vq, "Dgram virtqueue is full!");
166 spin_lock_bh(lock);
167 vsock->dgram_used--;
168 spin_unlock_bh(lock);
169 break;
170 }
171 spin_lock_bh(lock);
172 list_add(&pkt->list, send_pkt_list);
173 spin_unlock_bh(lock);
174
175 /* We cannot finish yet if more buffers snuck in while
176 * re-enabling notify.
177 */
178 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
179 vhost_disable_notify(&vsock->dev, vq);
180 continue;
181 }
182 break;
183 }
184
185 if (out) {
186 virtio_transport_free_pkt(pkt);
187 vq_err(vq, "Expected 0 output buffers, got %u\n", out);
188 if (is_dgram) {
189 spin_lock_bh(lock);
190 vsock->dgram_used--;
191 spin_unlock_bh(lock);
192 }
193
194 break;
195 }
196
197 iov_len = iov_length(&vq->iov[out], in);
198 if (iov_len < sizeof(pkt->hdr)) {
199 virtio_transport_free_pkt(pkt);
200 vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
201 if (is_dgram) {
202 spin_lock_bh(lock);
203 vsock->dgram_used--;
204 spin_unlock_bh(lock);
205 }
206 break;
207 }
208
209 if (iov_len < pkt->len - pkt->off &&
210 vq == &vsock->vqs[VSOCK_VQ_DGRAM_RX]) {
211 virtio_transport_free_pkt(pkt);
212 vq_err(vq, "Buffer len [%zu] too small for dgram\n", iov_len);
213 break;
214 }
215
216 iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
217 payload_len = pkt->len - pkt->off;
218
219 /* If the packet is greater than the space available in the
220 * buffer, we split it using multiple buffers.
221 */
222 if (payload_len > iov_len - sizeof(pkt->hdr))
223 payload_len = iov_len - sizeof(pkt->hdr);
224
225 /* Set the correct length in the header */
226 pkt->hdr.len = cpu_to_le32(payload_len);
227
228 nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
229 if (nbytes != sizeof(pkt->hdr)) {
230 virtio_transport_free_pkt(pkt);
231 vq_err(vq, "Faulted on copying pkt hdr\n");
232 if (is_dgram) {
233 spin_lock_bh(lock);
234 vsock->dgram_used--;
235 spin_unlock_bh(lock);
236 }
237 break;
238 }
239
240 nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
241 &iov_iter);
242 if (nbytes != payload_len) {
243 virtio_transport_free_pkt(pkt);
244 vq_err(vq, "Faulted on copying pkt buf\n");
245 break;
246 }
247
248 /* Deliver to monitoring devices all packets that we
249 * will transmit.
250 */
251 virtio_transport_deliver_tap_pkt(pkt);
252
253 vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
254 added = true;
255
256 pkt->off += payload_len;
257 total_len += payload_len;
258
259 /* If we didn't send all the payload we can requeue the packet
260 * to send it with the next available buffer.
261 */
262 if ((pkt->off < pkt->len)
263 && (vq == &vsock->vqs[VSOCK_VQ_RX])) {
264 /* We are queueing the same virtio_vsock_pkt to handle
265 * the remaining bytes, and we want to deliver it
266 * to monitoring devices in the next iteration.
267 */
268 pkt->tap_delivered = false;
269
270 spin_lock_bh(lock);
271 list_add(&pkt->list, send_pkt_list);
272 spin_unlock_bh(lock);
273 } else {
274 if (pkt->reply) {
275 int val;
276
277 val = atomic_dec_return(&vsock->queued_replies);
278
279 /* Do we have resources to resume tx
280 * processing?
281 */
282 if (val + 1 == tx_vq->num)
283 restart_tx = true;
284 }
285
286 virtio_transport_free_pkt(pkt);
287 if (is_dgram) {
288 spin_lock_bh(lock);
289 vsock->dgram_used--;
290 spin_unlock_bh(lock);
291 }
292 }
293 } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
294 if (added)
295 vhost_signal(&vsock->dev, vq);
296
297 out:
298 mutex_unlock(&vq->mutex);
299
300 if (restart_tx)
301 vhost_poll_queue(&tx_vq->poll);
302 }
303
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org