tree: baolu/iommu/master/5.6-rc6/20200318
head: 9c6fbc3fd3d235cb9a54e1e03f19c8690d915d88
commit: 9c6fbc3fd3d235cb9a54e1e03f19c8690d915d88 [14/14] iommu/vt-d: Add page request
draining support
config: x86_64-randconfig-s1-20200317 (attached as .config)
compiler: gcc-4.9 (Debian 4.9.2-10+deb8u1) 4.9.2
reproduce:
git checkout 9c6fbc3fd3d235cb9a54e1e03f19c8690d915d88
# save the attached .config to linux build tree
make ARCH=x86_64
If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <lkp(a)intel.com>
All warnings (new ones prefixed by >>):
drivers//iommu/intel-svm.c: In function 'intel_svm_drain_prq':
> drivers//iommu/intel-svm.c:284:9: warning: missing braces around
initializer [-Wmissing-braces]
struct qi_desc desc[3] = { 0 };
^
drivers//iommu/intel-svm.c:284:9: warning: (near initialization for 'desc[0]')
[-Wmissing-braces]
vim +284 drivers//iommu/intel-svm.c
230
/*
 * Iterate over svm->devs, executing the loop body only for the entry
 * whose ->dev equals d. The "if (...) {} else" form makes the macro
 * safe against dangling-else when used without braces around the body.
 */
 231	#define for_each_svm_dev(sdev, svm, d)	\
 232	list_for_each_entry((sdev), &(svm)->devs, list)	\
 233	if ((d) != (sdev)->dev) {} else
234
235 /*
236 * When a PASID is stopped or terminated, there can be pending PRQs
237 * (requests have not received responses) in remapping hardware.
238 *
239 * There are at least below scenarios that PRQ drains are required:
240 * - unbind a PASID;
241 * - resume phase of the PASID suspend/resume cycle.
242 *
243 * Steps to be performed:
244 * - Disable PR interrupt;
245 * - Take a snapshot of the page request queue, record current head
246 * and tail, and mark PRQ entries with PASID to be dropped;
247 * - Mark queue empty, a.k.a. Head = Tail;
248 * - PRQ draining as described in 7.11 of the spec. Unnecessary to
249 * check queue full since queue was empty at the point of drain;
250 * - Tail could have been moved due to new PRQ written by HW;
251 * - Process snapshot copy of PR queue;
252 * - Process hardware PR queue, enable interrupt again.
253 *
254 * For example, consider the following timeline going downward.
255 * VT-d HW VT-d Driver User(KMD, guest)
256 * --------------------------------------------------------
257 * [PR1.2.3]
258 * [PR1.1.3] <tail>
259 * [PR1.2.2]
260 * [PR1.2.1]
261 * [PR1.1.2]
262 * [PR1.1.1] <head>
263 * [IRQ] ->
264 *
265 *
266 * <- [unbind PASID 1]
267 * [delete pending PR]
268 * [drain PRQs]
269 *
270 * Decoder:
271 * - PR.PASID.GroupID.Index, e.g. PR.1.2.3 indicates the Page request
272 * with PASID = 1, GroupID = 2, 3rd request in the group.
273 * - LPIG: last page in group
274 * - PDP: private data present
275 * - KMD: kernel mode driver for native SVA
276 *
277 * Note:
278 * - Caller of unbind/suspend/resume PASID APIs must ensure no pending
279 * DMA activities prior to call.
280 */
281 static void intel_svm_drain_prq(struct device *dev, int pasid)
282 {
283 struct device_domain_info *info;
284 struct qi_desc desc[3] = { 0 };
285 struct dmar_domain
*domain;
286 struct intel_iommu *iommu;
287 struct intel_svm *svm;
288 struct pci_dev *pdev;
289 unsigned long flags;
290 int head, tail;
291 u16 sid, did;
292 void *prqq;
293 int qdep;
294
295 info = get_domain_info(dev);
296 if (WARN_ON(!info || !dev_is_pci(dev)))
297 return;
298
299 iommu = info->iommu;
300 domain = info->domain;
301 pdev = to_pci_dev(dev);
302
303 rcu_read_lock();
304 svm = ioasid_find(NULL, pasid, NULL);
305 if (WARN_ON(!svm)) {
306 rcu_read_unlock();
307 return;
308 }
309 rcu_read_unlock();
310
311 spin_lock_irqsave(&iommu->lock, flags);
312 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
313 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
314
315 /*
316 * Make a copy of the PR queue then process it offline without
317 * blocking PRQ interrupts.
318 */
319 prqq = kmemdup(iommu->prq, PAGE_SIZE ^ PRQ_ORDER, GFP_ATOMIC);
320 if (!prqq) {
321 spin_unlock_irqrestore(&iommu->lock, flags);
322 return;
323 }
324 /*
325 * Make queue empty to allow further events and avoid the queue
326 * full condition while we drain the queue.
327 */
328 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
329 spin_unlock_irqrestore(&iommu->lock, flags);
330
331 /*
332 * Process the copy of PRQ, drained PASID already marked to be
333 * dropped.
334 */
335 if (intel_svm_process_prq(iommu, prqq, head, tail)) {
336 kfree(prqq);
337 return;
338 }
339
340 /*
341 * Perform steps prescribed in VT-d spec CH7.11 to drain page
342 * request and responses.
343 */
344 sid = PCI_DEVID(info->bus, info->devfn);
345 did = domain->iommu_did[iommu->seq_id];
346 qdep = pci_ats_queue_depth(pdev);
347
348 desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
349 QI_IWD_FENCE |
350 QI_IWD_TYPE;
351 desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
352 QI_EIOTLB_DID(did) |
353 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
354 QI_EIOTLB_TYPE;
355 desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
356 QI_DEV_EIOTLB_SID(sid) |
357 QI_DEV_EIOTLB_QDEP(qdep) |
358 QI_DEIOTLB_TYPE |
359 QI_DEV_IOTLB_PFSID(info->pfsid);
360
361 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
362
363 /*
364 * If new requests come in while we processing the copy, we should
365 * process it now, otherwise the new request may be stuck until the
366 * next IRQ.
367 */
368 if (dmar_readq(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PPR) {
369 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
370 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
371 intel_svm_process_prq(iommu, iommu->prq, head, tail);
372 }
373
374 /* Allow new pending PRQ to generate interrupts. */
375 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
376
377 kfree(prqq);
378 return;
379 }
380
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org