tree:
https://github.com/0day-ci/linux/commits/UPDATE-20200404-070422/Stanislav...
head: ef47f2e70c518108df01b5d25f5818f685753cde
commit: 0cf17ffdc9b9273b7a623a6d484d1a2dafc88b1a [9/10] drm/i915: Restrict qgv points
which don't have enough bandwidth.
config: i386-debian-10.3 (attached as .config)
compiler: gcc-7 (Ubuntu 7.5.0-6ubuntu2) 7.5.0
reproduce:
git checkout 0cf17ffdc9b9273b7a623a6d484d1a2dafc88b1a
# save the attached .config to linux build tree
make ARCH=i386
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kbuild test robot <lkp(a)intel.com>
All errors (new ones prefixed by >>):
drivers/gpu/drm/i915/display/intel_bw.c: In function 'intel_bw_atomic_check':
> drivers/gpu/drm/i915/display/intel_bw.c:539:17: error: implicit
declaration of function 'intel_atomic_get_bw_old_state'; did you mean
'intel_atomic_get_bw_state'? [-Werror=implicit-function-declaration]
old_bw_state = intel_atomic_get_bw_old_state(state);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
intel_atomic_get_bw_state
drivers/gpu/drm/i915/display/intel_bw.c:539:15: warning: assignment makes pointer from
integer without a cast [-Wint-conversion]
old_bw_state = intel_atomic_get_bw_old_state(state);
^
cc1: some warnings being treated as errors
vim +539 drivers/gpu/drm/i915/display/intel_bw.c
420
421 int intel_bw_atomic_check(struct intel_atomic_state *state)
422 {
423 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
424 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
425 struct intel_bw_state *new_bw_state = NULL;
426 struct intel_bw_state *old_bw_state = NULL;
427 unsigned int data_rate;
428 unsigned int num_active_planes;
429 struct intel_crtc *crtc;
430 int i, ret;
431 u32 allowed_points = 0;
432 unsigned int max_bw_point = 0, max_bw = 0;
433 unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
434 u32 mask = (1 << num_qgv_points) - 1;
435
436 /* FIXME earlier gens need some checks too */
437 if (INTEL_GEN(dev_priv) < 11)
438 return 0;
439
440 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
441 new_crtc_state, i) {
442 unsigned int old_data_rate =
443 intel_bw_crtc_data_rate(old_crtc_state);
444 unsigned int new_data_rate =
445 intel_bw_crtc_data_rate(new_crtc_state);
446 unsigned int old_active_planes =
447 intel_bw_crtc_num_active_planes(old_crtc_state);
448 unsigned int new_active_planes =
449 intel_bw_crtc_num_active_planes(new_crtc_state);
450
451 /*
452 * Avoid locking the bw state when
453 * nothing significant has changed.
454 */
455 if (old_data_rate == new_data_rate &&
456 old_active_planes == new_active_planes)
457 continue;
458
459 new_bw_state = intel_atomic_get_bw_state(state);
460 if (IS_ERR(new_bw_state))
461 return PTR_ERR(new_bw_state);
462
463 new_bw_state->data_rate[crtc->pipe] = new_data_rate;
464 new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
465
466 drm_dbg_kms(&dev_priv->drm,
467 "pipe %c data rate %u num active planes %u\n",
468 pipe_name(crtc->pipe),
469 new_bw_state->data_rate[crtc->pipe],
470 new_bw_state->num_active_planes[crtc->pipe]);
471 }
472
473 if (!new_bw_state)
474 return 0;
475
476 ret = intel_atomic_lock_global_state(&new_bw_state->base);
477 if (ret) {
478 DRM_DEBUG_KMS("Could not lock global state\n");
479 return ret;
480 }
481
482 data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
483 data_rate = DIV_ROUND_UP(data_rate, 1000);
484
485 num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);
486
487 for (i = 0; i < num_qgv_points; i++) {
488 unsigned int max_data_rate;
489
490 max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
491 /*
492 * We need to know which qgv point gives us
493 * maximum bandwidth in order to disable SAGV
494 * if we find that we exceed SAGV block time
495 * with watermarks. By that moment we already
496 * have those, as it is calculated earlier in
497 * intel_atomic_check,
498 */
499 if (max_data_rate > max_bw) {
500 max_bw_point = i;
501 max_bw = max_data_rate;
502 }
503 if (max_data_rate >= data_rate)
504 allowed_points |= BIT(i);
505 DRM_DEBUG_KMS("QGV point %d: max bw %d required %d\n",
506 i, max_data_rate, data_rate);
507 }
508
509 /*
510 * BSpec states that we always should have at least one allowed point
511 * left, so if we couldn't - simply reject the configuration for obvious
512 * reasons.
513 */
514 if (allowed_points == 0) {
515 DRM_DEBUG_KMS("No QGV points provide sufficient memory"
516 " bandwidth for display configuration.\n");
517 return -EINVAL;
518 }
519
520 /*
521 * Leave only single point with highest bandwidth, if
522 * we can't enable SAGV due to the increased memory latency it may
523 * cause.
524 */
525 if (!intel_can_enable_sagv(new_bw_state)) {
526 allowed_points = 1 << max_bw_point;
527 DRM_DEBUG_KMS("No SAGV, using single QGV point %d\n",
528 max_bw_point);
529 }
530 /*
531 * We store the ones which need to be masked as that is what PCode
532 * actually accepts as a parameter.
533 */
534 new_bw_state->qgv_points_mask = (~allowed_points) & mask;
535
536 DRM_DEBUG_KMS("New state %p qgv mask %x\n",
537 state, new_bw_state->qgv_points_mask);
538
539 old_bw_state = intel_atomic_get_bw_old_state(state);
540 if (IS_ERR(old_bw_state)) {
541 DRM_DEBUG_KMS("Could not get old bw state!\n");
542 return PTR_ERR(old_bw_state);
543 }
544
545 /*
546 * If the actual mask had changed we need to make sure that
547 * the commits are serialized(in case this is a nomodeset, nonblocking)
548 */
549 if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
550 ret = intel_atomic_serialize_global_state(&new_bw_state->base);
551 if (ret) {
552 DRM_DEBUG_KMS("Could not serialize global state\n");
553 return ret;
554 }
555 }
556
557 return 0;
558 }
559
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org