Hi Thomas,
I love your patch! However, there is something to improve:
[auto build test ERROR on kvm/linux-next]
[cannot apply to tip/auto-latest linus/master linux/master v5.7-rc6 next-20200519]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest using the '--base' option to specify the
base tree in 'git format-patch'; please see
https://stackoverflow.com/a/37406982]
url:
https://github.com/0day-ci/linux/commits/Thomas-Gleixner/x86-KVM-Async-PF...
base:
https://git.kernel.org/pub/scm/virt/kvm/kvm.git linux-next
config: i386-allyesconfig (attached as .config)
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kbuild test robot <lkp(a)intel.com>
All errors (new ones prefixed by >>, old ones prefixed by <<):
> arch/x86/kvm/svm/svm.c:3330:16: error: expected '=',
',', ';', 'asm' or '__attribute__' before 'void'
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
^~~~
arch/x86/kvm/svm/svm.c: In function 'svm_vcpu_run':
> arch/x86/kvm/svm/svm.c:3446:2: error: implicit declaration of
function 'svm_vcpu_enter_exit'; did you mean 'kvm_vcpu_mtrr_init'?
[-Werror=implicit-function-declaration]
svm_vcpu_enter_exit(vcpu, svm);
^~~~~~~~~~~~~~~~~~~
kvm_vcpu_mtrr_init
cc1: some warnings being treated as errors
vim +3330 arch/x86/kvm/svm/svm.c
3329
3330 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
3331 struct vcpu_svm *svm)
3332 {
3333 /*
3334 * VMENTER enables interrupts (host state), but the kernel state is
3335 * interrupts disabled when this is invoked. Also tell RCU about
3336 * it. This is the same logic as for exit_to_user_mode().
3337 *
3338 * This ensures that e.g. latency analysis on the host observes
3339 * guest mode as interrupt enabled.
3340 *
3341 * guest_enter_irqoff() informs context tracking about the
3342 * transition to guest mode and if enabled adjusts RCU state
3343 * accordingly.
3344 */
3345 instrumentation_begin();
3346 trace_hardirqs_on_prepare();
3347 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
3348 instrumentation_end();
3349
3350 guest_enter_irqoff();
3351 lockdep_hardirqs_on(CALLER_ADDR0);
3352
3353 __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
3354
3355 #ifdef CONFIG_X86_64
3356 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3357 #else
3358 loadsegment(fs, svm->host.fs);
3359 #ifndef CONFIG_X86_32_LAZY_GS
3360 loadsegment(gs, svm->host.gs);
3361 #endif
3362 #endif
3363
3364 /*
3365 * VMEXIT disables interrupts (host state), but tracing and lockdep
3366 * have them in state 'on' as recorded before entering guest mode.
3367 * Same as enter_from_user_mode().
3368 *
3369 * guest_exit_irqoff() restores host context and reinstates RCU if
3370 * enabled and required.
3371 *
3372 * This needs to be done before the below as native_read_msr()
3373 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
3374 * into world and some more.
3375 */
3376 lockdep_hardirqs_off(CALLER_ADDR0);
3377 guest_exit_irqoff();
3378
3379 instrumentation_begin();
3380 trace_hardirqs_off_prepare();
3381 instrumentation_end();
3382 }
3383
/*
 * Perform one guest entry/exit cycle for an SVM vCPU: sync RAX/RSP/RIP and
 * CR2 into the VMCB, handle SPEC_CTRL guest/host swapping, enter the guest
 * inside a GIF-disabled window (clgi/stgi), then do post-exit fixups (NMI
 * bracketing, CR8/LAPIC sync, async #PF reason, MCE intercept) and return
 * the fastpath disposition from svm_exit_handlers_fastpath().
 */
3384 static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
3385 {
3386 fastpath_t exit_fastpath;
3387 struct vcpu_svm *svm = to_svm(vcpu);
3388
/* Propagate the architecturally cached GPRs into the VMCB save area. */
3389 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3390 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3391 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3392
3393 /*
3394 * A vmexit emulation is required before the vcpu can be executed
3395 * again.
3396 */
3397 if (unlikely(svm->nested.exit_required))
3398 return EXIT_FASTPATH_NONE;
3399
3400 /*
3401 * Disable singlestep if we're injecting an interrupt/exception.
3402 * We don't want our modified rflags to be pushed on the stack where
3403 * we might not be able to easily reset them if we disabled NMI
3404 * singlestep later.
3405 */
3406 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3407 /*
3408 * Event injection happens before external interrupts cause a
3409 * vmexit and interrupts are disabled here, so smp_send_reschedule
3410 * is enough to force an immediate vmexit.
3411 */
3412 disable_nmi_singlestep(svm);
3413 smp_send_reschedule(vcpu->cpu);
3414 }
3415
3416 pre_svm_run(svm);
3417
3418 sync_lapic_to_cr8(vcpu);
3419
3420 svm->vmcb->save.cr2 = vcpu->arch.cr2;
3421
3422 /*
3423 * Run with all-zero DR6 unless needed, so that we can get the exact cause
3424 * of a #DB.
3425 */
3426 if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
3427 svm_set_dr6(svm, vcpu->arch.dr6);
3428 else
3429 svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
3430
/* GIF=0: interrupts/NMIs are held off until the stgi() after the exit. */
3431 clgi();
3432 kvm_load_guest_xsave_state(vcpu);
3433
3434 if (lapic_in_kernel(vcpu) &&
3435 vcpu->arch.apic->lapic_timer.timer_advance_ns)
3436 kvm_wait_lapic_expire(vcpu);
3437
3438 /*
3439 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3440 * it's non-zero. Since vmentry is serialising on affected CPUs, there
3441 * is no need to worry about the conditional branch over the wrmsr
3442 * being speculatively taken.
3443 */
3444 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
3445
/*
 * The actual world switch lives in the noinstr helper above; the second
 * build error (implicit declaration) is a knock-on effect of the first --
 * the helper's definition failed to parse, so this call finds no prototype.
 */
3446 svm_vcpu_enter_exit(vcpu, svm);
3447
3448 /*
3449 * We do not use IBRS in the kernel. If this vCPU has used the
3450 * SPEC_CTRL MSR it may have left it on; save the value and
3451 * turn it off. This is much more efficient than blindly adding
3452 * it to the atomic save/restore list. Especially as the former
3453 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
3454 *
3455 * For non-nested case:
3456 * If the L01 MSR bitmap does not intercept the MSR, then we need to
3457 * save it.
3458 *
3459 * For nested case:
3460 * If the L02 MSR bitmap does not intercept the MSR, then we need to
3461 * save it.
3462 */
3463 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
3464 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
3465
3466 reload_tss(vcpu);
3467
3468 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
3469
/* Read back the guest's CR2 and GPRs saved by hardware in the VMCB. */
3470 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3471 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3472 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3473 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3474
/*
 * NMI-exit bracketing: the kvm_before/after_interrupt() pair surrounds
 * stgi() so the NMI that caused the exit is delivered (and attributed)
 * inside the bracketed region once GIF is set again.
 */
3475 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3476 kvm_before_interrupt(&svm->vcpu);
3477
3478 kvm_load_host_xsave_state(vcpu);
3479 stgi();
3480
3481 /* Any pending NMI will happen here */
3482 exit_fastpath = svm_exit_handlers_fastpath(vcpu);
3483
3484 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3485 kvm_after_interrupt(&svm->vcpu);
3486
3487 sync_cr8_to_lapic(vcpu);
3488
3489 svm->next_rip = 0;
3490 svm->nested.nested_run_pending = 0;
3491
3492 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3493
3494 /* if exit due to PF check for async PF */
3495 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3496 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
3497
/* Guest-owned PDPTRs may have changed under NPT; force a re-read. */
3498 if (npt_enabled) {
3499 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3500 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3501 }
3502
3503 /*
3504 * We need to handle MC intercepts here before the vcpu has a chance to
3505 * change the physical cpu
3506 */
3507 if (unlikely(svm->vmcb->control.exit_code ==
3508 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3509 svm_handle_mce(svm);
3510
3511 mark_all_clean(svm->vmcb);
3512 return exit_fastpath;
3513 }
3514
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org