Hi Thomas,
I love your patch! However, there is something to improve:
[auto build test ERROR on kvm/linux-next]
[also build test ERROR on tip/auto-latest linus/master v5.7-rc6 next-20200519]
[cannot apply to linux/master]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest using the '--base' option to specify the
base tree in git format-patch, please see
https://stackoverflow.com/a/37406982]
url:
https://github.com/0day-ci/linux/commits/Thomas-Gleixner/x86-KVM-Async-PF...
base:
https://git.kernel.org/pub/scm/virt/kvm/kvm.git linux-next
config: x86_64-allyesconfig (attached as .config)
compiler: clang version 11.0.0 (
https://github.com/llvm/llvm-project
e6658079aca6d971b4e9d7137a3a2ecbc9c34aec)
reproduce:
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
# install x86_64 cross compiling tool for clang build
# apt-get install binutils-x86-64-linux-gnu
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kbuild test robot <lkp@intel.com>
All errors (new ones prefixed by >>, old ones prefixed by <<):
> arch/x86/kvm/svm/svm.c:3404:2: error: implicit declaration of
function 'trace_hardirqs_on_prepare' [-Werror,-Wimplicit-function-declaration]
trace_hardirqs_on_prepare();
^
arch/x86/kvm/svm/svm.c:3404:2: note: did you mean 'trace_hardirqs_on'?
include/linux/irqflags.h:32:15: note: 'trace_hardirqs_on' declared here
extern void trace_hardirqs_on(void);
^
> arch/x86/kvm/svm/svm.c:3405:2: error: implicit declaration of
function 'lockdep_hardirqs_on_prepare' [-Werror,-Wimplicit-function-declaration]
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
^
arch/x86/kvm/svm/svm.c:3405:2: note: did you mean 'trace_hardirqs_on_prepare'?
arch/x86/kvm/svm/svm.c:3404:2: note: 'trace_hardirqs_on_prepare' declared here
trace_hardirqs_on_prepare();
^
> arch/x86/kvm/svm/svm.c:3434:2: error: implicit declaration of
function 'trace_hardirqs_off_prepare' [-Werror,-Wimplicit-function-declaration]
trace_hardirqs_off_prepare();
^
arch/x86/kvm/svm/svm.c:3434:2: note: did you mean 'trace_hardirqs_on_prepare'?
arch/x86/kvm/svm/svm.c:3404:2: note: 'trace_hardirqs_on_prepare' declared here
trace_hardirqs_on_prepare();
^
3 errors generated.
vim +/trace_hardirqs_on_prepare +3404 arch/x86/kvm/svm/svm.c
3329
/*
 * NOTE(review): excerpt quoted from arch/x86/kvm/svm/svm.c; the number at
 * the start of each line is the original file's own line number.
 *
 * Main SVM guest-entry path: syncs cached RAX/RSP/RIP, CR2, CR8 (lapic) and
 * DR6 state into the VMCB, enters the guest via __svm_vcpu_run(), then
 * restores host state and harvests exit information.  Statement order here
 * is significant (tracing/lockdep/RCU hardirq transitions, the clgi()/stgi()
 * window, SPEC_CTRL save/restore before any traced call), so the code is
 * reproduced unchanged; only review comments are added.
 */
3330 static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
3331 {
3332 fastpath_t exit_fastpath;
3333 struct vcpu_svm *svm = to_svm(vcpu);
3334
3335 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3336 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3337 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3338
3339 /*
3340 * A vmexit emulation is required before the vcpu can be executed
3341 * again.
3342 */
3343 if (unlikely(svm->nested.exit_required))
3344 return EXIT_FASTPATH_NONE;
3345
3346 /*
3347 * Disable singlestep if we're injecting an interrupt/exception.
3348 * We don't want our modified rflags to be pushed on the stack where
3349 * we might not be able to easily reset them if we disabled NMI
3350 * singlestep later.
3351 */
3352 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3353 /*
3354 * Event injection happens before external interrupts cause a
3355 * vmexit and interrupts are disabled here, so smp_send_reschedule
3356 * is enough to force an immediate vmexit.
3357 */
3358 disable_nmi_singlestep(svm);
3359 smp_send_reschedule(vcpu->cpu);
3360 }
3361
3362 pre_svm_run(svm);
3363
3364 sync_lapic_to_cr8(vcpu);
3365
3366 svm->vmcb->save.cr2 = vcpu->arch.cr2;
3367
3368 /*
3369 * Run with all-zero DR6 unless needed, so that we can get the exact cause
3370 * of a #DB.
3371 */
3372 if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
3373 svm_set_dr6(svm, vcpu->arch.dr6);
3374 else
3375 svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
3376
/*
 * NOTE(review): clgi() presumably clears GIF so interrupts/NMIs are held
 * off until the stgi() below -- supported by the "Any pending NMI will
 * happen here" comment after stgi().  See AMD APM CLGI/STGI to confirm.
 */
3377 clgi();
3378 kvm_load_guest_xsave_state(vcpu);
3379
3380 if (lapic_in_kernel(vcpu) &&
3381 vcpu->arch.apic->lapic_timer.timer_advance_ns)
3382 kvm_wait_lapic_expire(vcpu);
3383
3384 /*
3385 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3386 * it's non-zero. Since vmentry is serialising on affected CPUs, there
3387 * is no need to worry about the conditional branch over the wrmsr
3388 * being speculatively taken.
3389 */
3390 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
3391
3392 /*
3393 * VMENTER enables interrupts (host state), but the kernel state is
3394 * interrupts disabled when this is invoked. Also tell RCU about
3395 * it. This is the same logic as for exit_to_user_mode().
3396 *
3397 * This ensures that e.g. latency analysis on the host observes
3398 * guest mode as interrupt enabled.
3399 *
3400 * guest_enter_irqoff() informs context tracking about the
3401 * transition to guest mode and if enabled adjusts RCU state
3402 * accordingly.
3403 */
3404 trace_hardirqs_on_prepare();
3405 lockdep_hardirqs_on_prepare(CALLER_ADDR0);
3406 guest_enter_irqoff();
3407 lockdep_hardirqs_on(CALLER_ADDR0);
3408
/* Low-level entry: runs the guest using svm->vmcb_pa and the GPR array. */
3409 __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
3410
/* Restore host GS base (64-bit) or fs/gs selectors (32-bit) from svm->host. */
3411 #ifdef CONFIG_X86_64
3412 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3413 #else
3414 loadsegment(fs, svm->host.fs);
3415 #ifndef CONFIG_X86_32_LAZY_GS
3416 loadsegment(gs, svm->host.gs);
3417 #endif
3418 #endif
3419
3420 /*
3421 * VMEXIT disables interrupts (host state), but tracing and lockdep
3422 * have them in state 'on' as recorded before entering guest mode.
3423 * Same as enter_from_user_mode().
3424 *
3425 * guest_exit_irqoff() restores host context and reinstates RCU if
3426 * enabled and required.
3427 *
3428 * This needs to be done before the below as native_read_msr()
3429 * contains a tracepoint and x86_spec_ctrl_restore_host() calls
3430 * into world and some more.
3431 */
3432 lockdep_hardirqs_off(CALLER_ADDR0);
3433 guest_exit_irqoff();
3434 trace_hardirqs_off_prepare();
3435
3436 /*
3437 * We do not use IBRS in the kernel. If this vCPU has used the
3438 * SPEC_CTRL MSR it may have left it on; save the value and
3439 * turn it off. This is much more efficient than blindly adding
3440 * it to the atomic save/restore list. Especially as the former
3441 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
3442 *
3443 * For non-nested case:
3444 * If the L01 MSR bitmap does not intercept the MSR, then we need to
3445 * save it.
3446 *
3447 * For nested case:
3448 * If the L02 MSR bitmap does not intercept the MSR, then we need to
3449 * save it.
3450 */
3451 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
3452 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
3453
3454 reload_tss(vcpu);
3455
3456 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
3457
/* Pull guest CR2 and GPRs back out of the VMCB into the vcpu's cached state. */
3458 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3459 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3460 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3461 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3462
/*
 * For NMI exits, open KVM's interrupt bracket before stgi() lets the
 * pending NMI fire (matched by kvm_after_interrupt() below).
 */
3463 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3464 kvm_before_interrupt(&svm->vcpu);
3465
3466 kvm_load_host_xsave_state(vcpu);
3467 stgi();
3468
3469 /* Any pending NMI will happen here */
3470 exit_fastpath = svm_exit_handlers_fastpath(vcpu);
3471
3472 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3473 kvm_after_interrupt(&svm->vcpu);
3474
3475 sync_cr8_to_lapic(vcpu);
3476
3477 svm->next_rip = 0;
3478 svm->nested.nested_run_pending = 0;
3479
3480 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3481
3482 /* if exit due to PF check for async PF */
3483 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3484 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
3485
/* With NPT the guest manages its own PDPTRs; invalidate the cached copies. */
3486 if (npt_enabled) {
3487 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3488 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3489 }
3490
3491 /*
3492 * We need to handle MC intercepts here before the vcpu has a chance to
3493 * change the physical cpu
3494 */
3495 if (unlikely(svm->vmcb->control.exit_code ==
3496 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3497 svm_handle_mce(svm);
3498
3499 mark_all_clean(svm->vmcb);
3500 return exit_fastpath;
3501 }
3502
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org