diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ae8e62df16dd..21a3ef3012cf 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6625,6 +6625,10 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->host_state.cr4 = cr4;
}
+ /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
+ if (vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
+ set_debugreg(vcpu->arch.dr6, 6);
I also noticed the related code in svm.c, but I refrained from adding
a new branch in vmx_vcpu_run(). Now that I see you put the code for
resetting dr6 in vmx_sync_dirty_debug_regs(), the whole solution is
much cleaner and better.
And if by any chance you are also concerned about the additional branch,
could you add a new callback to set dr6 and call the callback from
x86.c when KVM_DEBUGREG_WONT_EXIT is set?
A possible implementation of the callback (a rough sketch follows below):
for vmx: set_debugreg(vcpu->arch.dr6, 6);
for svm: svm_set_dr6(svm, vcpu->arch.dr6);
and always do svm_set_dr6(svm, DR6_ACTIVE_LOW); at the end of
svm_handle_exit().
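Just to illustrate the idea, a rough, untested sketch; the hook name
.set_guest_dr6 and the wiring below are only illustrative, not anything
already in this patch:

	/* Hypothetical hook in struct kvm_x86_ops (illustrative name only). */
	void (*set_guest_dr6)(struct kvm_vcpu *vcpu, unsigned long value);

	/* vmx.c: the guest's dr6 value lives in the hardware register. */
	static void vmx_set_guest_dr6(struct kvm_vcpu *vcpu, unsigned long value)
	{
		set_debugreg(value, 6);
	}

	/* svm.c: dr6 is kept in the VMCB, so reuse the existing helper. */
	static void svm_set_guest_dr6(struct kvm_vcpu *vcpu, unsigned long value)
	{
		svm_set_dr6(to_svm(vcpu), value);
	}

	/* x86.c, vcpu_enter_guest(): call it only when DR accesses won't exit. */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
		kvm_x86_ops.set_guest_dr6(vcpu, vcpu->arch.dr6);

That way x86.c only needs the single hook call, and svm_handle_exit()
can still restore DR6_ACTIVE_LOW at the end as suggested above.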
Thanks
Lai
+
/* When single-stepping over STI and MOV SS, we must clear the
* corresponding interruptibility bits in the guest state. Otherwise
* vmentry fails as it then expects bit 14 (BS) in pending debug
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a111899ab2b4..fbc536b21585 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9597,7 +9597,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
- set_debugreg(vcpu->arch.dr6, 6);
} else if (unlikely(hw_breakpoint_active())) {
set_debugreg(0, 7);
}
Paolo