[PATCH v2 7/9] KVM: SVM: keep DR6 synchronized with vcpu->arch.dr6

From: Paolo Bonzini
Date: Thu May 07 2020 - 07:50:55 EST


kvm_x86_ops.set_dr6 is only ever called with vcpu->arch.dr6 as the
second argument. Ensure that the VMCB value is synchronized to
vcpu->arch.dr6 on #DB (both "normal" and nested) and on entry to the
nested guest, so that the current value of DR6 is always available in
vcpu->arch.dr6. kvm_get_dr can then read vcpu->arch.dr6 directly, and
the get_dr6 callback becomes redundant, so drop it.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
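Not part of the patch itself, just an illustration of the invariant it
establishes: on SVM the hardware writes DR6 into the VMCB save area, and
after this change every #DB path copies that value into vcpu->arch.dr6,
so common code can treat vcpu->arch.dr6 as authoritative. The standalone
sketch below models that flow with stand-in types; only the field names
and the direction of the copies follow the diff.

/*
 * Stand-in model, not kernel code: shows where DR6 lives on SVM and
 * which direction the copies go after this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define DR6_BS (1ULL << 14)                 /* single-step (BS) bit */

struct vmcb_save_area { uint64_t dr6; };    /* hardware-visible copy (VMCB) */
struct kvm_vcpu_arch  { uint64_t dr6; };    /* architectural copy, common code */

struct vcpu {
	struct kvm_vcpu_arch arch;
	struct vmcb_save_area vmcb_save;
};

/* What kvm_get_dr(vcpu, 6, ...) reduces to once the callback is gone. */
static uint64_t get_dr6(struct vcpu *vcpu)
{
	return vcpu->arch.dr6;
}

/*
 * Sync point on #DB: db_interception (and the nested reflection path in
 * nested_svm_intercept_db) copy the VMCB value into the architectural copy.
 */
static void sync_dr6_on_db(struct vcpu *vcpu)
{
	vcpu->arch.dr6 = vcpu->vmcb_save.dr6;
}

int main(void)
{
	struct vcpu v = {
		.arch.dr6      = 0xffff0ff0,            /* reset value */
		.vmcb_save.dr6 = 0xffff0ff0 | DR6_BS,   /* hardware set BS */
	};

	sync_dr6_on_db(&v);
	printf("DR6 = %#llx\n", (unsigned long long)get_dr6(&v));
	return 0;
}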
arch/x86/include/asm/kvm_host.h | 1 -
arch/x86/kvm/svm/nested.c | 23 +++++++++++++++--------
arch/x86/kvm/svm/svm.c | 9 ++-------
arch/x86/kvm/vmx/vmx.c | 6 ------
arch/x86/kvm/x86.c | 5 +----
5 files changed, 18 insertions(+), 26 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8c247bcb037e..93f6f696d059 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1093,7 +1093,6 @@ struct kvm_x86_ops {
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
- u64 (*get_dr6)(struct kvm_vcpu *vcpu);
void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index adab5b1c5fe1..1a547e3ac0e5 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>

#include <asm/msr-index.h>
+#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
@@ -267,7 +268,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
svm->vmcb->save.rip = nested_vmcb->save.rip;
svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
- svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
+ svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
svm->vmcb->save.cpl = nested_vmcb->save.cpl;

svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
@@ -482,7 +483,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
nested_vmcb->save.rsp = vmcb->save.rsp;
nested_vmcb->save.rax = vmcb->save.rax;
nested_vmcb->save.dr7 = vmcb->save.dr7;
- nested_vmcb->save.dr6 = vmcb->save.dr6;
+ nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
nested_vmcb->save.cpl = vmcb->save.cpl;

nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
@@ -606,7 +607,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
/* DB exceptions for our internal use must not cause vmexit */
static int nested_svm_intercept_db(struct vcpu_svm *svm)
{
- unsigned long dr6;
+ unsigned long dr6 = svm->vmcb->save.dr6;

/* Always catch it and pass it to userspace if debugging the guest. */
if (svm->vcpu.guest_debug &
@@ -615,22 +616,28 @@ static int nested_svm_intercept_db(struct vcpu_svm *svm)

/* if we're not singlestepping, it's not ours */
if (!svm->nmi_singlestep)
- return NESTED_EXIT_DONE;
+ goto reflected_db;

/* if it's not a singlestep exception, it's not ours */
- if (kvm_get_dr(&svm->vcpu, 6, &dr6))
- return NESTED_EXIT_DONE;
if (!(dr6 & DR6_BS))
- return NESTED_EXIT_DONE;
+ goto reflected_db;

/* if the guest is singlestepping, it should get the vmexit */
if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
disable_nmi_singlestep(svm);
- return NESTED_EXIT_DONE;
+ goto reflected_db;
}

/* it's ours, the nested hypervisor must not see this one */
return NESTED_EXIT_HOST;
+
+reflected_db:
+ /*
+ * Synchronize guest DR6 here just like in db_interception; it will
+ * be moved into the nested VMCB by nested_svm_vmexit.
+ */
+ svm->vcpu.arch.dr6 = dr6;
+ return NESTED_EXIT_DONE;
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 38f6aeefeb55..f03bffafd9e6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1672,11 +1672,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
mark_dirty(svm->vmcb, VMCB_ASID);
}

-static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
-{
- return to_svm(vcpu)->vmcb->save.dr6;
-}
-
static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1693,7 +1688,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
get_debugreg(vcpu->arch.db[1], 1);
get_debugreg(vcpu->arch.db[2], 2);
get_debugreg(vcpu->arch.db[3], 3);
- vcpu->arch.dr6 = svm_get_dr6(vcpu);
+ vcpu->arch.dr6 = svm->vmcb->save.dr6;
vcpu->arch.dr7 = svm->vmcb->save.dr7;

vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
@@ -1739,6 +1734,7 @@ static int db_interception(struct vcpu_svm *svm)
if (!(svm->vcpu.guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
!svm->nmi_singlestep) {
+ svm->vcpu.arch.dr6 = svm->vmcb->save.dr6;
kvm_queue_exception(&svm->vcpu, DB_VECTOR);
return 1;
}
@@ -3931,7 +3927,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
- .get_dr6 = svm_get_dr6,
.set_dr6 = svm_set_dr6,
.set_dr7 = svm_set_dr7,
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2384a2dbec44..6153a47109d3 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4965,11 +4965,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}

-static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.dr6;
-}
-
static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
{
}
@@ -7736,7 +7731,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
- .get_dr6 = vmx_get_dr6,
.set_dr6 = vmx_set_dr6,
.set_dr7 = vmx_set_dr7,
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f7628555f036..b1b92d904f37 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1129,10 +1129,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
case 4:
/* fall through */
case 6:
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- *val = vcpu->arch.dr6;
- else
- *val = kvm_x86_ops.get_dr6(vcpu);
+ *val = vcpu->arch.dr6;
break;
case 5:
/* fall through */
--
2.18.2
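
One more illustration, also not from the patch: vcpu->arch.dr6 is the
value that the KVM_GET_DEBUGREGS ioctl reports to userspace, so the copy
kept current by this change is the one a VMM actually observes. A minimal
probe, assuming a host with /dev/kvm and KVM_CAP_DEBUGREGS (error handling
trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_debugregs dbg;
	int kvm, vm, vcpu;

	kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0)
		return 1;
	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0)
		return 1;
	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	if (vcpu < 0)
		return 1;

	/* Returns vcpu->arch.dr6/dr7; a fresh vcpu reports the reset values. */
	if (ioctl(vcpu, KVM_GET_DEBUGREGS, &dbg))
		return 1;
	printf("dr6 = %#llx, dr7 = %#llx\n",
	       (unsigned long long)dbg.dr6, (unsigned long long)dbg.dr7);
	return 0;
}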