[PATCH 3/3] KVM: x86: Move #PF retry tracking variables into emulation context

From: Sean Christopherson
Date: Fri Nov 22 2019 - 17:58:36 EST


Move last_retry_eip and last_retry_addr into the emulation context as
they are used only by retry_instruction(), i.e. are specific to
re-executing an instruction whose emulation was triggered by a #PF on a
shadow-protected page.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
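Note for reviewers (not intended for the changelog): the pair remains
per-vCPU state either way, since the emulation context lives in
kvm_vcpu_arch; only its home changes, and init_emulate_ctxt() deliberately
leaves it alone so the values survive back-to-back emulation attempts.
Below is a condensed sketch of how the bookkeeping reads after this patch
(the helper name and trimmed signature are made up for illustration; this
is not the retry_instruction() body verbatim):

static bool pf_retry_allowed(struct x86_emulate_ctxt *ctxt,
			     unsigned long cr2, int emulation_type)
{
	unsigned long prev_eip = ctxt->last_retry_eip;
	unsigned long prev_addr = ctxt->last_retry_addr;

	/* Stale after one use; clear so an unrelated fault starts fresh. */
	ctxt->last_retry_eip = ctxt->last_retry_addr = 0;

	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
		return false;

	/* Same RIP faulting on the same address twice in a row: emulate. */
	if (ctxt->eip == prev_eip && prev_addr == cr2)
		return false;

	/* Remember this attempt so a repeated fault breaks the loop. */
	ctxt->last_retry_eip = ctxt->eip;
	ctxt->last_retry_addr = cr2;
	return true;
}

The gpa translation and kvm_mmu_unprotect_page() call that follow in the
real function are untouched by this patch.
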
 arch/x86/include/asm/kvm_emulate.h |  4 ++++
 arch/x86/include/asm/kvm_host.h    |  3 ---
 arch/x86/kvm/x86.c                 | 11 ++++++-----
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index e81658a4ab9d..9c5db3b4120e 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -311,6 +311,10 @@ struct x86_emulate_ctxt {
 	bool gpa_available;
 	gpa_t gpa_val;
 
+	/* Track EIP and CR2/GPA when retrying faulting instruction on #PF. */
+	unsigned long last_retry_eip;
+	unsigned long last_retry_addr;
+
 	/*
 	 * decode cache
 	 */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e434f4cfecd1..6c8bfebabc31 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -745,9 +745,6 @@ struct kvm_vcpu_arch {
 
 	cpumask_var_t wbinvd_dirty_mask;
 
-	unsigned long last_retry_eip;
-	unsigned long last_retry_addr;
-
 	struct {
 		bool halted;
 		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9a8adfdf1e0a..3aa2d7d98779 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6317,6 +6317,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 
 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
+	/* last_retry_{eip,addr} are persistent and must not be init'd here. */
 	ctxt->gpa_available = false;
 	ctxt->eflags = kvm_get_rflags(vcpu);
 	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
@@ -6467,8 +6468,8 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
 
-	last_retry_eip = vcpu->arch.last_retry_eip;
-	last_retry_addr = vcpu->arch.last_retry_addr;
+	last_retry_eip = ctxt->last_retry_eip;
+	last_retry_addr = ctxt->last_retry_addr;
 
 	/*
 	 * If the emulation is caused by #PF and it is non-page_table
@@ -6483,7 +6484,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	 * and the address again, we can break out of the potential infinite
 	 * loop.
 	 */
-	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
+	ctxt->last_retry_eip = ctxt->last_retry_addr = 0;
 
 	if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
 		return false;
@@ -6498,8 +6499,8 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
 		return false;
 
-	vcpu->arch.last_retry_eip = ctxt->eip;
-	vcpu->arch.last_retry_addr = cr2;
+	ctxt->last_retry_eip = ctxt->eip;
+	ctxt->last_retry_addr = cr2;
 
 	if (!vcpu->arch.mmu->direct_map)
 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
-- 
2.24.0