[PATCH 12/27] KVM: x86: make translate_nested_gpa vendor-specific
From: Paolo Bonzini
Date: Wed Apr 08 2026 - 11:45:17 EST
EPT and NPT have different rules for passing PFERR_USER_MASK to the
nested page table walk: NPT walks are always user walks, whereas for
final addresses EPT uses the U bit of the guest (nGVA->nGPA) walk.
While at it, remove PFERR_USER_MASK from the VMX version of the
function, since it is actually ignored by the tables that
update_permission_bitmask() generates for EPT.
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 4 ++++
arch/x86/kvm/hyperv.c | 3 ++-
arch/x86/kvm/mmu.h | 9 +++------
arch/x86/kvm/svm/nested.c | 15 +++++++++++++++
arch/x86/kvm/vmx/nested.c | 12 ++++++++++++
arch/x86/kvm/x86.c | 16 ----------------
6 files changed, 36 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 65671d3769f0..a20263a4e727 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1979,6 +1979,10 @@ struct kvm_x86_nested_ops {
struct kvm_nested_state *kvm_state);
bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
+ gpa_t (*translate_nested_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa,
+ u64 access,
+ struct x86_exception *exception,
+ u64 pte_access);
int (*enable_evmcs)(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 53688f7b76eb..f35fae3a7b3d 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -2041,7 +2041,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
* read with kvm_read_guest().
*/
if (!hc->fast && is_guest_mode(vcpu)) {
- hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa,
+ hc->ingpa = kvm_x86_ops.nested_ops->translate_nested_gpa(
+ vcpu, hc->ingpa,
PFERR_GUEST_FINAL_MASK, NULL, 0);
if (unlikely(hc->ingpa == INVALID_GPA))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 635c2e5d8513..63be5c5efed9 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -294,10 +294,6 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
atomic64_add(count, &kvm->stat.pages[level - 1]);
}
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
- struct x86_exception *exception,
- u64 pte_access);
-
static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
gpa_t gpa, u64 access,
@@ -306,8 +302,9 @@ static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
{
if (mmu != &vcpu->arch.nested_mmu)
return gpa;
- return translate_nested_gpa(vcpu, gpa, access, exception,
- pte_access);
+ return kvm_x86_ops.nested_ops->translate_nested_gpa(vcpu, gpa, access,
+ exception,
+ pte_access);
}
static inline bool kvm_has_mirrored_tdp(const struct kvm *kvm)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b36c33255bed..3b670ee4eb26 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1968,8 +1968,23 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
return true;
}
+static gpa_t svm_translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa,
+ u64 access,
+ struct x86_exception *exception,
+ u64 pte_access)
+{
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+
+ BUG_ON(!mmu_is_nested(vcpu));
+
+ /* NPT walks are always user-walks */
+ access |= PFERR_USER_MASK;
+ return mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
+}
+
struct kvm_x86_nested_ops svm_nested_ops = {
.leave_nested = svm_leave_nested,
+ .translate_nested_gpa = svm_translate_nested_gpa,
.is_exception_vmexit = nested_svm_is_exception_vmexit,
.check_events = svm_check_nested_events,
.triple_fault = nested_svm_triple_fault,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 937aeb474af7..e4cb317807ab 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -7436,8 +7436,20 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
return 0;
}
+static gpa_t vmx_translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa,
+ u64 access,
+ struct x86_exception *exception,
+ u64 pte_access)
+{
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+
+ BUG_ON(!mmu_is_nested(vcpu));
+ return mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
+}
+
struct kvm_x86_nested_ops vmx_nested_ops = {
.leave_nested = vmx_leave_nested,
+ .translate_nested_gpa = vmx_translate_nested_gpa,
.is_exception_vmexit = nested_vmx_is_exception_vmexit,
.check_events = vmx_check_nested_events,
.has_events = vmx_has_nested_events,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 15c1b2de3d93..0757b93e528d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7823,22 +7823,6 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
kvm_x86_call(get_segment)(vcpu, var, seg);
}
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
- struct x86_exception *exception,
- u64 pte_access)
-{
- struct kvm_mmu *mmu = vcpu->arch.mmu;
- gpa_t t_gpa;
-
- BUG_ON(!mmu_is_nested(vcpu));
-
- /* NPT walks are always user-walks */
- access |= PFERR_USER_MASK;
- t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
-
- return t_gpa;
-}
-
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
--
2.52.0