[PATCH 5/5] KVM: x86/mmu: Move kvm_tdp_mmu_map()'s prolog and epilog to its caller

From: Sean Christopherson
Date: Mon Dec 12 2022 - 22:31:05 EST


Move the hugepage adjust, tracepoint, and RCU (un)lock logic out of
kvm_tdp_mmu_map() and into its sole caller, kvm_tdp_mmu_page_fault(), to
eliminate the gotos used to bounce through rcu_read_unlock() when bailing
from the walk.
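
I.e. the prolog and epilog land in the caller, with the map call sandwiched
between the RCU lock and unlock (excerpted from the mmu.c hunk below):

        /* In kvm_tdp_mmu_page_fault(), with mmu_lock already held for read. */
        kvm_mmu_hugepage_adjust(vcpu, fault);

        trace_kvm_mmu_spte_requested(fault);

        rcu_read_lock();
        r = kvm_tdp_mmu_map(vcpu, fault);
        rcu_read_unlock();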

Opportunistically mark kvm_mmu_hugepage_adjust() as static, as
kvm_tdp_mmu_map() was its only user outside of mmu.c.
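
With the epilog gone, the walk's bail-out paths no longer bounce through a
label to drop RCU; they simply return (excerpted from the tdp_mmu.c hunk
below):

        /* If the SPTE was frozen by another task, give up and retry. */
        if (is_removed_spte(iter.old_spte))
                return RET_PF_RETRY;

        if (iter.level == fault->goal_level)
                return tdp_mmu_map_handle_target_level(vcpu, fault, &iter);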

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c          |  9 ++++++++-
 arch/x86/kvm/mmu/mmu_internal.h |  1 -
 arch/x86/kvm/mmu/tdp_mmu.c      | 22 ++++------------------
 3 files changed, 12 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 254bc46234e0..99c40617d325 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3085,7 +3085,8 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
         return min(host_level, max_level);
 }
 
-void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu,
+                                    struct kvm_page_fault *fault)
 {
         struct kvm_memory_slot *slot = fault->slot;
         kvm_pfn_t mask;
@@ -4405,7 +4406,13 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
         if (is_page_fault_stale(vcpu, fault))
                 goto out_unlock;
 
+        kvm_mmu_hugepage_adjust(vcpu, fault);
+
+        trace_kvm_mmu_spte_requested(fault);
+
+        rcu_read_lock();
         r = kvm_tdp_mmu_map(vcpu, fault);
+        rcu_read_unlock();
 
 out_unlock:
         read_unlock(&vcpu->kvm->mmu_lock);
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index ac00bfbf32f6..66c294d67641 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -317,7 +317,6 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
                               const struct kvm_memory_slot *slot, gfn_t gfn,
                               int max_level);
-void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
 
 void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index cc1fb9a65620..78f47eb74544 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1150,13 +1150,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
         struct kvm *kvm = vcpu->kvm;
         struct tdp_iter iter;
         struct kvm_mmu_page *sp;
-        int ret = RET_PF_RETRY;
-
-        kvm_mmu_hugepage_adjust(vcpu, fault);
-
-        trace_kvm_mmu_spte_requested(fault);
-
-        rcu_read_lock();
 
         tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
                 int r;
@@ -1169,10 +1162,10 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                  * retry, avoiding unnecessary page table allocation and free.
                  */
                 if (is_removed_spte(iter.old_spte))
-                        goto retry;
+                        return RET_PF_RETRY;
 
                 if (iter.level == fault->goal_level)
-                        goto map_target_level;
+                        return tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
 
                 /* Step down into the lower level page table if it exists. */
                 if (is_shadow_present_pte(iter.old_spte) &&
@@ -1199,7 +1192,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                  */
                 if (r) {
                         tdp_mmu_free_sp(sp);
-                        goto retry;
+                        return RET_PF_RETRY;
                 }
 
                 if (fault->huge_page_disallowed &&
@@ -1216,14 +1209,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
          * iterator detected an upper level SPTE was frozen during traversal.
          */
         WARN_ON_ONCE(iter.level == fault->goal_level);
-        goto retry;
-
-map_target_level:
-        ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
-
-retry:
-        rcu_read_unlock();
-        return ret;
+        return RET_PF_RETRY;
 }
 
 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
--
2.39.0.rc1.256.g54fd8350bd-goog