[PATCH 06/22] KVM: x86/mmu: remove "bool base_only" arguments
From: Paolo Bonzini
Date: Thu Apr 14 2022 - 03:40:56 EST
The argument is always false now that kvm_mmu_calc_root_page_role has
been removed. Drop it, and with no caller left that wants only the
base role, fold kvm_calc_mmu_role_ext() into kvm_calc_mmu_role_common().
Reviewed-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
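Not part of the patch itself: a minimal, compilable sketch of the
pattern being removed, using hypothetical stand-in types (the real
union kvm_mmu_role and the ____is_*() accessors in mmu.c are more
involved). Once every caller passes base_only=false, the flag is dead
code and the ext computation can simply run unconditionally in the
common helper:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the register snapshot and role union. */
struct mmu_role_regs { bool cr0_pg, cr4_pae, efer_nx; };

struct mmu_role {
	bool base_efer_nx;        /* "base" bits */
	bool ext_cr4_pae;         /* "ext" bits */
	bool ext_valid;
};

/*
 * Before this patch the function took "bool base_only" and, when it
 * was false, called a separate kvm_calc_mmu_role_ext()-style helper
 * to fill in the ext bits.  With base_only always false, the helper
 * is folded in and the parameter dropped.
 */
static struct mmu_role calc_mmu_role_common(const struct mmu_role_regs *regs)
{
	struct mmu_role role = {0};

	if (regs->cr0_pg) {
		role.base_efer_nx = regs->efer_nx;  /* base bits, as before */
		role.ext_cr4_pae = regs->cr4_pae;   /* ext bits, now inline */
	}
	role.ext_valid = true;
	return role;
}

int main(void)
{
	struct mmu_role_regs regs = { .cr0_pg = true, .cr4_pae = true };
	struct mmu_role role = calc_mmu_role_common(&regs);

	printf("cr4_pae=%d valid=%d\n", role.ext_cr4_pae, role.ext_valid);
	return 0;
}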
arch/x86/kvm/mmu/mmu.c | 66 +++++++++++++++---------------------------
1 file changed, 23 insertions(+), 43 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d56875938c29..7f156da3ca93 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4705,47 +4705,30 @@ static void paging32_init_context(struct kvm_mmu *context)
context->direct_map = false;
}
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
- const struct kvm_mmu_role_regs *regs)
-{
- union kvm_mmu_extended_role ext = {0};
-
- if (____is_cr0_pg(regs)) {
- ext.cr0_pg = 1;
- ext.cr4_pae = ____is_cr4_pae(regs);
- ext.cr4_smep = ____is_cr4_smep(regs);
- ext.cr4_smap = ____is_cr4_smap(regs);
- ext.cr4_pse = ____is_cr4_pse(regs);
-
- /* PKEY and LA57 are active iff long mode is active. */
- ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
- ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
- ext.efer_lma = ____is_efer_lma(regs);
- }
-
- ext.valid = 1;
-
- return ext;
-}
-
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
- const struct kvm_mmu_role_regs *regs,
- bool base_only)
+ const struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_role role = {0};
role.base.access = ACC_ALL;
if (____is_cr0_pg(regs)) {
+ role.ext.cr0_pg = 1;
role.base.efer_nx = ____is_efer_nx(regs);
role.base.cr0_wp = ____is_cr0_wp(regs);
+
+ role.ext.cr4_pae = ____is_cr4_pae(regs);
+ role.ext.cr4_smep = ____is_cr4_smep(regs);
+ role.ext.cr4_smap = ____is_cr4_smap(regs);
+ role.ext.cr4_pse = ____is_cr4_pse(regs);
+
+ /* PKEY and LA57 are active iff long mode is active. */
+ role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
+ role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+ role.ext.efer_lma = ____is_efer_lma(regs);
}
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
-
- if (base_only)
- return role;
-
- role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
+ role.ext.valid = 1;
return role;
}
@@ -4765,10 +4748,9 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
static union kvm_mmu_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
- const struct kvm_mmu_role_regs *regs,
- bool base_only)
+ const struct kvm_mmu_role_regs *regs)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4783,7 +4765,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role new_role =
- kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false);
+ kvm_calc_tdp_mmu_root_page_role(vcpu, regs);
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
@@ -4812,10 +4794,9 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
static union kvm_mmu_role
kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
- const struct kvm_mmu_role_regs *regs,
- bool base_only)
+ const struct kvm_mmu_role_regs *regs)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
@@ -4826,11 +4807,10 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
static union kvm_mmu_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
- const struct kvm_mmu_role_regs *regs,
- bool base_only)
+ const struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs);
role.base.direct = !____is_cr0_pg(regs);
@@ -4872,7 +4852,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role new_role =
- kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
+ kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
shadow_mmu_init_context(vcpu, context, regs, new_role);
}
@@ -4882,7 +4862,7 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
const struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs);
role.base.direct = false;
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4983,7 +4963,7 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *
{
union kvm_mmu_role role;
- role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+ role = kvm_calc_shadow_root_page_role_common(vcpu, regs);
/*
* Nested MMUs are used only for walking L2's gva->gpa, they never have
--
2.31.1