+
+ /*
+ * Note, enforcing the NX huge page mitigation for nonpaging MMUs
+ * (shadow paging, CR0.PG=0 in the guest) is completely unnecessary.
+ * The guest doesn't have any page tables to abuse and is guaranteed
+ * to switch to a different MMU when CR0.PG is toggled on (may not
+ * always be guaranteed when KVM is using TDP). See also make_spte().
+ */
const bool nx_huge_page_workaround_enabled;
/*
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 7314d27d57a4..9f3e5af088a5 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -147,6 +147,17 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (!prefetch)
spte |= spte_shadow_accessed_mask(spte);
+ /*
+ * For simplicity, enforce the NX huge page mitigation even if not
+ * strictly necessary. KVM could ignore the mitigation if paging is
+ * disabled in the guest, but KVM would then have to ensure a new MMU
+ * is loaded (or all shadow pages zapped) when CR0.PG is toggled on,
+ * and that's a net negative for performance when TDP is enabled. KVM
+ * could ignore the mitigation if TDP is disabled and CR0.PG=0, as KVM
+ * will always switch to a new MMU if paging is enabled in the guest,
+ * but that adds complexity just to optimize a mode that is anything
+ * but performance critical.