The page walker may also be used with nested paging when MMIO areas are accessed. Make it support the additional page level, too.
Signed-off-by: Joerg Roedel <joerg.roedel@xxxxxxx>
---
arch/x86/kvm/mmu.c | 6 ++++++
arch/x86/kvm/paging_tmpl.h | 16 ++++++++++++++++
2 files changed, 22 insertions(+), 0 deletions(-)
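For reference, the two new masks split a guest address at the 1GB boundary (PAGE_SHIFT + 2 * PT64_LEVEL_BITS = 30 on x86-64). Below is a minimal standalone sketch of that arithmetic, with the constants written out and a hypothetical PDPE value; it is not part of the patch, and the real macros additionally mask with PT64_BASE_ADDR_MASK to strip the flag and NX bits:

#include <stdio.h>
#include <stdint.h>

/* Assumed constants matching the usual x86 long-mode layout. */
#define PAGE_SHIFT	12
#define PT64_LEVEL_BITS	9

/* A PDPE-level large page covers bits 29..0 of the address. */
#define PDPE_SHIFT	(PAGE_SHIFT + 2 * PT64_LEVEL_BITS)	/* 30 */
#define PDPE_BASE	(~((1ULL << PDPE_SHIFT) - 1))		/* bits 63..30 */
#define PDPE_OFFSET	((1ULL << PDPE_SHIFT) - 1)		/* bits 29..0 */

int main(void)
{
	uint64_t pdpe = 0x40000000ULL;	/* hypothetical 1GB page at 1GB */
	uint64_t addr = 0x7fabc123ULL;	/* hypothetical guest address */

	/* Base frame of the 1GB page plus the 4K-page offset within it,
	 * mirroring walker->gfn = gpte_to_gfn_pdpe(pte) + offset. */
	uint64_t gfn = ((pdpe & PDPE_BASE) >> PAGE_SHIFT) +
		       ((addr & PDPE_OFFSET) >> PAGE_SHIFT);

	printf("gfn = 0x%llx\n", (unsigned long long)gfn);	/* 0x7fabc */
	return 0;
}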
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ef2396d..fc0e2fc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -117,6 +117,11 @@ module_param(oos_shadow, bool, 0644);
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
+#define PT64_PDPE_BASE_ADDR_MASK \
+	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (2 * PT64_LEVEL_BITS))) - 1))
+#define PT64_PDPE_OFFSET_MASK \
+	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (2 * PT64_LEVEL_BITS))) - 1))
+
#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
@@ -130,6 +135,7 @@ module_param(oos_shadow, bool, 0644);
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
+static gfn_t gpte_to_gfn_pdpe(pt_element_t gpte)
+{
+	return (gpte & PT64_PDPE_BASE_ADDR_MASK) >> PAGE_SHIFT;
+}
+
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
@@ -201,6 +207,15 @@ walk:
			break;
		}
+		if (walker->level == PT_PDPE_LEVEL &&
+		    (pte & PT_PAGE_SIZE_MASK) &&
+		    is_long_mode(vcpu)) {
+			walker->gfn = gpte_to_gfn_pdpe(pte);
+			walker->gfn += (addr & PT64_PDPE_OFFSET_MASK)
+					>> PAGE_SHIFT;
+			break;
+		}
+
		pt_access = pte_access;
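The new branch is the PDPE-level instance of the base-plus-offset computation the walker already performs for 2MB pages at PT_DIRECTORY_LEVEL. A sketch of the common pattern, parameterized by level, follows; the helper name and test values are illustrative, not part of the patch, and flag bits in the pte are ignored for brevity:

#include <stdint.h>
#include <assert.h>

#define PAGE_SHIFT	12
#define PT64_LEVEL_BITS	9

/* Bits covered by a large page at a given level:
 * level 2 -> bit 21 (2MB), level 3 -> bit 30 (1GB). */
static unsigned lvl_shift(int level)
{
	return PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS;
}

/* Hypothetical helper: gfn reached through a large pte at 'level'. */
static uint64_t gfn_for_level(uint64_t pte, uint64_t addr, int level)
{
	uint64_t offset_mask = (1ULL << lvl_shift(level)) - 1;

	return ((pte & ~offset_mask) >> PAGE_SHIFT) +
	       ((addr & offset_mask) >> PAGE_SHIFT);
}

int main(void)
{
	/* 2MB page at 0x200000, 1GB page at 0x40000000. */
	assert(gfn_for_level(0x200000, 0x201234, 2) == 0x201);
	assert(gfn_for_level(0x40000000ULL, 0x40001234ULL, 3) == 0x40001);
	return 0;
}

The is_long_mode() guard matters here because only long-mode page tables define a PS bit in the PDPE; PAE PDPTEs cannot map large pages.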