On Thu, Feb 07, 2008 at 03:27:19PM +0200, Izik Eidus wrote:
> Joerg Roedel wrote:
> > This patch contains the changes to the KVM MMU necessary for support of the
> > Nested Paging feature in AMD Barcelona and Phenom Processors.
>
> good patch, it looks like things will be very fixable with it
>
> > Signed-off-by: Joerg Roedel <joerg.roedel@xxxxxxx>
> > ---
> >  arch/x86/kvm/mmu.c |   79 ++++++++++++++++++++++++++++++++++++++++++++++++++--
> >  arch/x86/kvm/mmu.h |    6 ++++
> >  2 files changed, 82 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> > index 5e76963..5304d55 100644
> > --- a/arch/x86/kvm/mmu.c
> > +++ b/arch/x86/kvm/mmu.c
> > @@ -1081,6 +1081,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
> >  	int i;
> >  	gfn_t root_gfn;
> >  	struct kvm_mmu_page *sp;
> > +	int metaphysical = 0;
> >
> >  	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
> > @@ -1089,14 +1090,20 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
> >  		hpa_t root = vcpu->arch.mmu.root_hpa;
> >
> >  		ASSERT(!VALID_PAGE(root));
> > +		if (tdp_enabled)
> > +			metaphysical = 1;
> >  		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
> > -				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
> > +				      PT64_ROOT_LEVEL, metaphysical,
> > +				      ACC_ALL, NULL, NULL);
> >  		root = __pa(sp->spt);
> >  		++sp->root_count;
> >  		vcpu->arch.mmu.root_hpa = root;
> >  		return;
> >  	}
> >  #endif
> > +	metaphysical = !is_paging(vcpu);
> > +	if (tdp_enabled)
> > +		metaphysical = 1;
> >  	for (i = 0; i < 4; ++i) {
> >  		hpa_t root = vcpu->arch.mmu.pae_root[i];
> >
> > @@ -1110,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
> >  		} else if (vcpu->arch.mmu.root_level == 0)
> >  			root_gfn = 0;
> >  		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
> > -				      PT32_ROOT_LEVEL, !is_paging(vcpu),
> > +				      PT32_ROOT_LEVEL, metaphysical,
> >  				      ACC_ALL, NULL, NULL);
> >  		root = __pa(sp->spt);
> >  		++sp->root_count;
> > @@ -1144,6 +1151,36 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
> >  				error_code & PFERR_WRITE_MASK, gfn);
> >  }
> >
> > +static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
> > +			  u32 error_code)
>
> you probably mean gpa_t ?
Yes, but the function is assigned to a function pointer, and the type of
that pointer expects gva_t there. So I named the parameter gpa to make
clear that a guest physical address is meant there.
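
To illustrate the constraint, here is a small standalone sketch (plain
userspace C with simplified types, not the actual KVM code) of how the
prototype of the page_fault pointer fixes the parameter type, leaving
only the parameter name to carry the meaning:

/*
 * Standalone sketch, not kernel code: the handler is reached through
 * a function pointer whose prototype uses gva_t, so tdp_page_fault
 * must keep that type even though nested paging delivers a guest
 * physical address.  The name gpa documents the actual meaning.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;		/* guest virtual address type */

struct kvm_vcpu;		/* opaque in this sketch */

struct kvm_mmu {
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, uint32_t error_code);
};

static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, uint32_t error_code)
{
	(void)vcpu;
	(void)error_code;
	printf("tdp fault at gpa 0x%llx\n", (unsigned long long)gpa);
	return 0;
}

int main(void)
{
	struct kvm_mmu mmu = { .page_fault = tdp_page_fault };

	/* the value passed through the gva_t slot is really a gpa */
	mmu.page_fault(NULL, 0x1000, 0);
	return 0;
}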
> > +{
> > +	struct page *page;
> > +	int r;
> > +
> > +	ASSERT(vcpu);
> > +	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
> > +
> > +	r = mmu_topup_memory_caches(vcpu);
> > +	if (r)
> > +		return r;
> > +
> > +	down_read(&current->mm->mmap_sem);
> > +	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
> > +	if (is_error_page(page)) {
> > +		kvm_release_page_clean(page);
> > +		up_read(&current->mm->mmap_sem);
> > +		return 1;
> > +	}
>
> i don't know if it's worth checking it here; in the worst case we will
> map the error page and the host will be safe

Looking at the nonpaging_map function, this is the right place to check
for the error page.
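
For reference, the corresponding check in nonpaging_map looks roughly
like this (paraphrased from memory of the mmu.c this series is based
on, not a verbatim quote; locking and error paths are trimmed):

/*
 * Rough paraphrase of nonpaging_map(): the error page returned by
 * gfn_to_page() is caught right away, before anything gets mapped,
 * and tdp_page_fault above follows the same pattern.
 */
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return 1;
	}

	/* ... map the page into the shadow page table ... */
	return 0;
}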
Joerg Roedel