[PATCH 04/12] KVM: arm64: Support clearing DBM bit for PTEs
From: Keqian Zhu
Date: Tue Jun 16 2020 - 05:36:46 EST
This adds support for clearing the DBM bit of stage2 PTEs, which is
needed to dynamically enable hardware DBM.
Signed-off-by: Keqian Zhu <zhukeqian1@xxxxxxxxxx>
---
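Note: the kvm_s2pte_dbm() and kvm_clear_s2pte_dbm() helpers used below
are introduced by an earlier patch in this series. As a rough sketch of
what they are assumed to look like (DBM is bit [51] of the stage2
descriptor, i.e. the arm64 PTE_DBM definition; the bodies here are
illustrative only, not the series' actual definitions):

#define kvm_s2pte_dbm(ptep) (!!(pte_val(*(ptep)) & PTE_DBM))

static inline void kvm_clear_s2pte_dbm(pte_t *ptep)
{
	pteval_t old = pte_val(*ptep);

	WRITE_ONCE(pte_val(*ptep), old & ~PTE_DBM);
}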
arch/arm64/include/asm/kvm_host.h | 2 +
arch/arm64/kvm/mmu.c | 151 ++++++++++++++++++++++++++++++
2 files changed, 153 insertions(+)
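For illustration, a hypothetical caller of the new API (this function is
not part of the patch; the slots_lock requirement comes from the
kernel-doc below):

static void example_enable_hw_dirty_tracking(struct kvm *kvm)
{
	/* kvm_mmu_clear_dbm_all() must be called with slots_lock held */
	mutex_lock(&kvm->slots_lock);
	kvm_mmu_clear_dbm_all(kvm);
	mutex_unlock(&kvm->slots_lock);
}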
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c3e6fcc664b1..9ea2dcfd609c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -480,6 +480,8 @@ u64 __kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
+void kvm_mmu_clear_dbm(struct kvm *kvm, struct kvm_memory_slot *memslot);
+void kvm_mmu_clear_dbm_all(struct kvm *kvm);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 27407153121b..f08b0fbca0a0 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2446,6 +2446,157 @@ int kvm_mmu_init(void)
return err;
}
+#ifdef CONFIG_ARM64_HW_AFDBM
+/**
+ * stage2_clear_dbm_ptes() - clear DBM bit from PMD range
+ * @pmd: pointer to pmd entry
+ * @addr: range start address
+ * @end: range end address
+ */
+static void stage2_clear_dbm_ptes(pmd_t *pmd, phys_addr_t addr,
+ phys_addr_t end)
+{
+ pte_t *pte;
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+ if (!pte_none(*pte) && kvm_s2pte_dbm(pte))
+ kvm_clear_s2pte_dbm(pte);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+/**
+ * stage2_clear_dbm_pmds() - clear DBM bit from PUD range
+ * @kvm: The KVM pointer
+ * @pud: pointer to pud entry
+ * @addr: range start address
+ * @end: range end address
+ */
+static void stage2_clear_dbm_pmds(struct kvm *kvm, pud_t *pud,
+ phys_addr_t addr, phys_addr_t end)
+{
+ pmd_t *pmd;
+ phys_addr_t next;
+
+ pmd = stage2_pmd_offset(kvm, pud, addr);
+ do {
+ next = stage2_pmd_addr_end(kvm, addr, end);
+ if (!pmd_none(*pmd) && !pmd_thp_or_huge(*pmd))
+ stage2_clear_dbm_ptes(pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+}
+
+/**
+ * stage2_clear_dbm_puds() - clear DBM bit from P4D range
+ * @kvm: The KVM pointer
+ * @p4d: pointer to p4d entry
+ * @addr: range start address
+ * @end: range end address
+ */
+static void stage2_clear_dbm_puds(struct kvm *kvm, p4d_t *p4d,
+ phys_addr_t addr, phys_addr_t end)
+{
+ pud_t *pud;
+ phys_addr_t next;
+
+ pud = stage2_pud_offset(kvm, p4d, addr);
+ do {
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud) && !stage2_pud_huge(kvm, *pud))
+ stage2_clear_dbm_pmds(kvm, pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
+/**
+ * stage2_clear_dbm_p4ds() - clear DBM bit from PGD range
+ * @kvm: The KVM pointer
+ * @pgd: pointer to pgd entry
+ * @addr: range start address
+ * @end: range end address
+ */
+static void stage2_clear_dbm_p4ds(struct kvm *kvm, pgd_t *pgd,
+ phys_addr_t addr, phys_addr_t end)
+{
+ p4d_t *p4d;
+ phys_addr_t next;
+
+ p4d = stage2_p4d_offset(kvm, pgd, addr);
+ do {
+ next = stage2_p4d_addr_end(kvm, addr, end);
+ if (!stage2_p4d_none(kvm, *p4d))
+ stage2_clear_dbm_puds(kvm, p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+}
+
+/**
+ * stage2_clear_dbm_range() - clear DBM bit from a stage2 address range
+ * @kvm: The KVM pointer
+ * @addr: Start address of range
+ * @end: End address of range
+ */
+static void stage2_clear_dbm_range(struct kvm *kvm, phys_addr_t addr,
+ phys_addr_t end)
+{
+ pgd_t *pgd;
+ phys_addr_t next;
+
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+ do {
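+ /*
+ * Release kvm_mmu_lock periodically if the memory region is
+ * large; holding the lock for too long can trigger lockup
+ * detectors and starves other vCPUs. The stage2 page tables
+ * may be freed while the lock is dropped, hence the
+ * READ_ONCE() check on kvm->arch.pgd below.
+ */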
+ cond_resched_lock(&kvm->mmu_lock);
+ if (!READ_ONCE(kvm->arch.pgd))
+ break;
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (stage2_pgd_present(kvm, *pgd))
+ stage2_clear_dbm_p4ds(kvm, pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}
+
+/**
+ * kvm_mmu_clear_dbm() - clear DBM bit from stage2 PTEs for memory slot
+ * @kvm: The KVM pointer
+ * @memslot: The memory slot to clear the DBM bit for
+ *
+ * After this function returns, the DBM bit has been cleared from all
+ * page descriptors in the slot; huge (block) mappings are skipped, as
+ * the walk only descends into last-level tables.
+ *
+ * Acquires kvm->mmu_lock. Called with the kvm->slots_lock mutex held,
+ * which serializes operations on VM memory regions.
+ */
+void kvm_mmu_clear_dbm(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+ phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+ phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+ spin_lock(&kvm->mmu_lock);
+ stage2_clear_dbm_range(kvm, start, end);
+ spin_unlock(&kvm->mmu_lock);
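+ /*
+ * Flush the TLBs so that stale cached copies of the old
+ * descriptors (with DBM still set) cannot be used to write
+ * to pages without a permission fault.
+ */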
+ kvm_flush_remote_tlbs(kvm);
+}
+
+/**
+ * kvm_mmu_clear_dbm_all() - clear DBM bit from stage2 PTEs for whole VM
+ * @kvm: The KVM pointer
+ *
+ * Called with the kvm->slots_lock mutex held.
+ */
+void kvm_mmu_clear_dbm_all(struct kvm *kvm)
+{
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memory_slot *memslots = slots->memslots;
+ struct kvm_memory_slot *memslot;
+ int slot;
+
+ if (unlikely(!slots->used_slots))
+ return;
+
+ for (slot = 0; slot < slots->used_slots; slot++) {
+ memslot = &memslots[slot];
+ kvm_mmu_clear_dbm(kvm, memslot);
+ }
+}
+#endif /* CONFIG_ARM64_HW_AFDBM */
+
void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
--
2.19.1