[PATCH v9 1/8] x86/tlb: add APIs manipulating tlb batch's arch data
From: Byungchul Park
Date: Wed Apr 17 2024 - 03:34:34 EST
This is preparation for the migrc mechanism, which needs to recognize
read-only TLB entries during migration. To that end, the TLB batch arch
data is separated in two, one batch for read-only entries and the other
for writable ones, and the two are merged back when needed.
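As an illustrative sketch only (ubc_ro and ubc_rw are hypothetical
names, not something this patch introduces), merging a read-only batch
into a writable one could look like:

	struct tlbflush_unmap_batch ubc_ro;	/* read-only entries */
	struct tlbflush_unmap_batch ubc_rw;	/* writable entries */

	/* Fold the read-only batch's CPUs into the writable batch... */
	arch_tlbbatch_fold(&ubc_rw.arch, &ubc_ro.arch);
	/* ...and reset the read-only batch for reuse. */
	arch_tlbbatch_clear(&ubc_ro.arch);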
Migrc also needs to optimize TLB shootdown by skipping CPUs that have,
in the meantime, already performed the TLB flush that is needed. To
support this, add APIs for manipulating the arch data on x86.
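For example, a caller tracking which CPUs have already performed the
needed flush could skip the shootdown like this (a minimal sketch; the
'pending' and 'done' batches are hypothetical):

	/*
	 * arch_tlbbatch_done() removes the CPUs in 'done' from
	 * 'pending' and returns true if no CPUs remain, i.e. every
	 * CPU that still needed a flush has already performed one.
	 */
	if (arch_tlbbatch_done(&pending->arch, &done->arch))
		return;	/* tlb shootdown can be skipped */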
Signed-off-by: Byungchul Park <byungchul@xxxxxx>
---
arch/x86/include/asm/tlbflush.h | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 25726893c6f4..a14f77c5cdde 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,6 +5,7 @@
#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
+#include <linux/cpumask.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
@@ -293,6 +294,23 @@ static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+static inline void arch_tlbbatch_clear(struct arch_tlbflush_unmap_batch *batch)
+{
+ cpumask_clear(&batch->cpumask);
+}
+
+static inline void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc)
+{
+ cpumask_or(&bdst->cpumask, &bdst->cpumask, &bsrc->cpumask);
+}
+
+static inline bool arch_tlbbatch_done(struct arch_tlbflush_unmap_batch *bdst,
+ struct arch_tlbflush_unmap_batch *bsrc)
+{
+ return !cpumask_andnot(&bdst->cpumask, &bdst->cpumask, &bsrc->cpumask);
+}
+
static inline bool pte_flags_need_flush(unsigned long oldflags,
unsigned long newflags,
bool ignore_access)
--
2.17.1