[PATCH v2 09/18] arm64: mm: Invalidate both kernel and user ASIDs when performing TLBI

From: Will Deacon
Date: Thu Nov 30 2017 - 11:43:56 EST


Since an mm has both a kernel and a user ASID, we need to ensure that
broadcast TLB maintenance targets both address spaces so that things
like CoW continue to work with the uaccess primitives in the kernel.

Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
---
arch/arm64/include/asm/tlbflush.h | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
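
[Editor's note, not part of the patch: the TLBI by-VA and by-ASID operations
take their ASID in bits [63:48] of the register argument, and with kernel
page-table isolation each mm owns a pair of ASIDs whose user half differs
from the kernel half by a single bit. The new __tlbi_user() simply re-issues
the same operation with that bit ORed in, and only when the kernel is
actually unmapped at EL0. A minimal sketch of the encoding follows, assuming
the USER_ASID_BIT/USER_ASID_FLAG definitions introduced earlier in this
series in asm/mmu.h; the tlbi_va_arg() helper is hypothetical, for
illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed to match asm/mmu.h from earlier in this series. */
    #define USER_ASID_BIT   48
    #define USER_ASID_FLAG  (1UL << USER_ASID_BIT)

    /* TLBI Xt encoding: ASID in bits [63:48], VA[55:12] in bits [43:0]. */
    static uint64_t tlbi_va_arg(uint64_t va, uint64_t asid)
    {
            return (va >> 12) | (asid << 48);
    }

    int main(void)
    {
            uint64_t kern = tlbi_va_arg(0x7f2a51e000UL, 4); /* kernel ASID (even) */
            uint64_t user = kern | USER_ASID_FLAG;          /* user ASID (odd)    */

            printf("kernel-ASID TLBI arg: %#llx\n", (unsigned long long)kern);
            printf("  user-ASID TLBI arg: %#llx\n", (unsigned long long)user);
            return 0;
    }

ORing in USER_ASID_FLAG flips the bottom bit of the ASID field, turning the
kernel ASID of the pair into its user counterpart.]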

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index af1c76981911..9e82dd79c7db 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -23,6 +23,7 @@
 
 #include <linux/sched.h>
 #include <asm/cputype.h>
+#include <asm/mmu.h>
 
 /*
  * Raw TLBI operations.
@@ -54,6 +55,11 @@
 
 #define __tlbi(op, ...)         __TLBI_N(op, ##__VA_ARGS__, 1, 0)
 
+#define __tlbi_user(op, arg) do {                               \
+        if (arm64_kernel_unmapped_at_el0())                     \
+                __tlbi(op, (arg) | USER_ASID_FLAG);             \
+} while (0)
+
 /*
  * TLB Management
  * ==============
@@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 
         dsb(ishst);
         __tlbi(aside1is, asid);
+        __tlbi_user(aside1is, asid);
         dsb(ish);
 }
 
@@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 
         dsb(ishst);
         __tlbi(vale1is, addr);
+        __tlbi_user(vale1is, addr);
         dsb(ish);
 }
 
@@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 
         dsb(ishst);
         for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
-                if (last_level)
+                if (last_level) {
                         __tlbi(vale1is, addr);
-                else
+                        __tlbi_user(vale1is, addr);
+                } else {
                         __tlbi(vae1is, addr);
+                        __tlbi_user(vae1is, addr);
+                }
         }
         dsb(ish);
 }
@@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
         unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
         __tlbi(vae1is, addr);
+        __tlbi_user(vae1is, addr);
         dsb(ish);
 }
 
--
2.1.4
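
[Editor's note on why both invalidations are needed: with kpti enabled, the
same user page can be cached in the TLB twice, once under the user ASID
(filled while running at EL0) and once under the kernel ASID (filled when a
uaccess routine such as copy_to_user() touched it). Breaking CoW while
flushing only one ASID would leave a stale, formerly-writable translation
behind under the other. A self-contained toy model, not kernel code, with a
made-up tlb_entry structure and a stand-in for __tlbi(vale1is, ...), shows
the stale entry surviving a kernel-ASID-only flush:

    #include <stdint.h>
    #include <stdio.h>

    #define USER_ASID_FLAG (1UL << 48)      /* as assumed above */

    /* Toy "TLB": entries tagged with the full TLBI argument. */
    struct tlb_entry { uint64_t tag; int valid; };
    static struct tlb_entry tlb[2];

    static void tlbi_vale1is(uint64_t arg)  /* stand-in for __tlbi(vale1is, arg) */
    {
            for (int i = 0; i < 2; i++)
                    if (tlb[i].valid && tlb[i].tag == arg)
                            tlb[i].valid = 0;
    }

    int main(void)
    {
            uint64_t arg = (0x7f2a51e000UL >> 12) | (4UL << 48);

            /* Same page cached under both ASIDs of the pair. */
            tlb[0] = (struct tlb_entry){ .tag = arg, .valid = 1 };
            tlb[1] = (struct tlb_entry){ .tag = arg | USER_ASID_FLAG, .valid = 1 };

            tlbi_vale1is(arg);                      /* old behaviour: kernel ASID only */
            printf("user entry valid after kernel-only flush: %d\n", tlb[1].valid);

            tlbi_vale1is(arg | USER_ASID_FLAG);     /* what __tlbi_user() adds */
            printf("user entry valid after __tlbi_user flush: %d\n", tlb[1].valid);
            return 0;
    }

The first flush leaves the user-ASID entry valid; only the second,
USER_ASID_FLAG-tagged flush removes it, which is exactly the pattern the
patch adds to each flush_tlb_*() helper.]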