[PATCH 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions

From: Pavel Tatashin
Date: Wed Nov 27 2019 - 13:45:08 EST


We currently duplicate the logic to enable/disable uaccess via TTBR0,
with C functions and assembly macros. This is a maintenance burden
and is liable to lead to subtle bugs, so let's get rid of the assembly
macros, and always use the C functions. This requires refactoring
some assembly functions to have a C wrapper.

Signed-off-by: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>
---
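Note for reviewers: the enable/disable sequence moves out of the asm
routines and into C wrappers in cacheflush.h. A minimal sketch of the
wrapper shape, relying on the existing uaccess_ttbr0_enable() and
uaccess_ttbr0_disable() helpers from <asm/uaccess.h>; __asm_do_flush()
below is a placeholder name, not part of this patch:

	static inline void do_flush(unsigned long start, unsigned long end)
	{
		uaccess_ttbr0_enable();		/* open the user TTBR0 window */
		__asm_do_flush(start, end);	/* renamed asm routine */
		uaccess_ttbr0_disable();	/* back to the reserved TTBR0 */
	}

invalidate_icache_range() additionally checks ARM64_HAS_CACHE_DIC in C
via cpus_have_const_cap() before calling into asm, replacing the asm
alternative that this patch removes from cache.S.
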
arch/arm64/include/asm/asm-uaccess.h | 22 ---------------
arch/arm64/include/asm/cacheflush.h | 39 ++++++++++++++++++++++++--
arch/arm64/mm/cache.S | 41 +++++++++-------------------
arch/arm64/mm/flush.c | 2 +-
4 files changed, 50 insertions(+), 54 deletions(-)

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index f68a0e64482a..fba2a69f7fef 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -34,28 +34,6 @@
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
-
- .macro uaccess_ttbr0_disable, tmp1, tmp2
-alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp2 // avoid preemption
- __uaccess_ttbr0_disable \tmp1
- restore_irq \tmp2
-alternative_else_nop_endif
- .endm
-
- .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
-alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp3 // avoid preemption
- __uaccess_ttbr0_enable \tmp1, \tmp2
- restore_irq \tmp3
-alternative_else_nop_endif
- .endm
-#else
- .macro uaccess_ttbr0_disable, tmp1, tmp2
- .endm
-
- .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
- .endm
#endif

#endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 665c78e0665a..c0b265e12d9d 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -61,16 +61,49 @@
* - kaddr - page address
* - size - region size
*/
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int invalidate_icache_range(unsigned long start, unsigned long end);
+extern void __asm_flush_icache_range(unsigned long start, unsigned long end);
+extern long __asm_flush_cache_user_range(unsigned long start,
+ unsigned long end);
+extern int asm_invalidate_icache_range(unsigned long start,
+ unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);

+static inline void __flush_icache_range(unsigned long start, unsigned long end)
+{
+ uaccess_ttbr0_enable();
+ __asm_flush_icache_range(start, end);
+ uaccess_ttbr0_disable();
+}
+
+static inline void __flush_cache_user_range(unsigned long start,
+ unsigned long end)
+{
+ uaccess_ttbr0_enable();
+ __asm_flush_cache_user_range(start, end);
+ uaccess_ttbr0_disable();
+}
+
+static inline int invalidate_icache_range(unsigned long start,
+ unsigned long end)
+{
+ int rv;
+
+ if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
+ isb();
+ return 0;
+ }
+ uaccess_ttbr0_enable();
+ rv = asm_invalidate_icache_range(start, end);
+ uaccess_ttbr0_disable();
+
+ return rv;
+}
+
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
__flush_icache_range(start, end);
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db767b072601..a48b6dba304e 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@
#include <asm/asm-uaccess.h>

/*
- * flush_icache_range(start,end)
+ * __asm_flush_icache_range(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -24,11 +24,11 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(__flush_icache_range)
+ENTRY(__asm_flush_icache_range)
/* FALLTHROUGH */

/*
- * __flush_cache_user_range(start,end)
+ * __asm_flush_cache_user_range(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -37,8 +37,7 @@ ENTRY(__flush_icache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(__flush_cache_user_range)
- uaccess_ttbr0_enable x2, x3, x4
+ENTRY(__asm_flush_cache_user_range)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
b 7f
@@ -60,41 +59,27 @@ alternative_if ARM64_HAS_CACHE_DIC
alternative_else_nop_endif
invalidate_icache_by_line x0, x1, x2, x3, 9f
8: mov x0, #0
-1:
- uaccess_ttbr0_disable x1, x2
- ret
-9:
- mov x0, #-EFAULT
+1: ret
+9: mov x0, #-EFAULT
b 1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+ENDPROC(__asm_flush_icache_range)
+ENDPROC(__asm_flush_cache_user_range)

/*
- * invalidate_icache_range(start,end)
+ * asm_invalidate_icache_range(start,end)
*
* Ensure that the I cache is invalid within specified region.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(invalidate_icache_range)
-alternative_if ARM64_HAS_CACHE_DIC
- mov x0, xzr
- isb
- ret
-alternative_else_nop_endif
-
- uaccess_ttbr0_enable x2, x3, x4
-
+ENTRY(asm_invalidate_icache_range)
invalidate_icache_by_line x0, x1, x2, x3, 2f
mov x0, xzr
-1:
- uaccess_ttbr0_disable x1, x2
- ret
-2:
- mov x0, #-EFAULT
+1: ret
+2: mov x0, #-EFAULT
b 1b
-ENDPROC(invalidate_icache_range)
+ENDPROC(asm_invalidate_icache_range)

/*
* __flush_dcache_area(kaddr, size)
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..b23f34d23f31 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(__asm_flush_icache_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
--
2.24.0