[PATCH 4.20 121/145] MIPS: c-r4k: Add r4k_blast_scache_node for Loongson-3

From: Greg Kroah-Hartman
Date: Mon Jan 07 2019 - 08:22:43 EST


4.20-stable review patch. If anyone has any objections, please let me know.

------------------

From: Huacai Chen <chenhc@xxxxxxxxxx>

commit bb53fdf395eed103f85061bfff3b116cee123895 upstream.

On multi-node Loongson-3 (NUMA configuration), r4k_blast_scache() can
only flush the scache of Node 0, because it uses CKSEG0 as the start
address of the indexed blast. Add r4k_blast_scache_node(), which uses
(CAC_BASE | (node_id << NODE_ADDRSPACE_SHIFT)) as the start address
instead, so that the scache of the requested node is flushed.
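
For reference, a small user-space sketch (illustrative only, not part of
the patch) of how the per-node start address is formed. The shift of 44
follows from the node offsets in mach-loongson64/mmzone.h below; the
numeric CAC_BASE value is the cached XKPHYS window used by 64-bit
Loongson-3 kernels and is hard-coded here purely for demonstration:

/*
 * Illustrative sketch, not kernel code: derive the per-node scache
 * blast base address the way the patched r4k_blast_scache_node() does.
 */
#include <stdio.h>

#define NODE_ADDRSPACE_SHIFT	44	/* implied by the 0x?00000000000 node offsets */
#define nid_to_addrbase(nid)	((unsigned long)(nid) << NODE_ADDRSPACE_SHIFT)
#define CAC_BASE		0x9800000000000000UL	/* assumed: Loongson-3 cached XKPHYS base */

int main(void)
{
	long node;

	for (node = 0; node < 4; node++) {
		/* The old code started every indexed blast at CKSEG0; the
		 * patched code starts at the cached window of the node
		 * whose scache is being flushed. */
		printf("node %ld: scache blast base 0x%016lx\n",
		       node, CAC_BASE | nid_to_addrbase(node));
	}
	return 0;
}

In local_r4k___flush_cache_all() the node to flush is derived from the
current CPU as get_ebase_cpunum() >> 2, each Loongson-3 node holding
four cores (hence the shift by two).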

Signed-off-by: Huacai Chen <chenhc@xxxxxxxxxx>
[paul.burton@xxxxxxxx: Include asm/mmzone.h from asm/r4kcache.h for
nid_to_addrbase(). Add asm/mach-generic/mmzone.h
to allow inclusion for all platforms.]
Signed-off-by: Paul Burton <paul.burton@xxxxxxxx>
Patchwork: https://patchwork.linux-mips.org/patch/21129/
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: James Hogan <james.hogan@xxxxxxxx>
Cc: Steven J. Hill <Steven.Hill@xxxxxxxxxx>
Cc: linux-mips@xxxxxxxxxxxxxx
Cc: Fuxin Zhang <zhangfx@xxxxxxxxxx>
Cc: Zhangjin Wu <wuzhangjin@xxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx> # 3.15+
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
arch/mips/include/asm/mach-generic/mmzone.h | 2 +
arch/mips/include/asm/mach-loongson64/mmzone.h | 1
arch/mips/include/asm/mmzone.h | 8 ++++
arch/mips/include/asm/r4kcache.h | 22 ++++++++++++
arch/mips/mm/c-r4k.c | 44 +++++++++++++++++++++----
5 files changed, 70 insertions(+), 7 deletions(-)

--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/mmzone.h
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Intentionally empty */
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -21,6 +21,7 @@
#define NODE3_ADDRSPACE_OFFSET 0x300000000000UL

#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)

#define LEVELS_PER_SLICE 128

--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -9,6 +9,14 @@
#include <asm/page.h>
#include <mmzone.h>

+#ifndef pa_to_nid
+#define pa_to_nid(addr) 0
+#endif
+
+#ifndef nid_to_addrbase
+#define nid_to_addrbase(nid) 0
+#endif
+
#ifdef CONFIG_DISCONTIGMEM

#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -20,6 +20,7 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
+#include <asm/mmzone.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
@@ -674,4 +675,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

+/* Currently, this is very specific to Loongson-3 */
+#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize##_node(long node) \
+{ \
+ unsigned long start = CAC_BASE | nid_to_addrbase(node); \
+ unsigned long end = start + current_cpu_data.desc.waysize; \
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+ unsigned long ws_end = current_cpu_data.desc.ways << \
+ current_cpu_data.desc.waybit; \
+ unsigned long ws, addr; \
+ \
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
+ for (addr = start; addr < end; addr += lsize * 32) \
+ cache##lsize##_unroll32(addr|ws, indexop); \
+}
+
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+
#endif /* _ASM_R4KCACHE_H */
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
r4k_blast_scache = blast_scache128;
}

+static void (*r4k_blast_scache_node)(long node);
+
+static void r4k_blast_scache_node_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache_node = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache_node = blast_scache16_node;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_node = blast_scache32_node;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_node = blast_scache64_node;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_node = blast_scache128_node;
+}
+
static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2:
- case CPU_LOONGSON3:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -480,6 +497,11 @@ static inline void local_r4k___flush_cac
r4k_blast_scache();
break;

+ case CPU_LOONGSON3:
+ /* Use get_ebase_cpunum() for both NUMA=y/n */
+ r4k_blast_scache_node(get_ebase_cpunum() >> 2);
+ break;
+
case CPU_BMIPS5000:
r4k_blast_scache();
__sync();
@@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsi

preempt_disable();
if (cpu_has_inclusive_pcaches) {
- if (size >= scache_size)
- r4k_blast_scache();
- else
+ if (size >= scache_size) {
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache();
+ else
+ r4k_blast_scache_node(pa_to_nid(addr));
+ } else {
blast_scache_range(addr, addr + size);
+ }
preempt_enable();
__sync();
return;
@@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned l

preempt_disable();
if (cpu_has_inclusive_pcaches) {
- if (size >= scache_size)
- r4k_blast_scache();
- else {
+ if (size >= scache_size) {
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache();
+ else
+ r4k_blast_scache_node(pa_to_nid(addr));
+ } else {
/*
* There is no clearly documented alignment requirement
* for the cache instruction on MIPS processors and
@@ -1918,6 +1947,7 @@ void r4k_cache_init(void)
r4k_blast_scache_page_setup();
r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
+ r4k_blast_scache_node_setup();
#ifdef CONFIG_EVA
r4k_blast_dcache_user_page_setup();
r4k_blast_icache_user_page_setup();