[patch 02/27] SPARC64: Fix two kernel linear mapping setup bugs.

From: Greg KH
Date: Fri Feb 01 2008 - 19:24:38 EST


2.6.22-stable review patch. If anyone has any objections, please let us
know.

------------------
From: David Miller <davem@xxxxxxxxxxxxx>

[SPARC64]: Fix two kernel linear mapping setup bugs.

[ Upstream commit: 8f361453d8e9a67c85b2cf9b93c642c2d8fe0462 ]

This was caught and identified by Greg Onufer.

Since we set up the 256M/4M bitmap table after taking over the trap
table, it's possible for some 4M mappings to get loaded into the TLB
beforehand, covering ranges which will later become 256M mappings.

This can cause illegal TLB multiple-match conditions. Fix this by
setting up the bitmap before we take over the trap table.
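
In outline (this is just a condensed view of the mm/init.c hunks
below, with the unrelated parts of paging_init() elided as "..."),
the fixed ordering looks like:

    void __init paging_init(void)
    {
            ...
            inherit_prom_mappings();

            /* Build the 256M/4M kpte bitmap from the OBP "reg"
             * properties before the trap table is taken over, so
             * no 4M TLB entry can sneak in for a range that the
             * bitmap will later declare to be 256M-mapped.
             */
            read_obp_memory("reg", &pall[0], &pall_ents);
            init_kpte_bitmap();

            /* Ok, we can use our TLB miss and window trap
             * handlers safely.
             */
            setup_tba();
            ...
    }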

Next, __flush_tlb_all() was not doing anything on hypervisor
platforms. Fix by adding sun4v_mmu_demap_all() and calling it.
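
For completeness, a similarly condensed view of the fixed
__flush_tlb_all() (the %pstate interrupt disabling and the
spitfire/cheetah flush loops are elided as "..."):

    void __flush_tlb_all(void)
    {
            ...
            if (tlb_type == hypervisor) {
                    /* sun4v: a single HV_FAST_MMU_DEMAP_ALL (0x24)
                     * hypercall, via the new entry.S wrapper,
                     * instead of silently doing nothing.
                     */
                    sun4v_mmu_demap_all();
            } else if (tlb_type == spitfire) {
                    ...
            }
            ...
    }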

Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxx>

---
 arch/sparc64/kernel/entry.S      |   12 ++++++++++++
 arch/sparc64/mm/init.c           |   29 ++++++++++++++++++++---------
 include/asm-sparc64/hypervisor.h |    4 ++++
 3 files changed, 36 insertions(+), 9 deletions(-)

--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -2593,3 +2593,15 @@ sun4v_mmustat_info:
 	retl
 	 nop
 	.size	sun4v_mmustat_info, .-sun4v_mmustat_info
+
+	.globl	sun4v_mmu_demap_all
+	.type	sun4v_mmu_demap_all,#function
+sun4v_mmu_demap_all:
+	clr	%o0
+	clr	%o1
+	mov	HV_MMU_ALL, %o2
+	mov	HV_FAST_MMU_DEMAP_ALL, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+	.size	sun4v_mmu_demap_all, .-sun4v_mmu_demap_all
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1135,14 +1135,9 @@ static void __init mark_kpte_bitmap(unsi
 	}
 }
 
-static void __init kernel_physical_mapping_init(void)
+static void __init init_kpte_bitmap(void)
 {
 	unsigned long i;
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	unsigned long mem_alloced = 0UL;
-#endif
-
-	read_obp_memory("reg", &pall[0], &pall_ents);
 
 	for (i = 0; i < pall_ents; i++) {
 		unsigned long phys_start, phys_end;
@@ -1151,14 +1146,24 @@ static void __init kernel_physical_mappi
 		phys_end = phys_start + pall[i].reg_size;
 
 		mark_kpte_bitmap(phys_start, phys_end);
+	}
+}
 
+static void __init kernel_physical_mapping_init(void)
+{
 #ifdef CONFIG_DEBUG_PAGEALLOC
+	unsigned long i, mem_alloced = 0UL;
+
+	for (i = 0; i < pall_ents; i++) {
+		unsigned long phys_start, phys_end;
+
+		phys_start = pall[i].phys_addr;
+		phys_end = phys_start + pall[i].reg_size;
+
 		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
-#endif
 	}
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 	printk("Allocated %ld bytes for kernel page tables.\n",
 	       mem_alloced);

@@ -1400,6 +1405,10 @@ void __init paging_init(void)

 	inherit_prom_mappings();
 
+	read_obp_memory("reg", &pall[0], &pall_ents);
+
+	init_kpte_bitmap();
+
 	/* Ok, we can use our TLB miss and window trap handlers safely. */
 	setup_tba();

@@ -1854,7 +1863,9 @@ void __flush_tlb_all(void)
"wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
- if (tlb_type == spitfire) {
+ if (tlb_type == hypervisor) {
+ sun4v_mmu_demap_all();
+ } else if (tlb_type == spitfire) {
for (i = 0; i < 64; i++) {
/* Spitfire Errata #32 workaround */
/* NOTE: Always runs on spitfire, so no
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -709,6 +709,10 @@ extern unsigned long sun4v_mmu_tsb_ctx0(
  */
 #define HV_FAST_MMU_DEMAP_ALL	0x24
 
+#ifndef __ASSEMBLY__
+extern void sun4v_mmu_demap_all(void);
+#endif
+
 /* mmu_map_perm_addr()
  * TRAP:	HV_FAST_TRAP
  * FUNCTION:	HV_FAST_MMU_MAP_PERM_ADDR
