[PATCH v1 05/10] powerpc/mm: Do early ioremaps from top to bottom on PPC64 too.
From: Christophe Leroy
Date: Tue Aug 13 2019 - 16:12:03 EST
Until the vmalloc system is up and running, ioremap() basically
allocates addresses at the border of the IOREMAP area.

On PPC32, addresses are allocated down from the top of the area,
while on PPC64, addresses are allocated up from the base of the
area.

On PPC32, the base of the vmalloc area is not yet known when
ioremap() starts being used, while its end is fixed. On PPC64,
both the start and the end are already fixed when ioremap()
starts being used.

Changing the PPC64 behaviour is the lightest change, so change
PPC64 ioremap() to allocate addresses from the top down, as PPC32
does.
Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxx>
---
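Reviewer note: below is a minimal standalone sketch (not kernel code)
of the scheme this patch switches to, i.e. a bump allocator handing out
addresses down from the top of the IOREMAP area instead of up from its
base. It borrows the kernel's names (ioremap_bot, IOREMAP_BASE,
IOREMAP_END), but the constant values and the early_ioremap_alloc()
helper are made up for illustration, and it assumes a 64-bit host:

#include <stdio.h>

/* Illustrative placeholder values, not the kernel's real layout. */
#define IOREMAP_BASE 0xd000000000000000UL
#define IOREMAP_END  0xd000100000000000UL

/* The watermark grows DOWN from the top of the area, as on PPC32. */
static unsigned long ioremap_bot = IOREMAP_END;

/* Carve 'size' bytes out below ioremap_bot; return 0 on exhaustion. */
static unsigned long early_ioremap_alloc(unsigned long size)
{
	if (size > ioremap_bot - IOREMAP_BASE)
		return 0;
	ioremap_bot -= size;
	return ioremap_bot;
}

int main(void)
{
	/* The second mapping lands just below the first. */
	printf("first:  0x%lx\n", early_ioremap_alloc(0x10000));
	printf("second: 0x%lx\n", early_ioremap_alloc(0x4000));
	return 0;
}

With addresses handed out downward, everything at or above ioremap_bot
is an early bolted mapping, which is why the __iounmap() check in the
last hunk flips from < ioremap_bot to >= ioremap_bot.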
arch/powerpc/mm/book3s64/hash_utils.c | 2 +-
arch/powerpc/mm/book3s64/radix_pgtable.c | 2 +-
arch/powerpc/mm/pgtable_64.c | 18 +++++++++---------
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index e6d471058597..0f954dc40346 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1030,7 +1030,7 @@ void __init hash__early_init_mmu(void)
__kernel_io_start = H_KERN_IO_START;
__kernel_io_end = H_KERN_IO_END;
vmemmap = (struct page *)H_VMEMMAP_START;
- ioremap_bot = IOREMAP_BASE;
+ ioremap_bot = IOREMAP_END;
#ifdef CONFIG_PCI
pci_io_base = ISA_IO_BASE;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index b4ca9e95e678..11303e2fffb1 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -611,7 +611,7 @@ void __init radix__early_init_mmu(void)
__kernel_io_start = RADIX_KERN_IO_START;
__kernel_io_end = RADIX_KERN_IO_END;
vmemmap = (struct page *)RADIX_VMEMMAP_START;
- ioremap_bot = IOREMAP_BASE;
+ ioremap_bot = IOREMAP_END;
#ifdef CONFIG_PCI
pci_io_base = ISA_IO_BASE;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 6fa2e969bf0e..0f0b1e1ea5ab 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -101,7 +101,7 @@ unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
-unsigned long ioremap_bot = IOREMAP_BASE;
+unsigned long ioremap_bot = IOREMAP_END;
#endif
int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
@@ -169,11 +169,11 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
/*
* Choose an address to map it to.
- * Once the imalloc system is running, we use it.
+ * Once the vmalloc system is running, we use it.
* Before that, we map using addresses going
- * up from ioremap_bot. imalloc will use
- * the addresses from ioremap_bot through
- * IMALLOC_END
+ * down from ioremap_bot. vmalloc will use
+ * the addresses from IOREMAP_BASE through
+ * ioremap_bot
*
*/
paligned = addr & PAGE_MASK;
@@ -186,7 +186,7 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
struct vm_struct *area;
area = __get_vm_area_caller(size, VM_IOREMAP,
- ioremap_bot, IOREMAP_END,
+ IOREMAP_BASE, ioremap_bot,
caller);
if (area == NULL)
return NULL;
@@ -194,9 +194,9 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
area->phys_addr = paligned;
ret = __ioremap_at(paligned, area->addr, size, prot);
} else {
- ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
+ ret = __ioremap_at(paligned, (void *)ioremap_bot - size, size, prot);
if (ret)
- ioremap_bot += size;
+ ioremap_bot -= size;
}
if (ret)
@@ -217,7 +217,7 @@ void __iounmap(volatile void __iomem *token)
addr = (void *) ((unsigned long __force)
PCI_FIX_ADDR(token) & PAGE_MASK);
- if ((unsigned long)addr < ioremap_bot) {
+ if ((unsigned long)addr >= ioremap_bot) {
printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
" at 0x%p\n", addr);
return;
--
2.13.3