Rather than storing the start of vmalloc space, store the size, and
move the calculation into adjust_lowmem_bounds(). We now have a single
place where this calculation takes place.
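For reference, both formulations compute the same lowmem limit, since
vmalloc_start == VMALLOC_END - vmalloc_size. A minimal userspace sketch
of the arithmetic follows; the constants are illustrative of a typical
3G/1G split, and PHYS_OFFSET is platform-specific and chosen here only
for the example, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

/* Illustrative constants for a typical ARM 3G/1G split; PHYS_OFFSET
 * varies per platform and is picked arbitrarily for this sketch. */
#define VMALLOC_END    0xff800000ULL
#define VMALLOC_OFFSET (8ULL << 20)
#define PAGE_OFFSET    0xc0000000ULL
#define PHYS_OFFSET    0x80000000ULL

int main(void)
{
	uint64_t vmalloc_size = 240ULL << 20;

	/* Old formulation: derive the limit from a stored start address. */
	uint64_t vmalloc_start = VMALLOC_END - vmalloc_size;
	uint64_t old_limit = vmalloc_start - VMALLOC_OFFSET
			     - PAGE_OFFSET + PHYS_OFFSET;

	/* New formulation: derive it directly from the stored size. */
	uint64_t new_limit = VMALLOC_END - vmalloc_size - VMALLOC_OFFSET
			     - PAGE_OFFSET + PHYS_OFFSET;

	/* Both yield the same physical lowmem limit. */
	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)old_limit,
	       (unsigned long long)new_limit);
	return 0;
}

With the constants above, both formulations print 0xb0000000.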
Signed-off-by: Russell King (Oracle) <rmk+kernel@xxxxxxxxxxxxxxx>
---
arch/arm/mm/mmu.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ed2846bdb1f4..5ae11e6f2a58 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1123,7 +1123,7 @@ void __init debug_ll_io_init(void)
}
#endif
-static unsigned long __initdata vmalloc_start = VMALLOC_END - (240 << 20);
+static unsigned long __initdata vmalloc_size = 240 << 20;
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
@@ -1148,7 +1148,7 @@ static int __init early_vmalloc(char *arg)
vmalloc_reserve >> 20);
}
- vmalloc_start = VMALLOC_END - vmalloc_reserve;
+ vmalloc_size = vmalloc_reserve;
return 0;
}
early_param("vmalloc", early_vmalloc);
@@ -1168,7 +1168,7 @@ void __init adjust_lowmem_bounds(void)
* and may itself be outside the valid range for which phys_addr_t
* and therefore __pa() is defined.
*/
- vmalloc_limit = (u64)vmalloc_start - VMALLOC_OFFSET -
+ vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET -
PAGE_OFFSET + PHYS_OFFSET;
/*
--
2.20.1