[PATCH 4.14 169/193] powerpc/64s/hash: Fix 128TB-512TB virtual address boundary case allocation
From: Greg Kroah-Hartman
Date: Tue Nov 28 2017 - 05:51:07 EST
4.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: Nicholas Piggin <npiggin@xxxxxxxxx>
commit 6a72dc038b615229a1b285829d6c8378d15c2347 upstream.
When allocating VA space with a hint that crosses 128TB, the SLB
addr_limit variable is not expanded if addr itself is not above 128TB,
but the slice allocation checks the request against task_size, which is
512TB. This results in slice_check_fit() incorrectly succeeding, because
slice_count truncates off bit 128 of the requested mask, so the
comparison against the available mask succeeds.
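
As an illustration only (not part of the patch), here is a minimal
userspace sketch of that boundary arithmetic; the 128TB/512TB constants
mirror DEFAULT_MAP_WINDOW and TASK_SIZE in the code below, while the
particular hint and length values are made-up examples:

#include <stdbool.h>
#include <stdio.h>

#define TB			(1UL << 40)
#define DEFAULT_MAP_WINDOW	(128 * TB)	/* assumed illustrative value */
#define TASK_SIZE		(512 * TB)	/* assumed illustrative value */

int main(void)
{
	unsigned long addr_limit = DEFAULT_MAP_WINDOW;	/* not yet expanded */
	unsigned long task_size = TASK_SIZE;

	/* A hint just below 128TB whose mapping would cross the boundary. */
	unsigned long addr = DEFAULT_MAP_WINDOW - (64 * 1024);
	unsigned long len = 1UL << 20;

	/* addr itself is below 128TB, so addr_limit never gets expanded... */
	bool expanded = (addr >= addr_limit);

	/* ...yet the old checks compare against task_size (512TB) and pass. */
	bool old_ok = (len <= task_size) && (addr <= task_size - len);

	/* Checking against addr_limit instead correctly rejects the request. */
	bool new_ok = (len <= addr_limit) && (addr <= addr_limit - len);

	printf("expanded=%d old_ok=%d new_ok=%d\n", expanded, old_ok, new_ok);
	return 0;
}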
Fix this by using mm->context.addr_limit instead of mm->task_size for
testing allocation limits. This causes such allocations to fail.
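
A condensed, self-contained sketch of the reordered logic the hunks below
introduce (the helper name check_and_expand and the struct and constants
here are made-up stand-ins, not the actual kernel code): the limit is
derived from the hint first, the request is validated against that limit,
and only then is the context's addr_limit expanded:

#include <errno.h>
#include <stdio.h>

#define TB			(1UL << 40)
#define DEFAULT_MAP_WINDOW	(128 * TB)	/* assumed illustrative value */
#define TASK_SIZE		(512 * TB)	/* assumed illustrative value */

struct ctx { unsigned long addr_limit; };	/* stand-in for mm->context */

static long check_and_expand(struct ctx *ctx, unsigned long addr,
			     unsigned long len, unsigned long page_size,
			     int fixed)
{
	unsigned long high_limit = DEFAULT_MAP_WINDOW;

	if (addr >= high_limit)
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	/* Expand the limit only after the request has been validated. */
	if (high_limit > ctx->addr_limit)
		ctx->addr_limit = high_limit;
	return 0;
}

int main(void)
{
	struct ctx c = { .addr_limit = DEFAULT_MAP_WINDOW };

	/* A MAP_FIXED request crossing 128TB from below now fails cleanly. */
	long rc = check_and_expand(&c, DEFAULT_MAP_WINDOW - (64 * 1024),
				   1UL << 20, 1UL << 16, 1);
	printf("rc=%ld expanded=%d\n", rc, c.addr_limit > DEFAULT_MAP_WINDOW);
	return 0;
}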
Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB")
Reported-by: Florian Weimer <fweimer@xxxxxxxxxx>
Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
arch/powerpc/mm/slice.c | 50 +++++++++++++++++++++++-------------------------
1 file changed, 24 insertions(+), 26 deletions(-)
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_
{
struct vm_area_struct *vma;
- if ((mm->task_size - len) < addr)
+ if ((mm->context.addr_limit - len) < addr)
return 0;
vma = find_vma(mm, addr);
return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -133,7 +133,7 @@ static void slice_mask_for_free(struct m
if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i;
- if (mm->task_size <= SLICE_LOW_TOP)
+ if (mm->context.addr_limit <= SLICE_LOW_TOP)
return;
for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
@@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(un
struct slice_mask compat_mask;
int fixed = (flags & MAP_FIXED);
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+ unsigned long page_size = 1UL << pshift;
struct mm_struct *mm = current->mm;
unsigned long newaddr;
unsigned long high_limit;
- /*
- * Check if we need to expland slice area.
- */
- if (unlikely(addr >= mm->context.addr_limit &&
- mm->context.addr_limit != TASK_SIZE)) {
- mm->context.addr_limit = TASK_SIZE;
+ high_limit = DEFAULT_MAP_WINDOW;
+ if (addr >= high_limit)
+ high_limit = TASK_SIZE;
+
+ if (len > high_limit)
+ return -ENOMEM;
+ if (len & (page_size - 1))
+ return -EINVAL;
+ if (fixed) {
+ if (addr & (page_size - 1))
+ return -EINVAL;
+ if (addr > high_limit - len)
+ return -ENOMEM;
+ }
+
+ if (high_limit > mm->context.addr_limit) {
+ mm->context.addr_limit = high_limit;
on_each_cpu(slice_flush_segments, mm, 1);
}
- /*
- * This mmap request can allocate upt to 512TB
- */
- if (addr >= DEFAULT_MAP_WINDOW)
- high_limit = mm->context.addr_limit;
- else
- high_limit = DEFAULT_MAP_WINDOW;
+
/*
* init different masks
*/
@@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(un
/* Sanity checks */
BUG_ON(mm->task_size == 0);
+ BUG_ON(mm->context.addr_limit == 0);
VM_BUG_ON(radix_enabled());
slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
addr, len, flags, topdown);
- if (len > mm->task_size)
- return -ENOMEM;
- if (len & ((1ul << pshift) - 1))
- return -EINVAL;
- if (fixed && (addr & ((1ul << pshift) - 1)))
- return -EINVAL;
- if (fixed && addr > (mm->task_size - len))
- return -ENOMEM;
-
/* If hint, make sure it matches our alignment restrictions */
if (!fixed && addr) {
- addr = _ALIGN_UP(addr, 1ul << pshift);
+ addr = _ALIGN_UP(addr, page_size);
slice_dbg(" aligned addr=%lx\n", addr);
/* Ignore hint if it's too large or overlaps a VMA */
- if (addr > mm->task_size - len ||
+ if (addr > high_limit - len ||
!slice_area_is_free(mm, addr, len))
addr = 0;
}