diff --git a/mm/mremap.c b/mm/mremap.c
index 5dd572d57ca9..614b4fffed1d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -235,6 +235,71 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 	return true;
 }
 
+
+#define ADDR_BEFORE_PREV(addr, vma) \
+	((vma)->vm_prev && (addr) < (vma)->vm_prev->vm_end)
+
+static inline void try_to_align_start(unsigned long *len,
+	struct vm_area_struct *old, unsigned long *old_addr,
+	struct vm_area_struct *new, unsigned long *new_addr)
+{
+	if (ADDR_BEFORE_PREV(*old_addr & PMD_MASK, old))
+		return;
+
+	if (ADDR_BEFORE_PREV(*new_addr & PMD_MASK, new))
+		return;
+
+	/* Bingo! */
+	*len += *new_addr & ~PMD_MASK;
+	*old_addr &= PMD_MASK;
+	*new_addr &= PMD_MASK;
+}
+
+/*
+ * When aligning the end, avoid ALIGN() (which can overflow
+ * if the user space is the full address space, and overshoot
+ * the vm_start of the next vma).
+ *
+ * Align the upper limit down instead, and check that it's not
+ * in the same PMD as the end.
+ */
+#define ADDR_AFTER_NEXT(addr, vma) \
+	((vma)->vm_next && (addr) > (PMD_MASK & (vma)->vm_next->vm_start))
+
+static inline void try_to_align_end(unsigned long *len,
+	struct vm_area_struct *old, unsigned long *old_addr,
+	struct vm_area_struct *new, unsigned long *new_addr)
+{
+	if (ADDR_AFTER_NEXT(*old_addr + *len, old))
+		return;
+
+	if (ADDR_AFTER_NEXT(*new_addr + *len, new))
+		return;
+
+	/* Mutual alignment means this is same for new/old addr */
+	*len = ALIGN(*new_addr + *len, PMD_SIZE) - *new_addr;
+}
+
+/*
+ * The PMD move case is much more efficient, so if we have the
+ * mutually aligned case, try to see if we can extend the
+ * beginning and end to be aligned too.
+ *
+ * The pointer dereferences look bad, but with inlining, the
+ * compiler will sort it out.
+ */
+static inline void try_to_align_range(unsigned long *len,
+	struct vm_area_struct *old, unsigned long *old_addr,
+	struct vm_area_struct *new, unsigned long *new_addr)
+{
+	if ((*old_addr ^ *new_addr) & ~PMD_MASK)
+		return;
+
+	try_to_align_start(len, old, old_addr, new, new_addr);
+	try_to_align_end(len, old, old_addr, new, new_addr);
+}
+#else
+#define try_to_align_range(len,old,olda,new,newa) do { } while (0)
 #endif
 
 unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -253,6 +318,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			old_addr, old_end);
 	mmu_notifier_invalidate_range_start(&range);
 
+	try_to_align_range(&len, vma, &old_addr, new_vma, &new_addr);
+
 	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
 		cond_resched();
 		next = (old_addr + PMD_SIZE) & PMD_MASK;