 mm/memory.c | 38 +++++++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 7 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index e315b1f1ef08..524aa7183971 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -773,10 +773,14 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	return 0;
 }
 
-static inline void
+/*
+ * This returns 0 for success, >0 for "success, and I used the prealloc page",
+ * and <0 for "you need to preallocate a page and retry".
+ */
+static inline int
 copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
-		unsigned long addr, int *rss)
+		unsigned long addr, int *rss, struct page *prealloc)
 {
 	unsigned long vm_flags = vma->vm_flags;
 	pte_t pte = *src_pte;
@@ -815,6 +819,7 @@ copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	}
 
 	set_pte_at(dst_mm, addr, dst_pte, pte);
+	return 0;
 }
 
 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -824,16 +829,19 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pte_t *orig_src_pte, *orig_dst_pte;
 	pte_t *src_pte, *dst_pte;
 	spinlock_t *src_ptl, *dst_ptl;
-	int progress = 0;
+	int progress, used_page;
 	int rss[NR_MM_COUNTERS];
 	swp_entry_t entry = (swp_entry_t){0};
+	struct page *prealloc = NULL;
 
 again:
+	progress = 0;
+	used_page = 0;
 	init_rss_vec(rss);
 
 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
 	if (!dst_pte)
-		return -ENOMEM;
+		goto out_of_memory;
 	src_pte = pte_offset_map(src_pmd, addr);
 	src_ptl = pte_lockptr(src_mm, src_pmd);
 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
@@ -865,8 +873,12 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			progress += 8;
 			continue;
 		}
-		copy_present_pte(dst_mm, src_mm, dst_pte, src_pte,
-				 vma, addr, rss);
+		/* copy_present_page() may need to have a pre-allocated temporary page */
+		used_page = copy_present_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss, prealloc);
+		if (used_page < 0)
+			break;
+		if (used_page)
+			prealloc = NULL;
 		progress += 8;
 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 
@@ -879,12 +891,24 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 	if (entry.val) {
 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
+			goto out_of_memory;
+	}
+	/* Did we exit from the pte lock because we needed a new page? */
+	if (used_page < 0) {
+		prealloc = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
+		if (!prealloc)
 			return -ENOMEM;
-		progress = 0;
 	}
 	if (addr != end)
 		goto again;
+	if (prealloc)
+		free_unref_page(prealloc);
 	return 0;
+
+out_of_memory:
+	if (prealloc)
+		free_unref_page(prealloc);
+	return -ENOMEM;
 }
 
 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
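
The point of the dance above is that alloc_page_vma() can sleep, so the allocation has to happen after the pte lock has been dropped. copy_present_pte() therefore only reports that it needs a page (<0), and copy_pte_range() preallocates one and jumps back to "again", resuming where it stopped. As a rough illustration of that control flow only - not of the kernel code itself - here is a minimal userspace sketch; copy_one(), copy_range() and the malloc()/free() stand-ins for the page allocator are all made up for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Stand-in for the return convention used above:
 *   0  - copied, no extra buffer needed
 *  >0  - copied, and the caller's preallocated buffer was consumed
 *  <0  - a preallocated buffer is required; caller must allocate and retry
 */
static int copy_one(const char *src, char **dst, char *prealloc)
{
	if (*dst)			/* already populated: nothing to do */
		return 0;
	if (!prealloc)			/* may not allocate here ("under the lock") */
		return -1;
	memcpy(prealloc, src, strlen(src) + 1);
	*dst = prealloc;		/* ownership moves to the destination */
	return 1;
}

static int copy_range(const char *srcs[], char *dsts[], int n)
{
	char *prealloc = NULL;
	int i = 0;

again:
	/* "locked" section: no allocation allowed inside this loop */
	for (; i < n; i++) {
		int ret = copy_one(srcs[i], &dsts[i], prealloc);
		if (ret < 0)
			break;			/* need a buffer: leave the loop to get one */
		if (ret > 0)
			prealloc = NULL;	/* consumed: must not be reused */
	}
	/* "unlocked" section: safe to allocate, then retry from where we stopped */
	if (i < n) {
		prealloc = malloc(64);
		if (!prealloc)
			return -1;
		goto again;
	}
	free(prealloc);				/* the last preallocation may be unused */
	return 0;
}

int main(void)
{
	const char *srcs[] = { "one", "two", "three" };
	char *dsts[3] = { NULL, NULL, NULL };
	int i;

	if (copy_range(srcs, dsts, 3) == 0)
		for (i = 0; i < 3; i++)
			printf("%s\n", dsts[i]);
	for (i = 0; i < 3; i++)
		free(dsts[i]);
	return 0;
}

The property the sketch preserves is the same one the patch cares about: the inner loop never allocates, a consumed preallocation is never reused, and a preallocation that ends up unused is freed before returning.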