Re: [PATCH v2 4/4] mm/migration: fix potential pte_unmap on an not mapped pte
From: David Hildenbrand
Date: Fri Apr 29 2022 - 05:52:29 EST
On 25.04.22 15:27, Miaohe Lin wrote:
> __migration_entry_wait and migration_entry_wait_on_locked assume the pte is
> always mapped by the caller. But this is not the case when it's called from
> migration_entry_wait_huge and follow_huge_pmd. Add a hugetlbfs variant that
> calls hugetlb_migration_entry_wait(ptep == NULL) to fix this issue.
>
> Fixes: 30dad30922cc ("mm: migration: add migrate_entry_wait_huge()")
> Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
> Signed-off-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
> ---
> include/linux/swapops.h | 12 ++++++++----
> mm/hugetlb.c | 4 ++--
> mm/migrate.c | 23 +++++++++++++++++++----
> 3 files changed, 29 insertions(+), 10 deletions(-)
>
> diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> index 30cded849ee4..862e5a2053b1 100644
> --- a/include/linux/swapops.h
> +++ b/include/linux/swapops.h
> @@ -244,8 +244,10 @@ extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
> spinlock_t *ptl);
> extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
> unsigned long address);
> -extern void migration_entry_wait_huge(struct vm_area_struct *vma,
> - struct mm_struct *mm, pte_t *pte);
> +#ifdef CONFIG_HUGETLB_PAGE
> +extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
> +extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
> +#endif
> #else
> static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
> {
> @@ -271,8 +273,10 @@ static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
> spinlock_t *ptl) { }
> static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
> unsigned long address) { }
> -static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
> - struct mm_struct *mm, pte_t *pte) { }
> +#ifdef CONFIG_HUGETLB_PAGE
> +static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
> +static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
> +#endif
> static inline int is_writable_migration_entry(swp_entry_t entry)
> {
> return 0;
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 098f81e8550d..994361ec75e0 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5689,7 +5689,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
> */
> entry = huge_ptep_get(ptep);
> if (unlikely(is_hugetlb_entry_migration(entry))) {
> - migration_entry_wait_huge(vma, mm, ptep);
> + migration_entry_wait_huge(vma, ptep);
> return 0;
> } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
> return VM_FAULT_HWPOISON_LARGE |
> @@ -6907,7 +6907,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
> } else {
> if (is_hugetlb_entry_migration(pte)) {
> spin_unlock(ptl);
> - __migration_entry_wait(mm, (pte_t *)pmd, ptl);
> + __migration_entry_wait_huge((pte_t *)pmd, ptl);
The unlock+immediate relock looks a bit sub-optimal, but it was already
that way before your change.
Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>
--
Thanks,
David / dhildenb