[RFC PATCH 17/18] mm: convert vmf.prealloc_pte to struct ptdesc pointer

From: alexs
Date: Tue Jul 30 2024 - 03:24:37 EST


From: Alex Shi <alexs@xxxxxxxxxx>

vmf.prealloc_pte is a pointer to page table memory, so convert it to a
struct ptdesc pointer. This drops the ptdesc_page()/page_ptdesc()
conversions and the casts at the pmd_install() call sites.
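
For illustration, a sketch of the call-site effect (assuming the
pte_alloc_one()/pmd_install() ptdesc signatures from earlier in this
series, as exercised in the hunks below):

	/* before: prealloc_pte is pgtable_t, so conversions/casts are needed */
	vmf->prealloc_pte = ptdesc_page(pte_alloc_one(vma->vm_mm));
	pmd_install(vma->vm_mm, vmf->pmd, (struct ptdesc **)&vmf->prealloc_pte);

	/* after: prealloc_pte is a struct ptdesc *, matching the helpers directly */
	vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
	pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);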

Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
Cc: linux-fsdevel@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
 include/linux/mm.h |  2 +-
 mm/filemap.c       |  2 +-
 mm/memory.c        | 12 ++++++------
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7424f964dff3..749d6dd311fa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -567,7 +567,7 @@ struct vm_fault {
* Protects pte page table if 'pte'
* is not NULL, otherwise pmd.
*/
- pgtable_t prealloc_pte; /* Pre-allocated pte page table.
+ struct ptdesc *prealloc_pte; /* Pre-allocated pte page table.
* vm_ops->map_pages() sets up a page
* table from atomic context.
* do_fault_around() pre-allocates
diff --git a/mm/filemap.c b/mm/filemap.c
index 3708ef71182e..d62150418b91 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3453,7 +3453,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
}

if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
- pmd_install(mm, vmf->pmd, (struct ptdesc **)&vmf->prealloc_pte);
+ pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

return false;
}
diff --git a/mm/memory.c b/mm/memory.c
index 79685600d23f..1a5fb17ab045 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4648,7 +4648,7 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
* # flush A, B to clear the writeback
*/
if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
- vmf->prealloc_pte = ptdesc_page(pte_alloc_one(vma->vm_mm));
+ vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}
@@ -4687,7 +4687,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;

- pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, page_ptdesc(vmf->prealloc_pte));
+ pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
/*
* We are going to consume the prealloc table,
* count that as nr_ptes.
@@ -4726,7 +4726,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
* related to pte entry. Use the preallocated table for that.
*/
if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
- vmf->prealloc_pte = ptdesc_page(pte_alloc_one(vma->vm_mm));
+ vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}
@@ -4868,7 +4868,7 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
}

if (vmf->prealloc_pte)
- pmd_install(vma->vm_mm, vmf->pmd, (struct ptdesc **)&vmf->prealloc_pte);
+ pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
return VM_FAULT_OOM;
}
@@ -5011,7 +5011,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
pte_off + vma_pages(vmf->vma) - vma_off) - 1;

if (pmd_none(*vmf->pmd)) {
- vmf->prealloc_pte = ptdesc_page(pte_alloc_one(vmf->vma->vm_mm));
+ vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
}
@@ -5197,7 +5197,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf)

/* preallocated pagetable is unused: free it */
if (vmf->prealloc_pte) {
- pte_free(vm_mm, page_ptdesc(vmf->prealloc_pte));
+ pte_free(vm_mm, vmf->prealloc_pte);
vmf->prealloc_pte = NULL;
}
return ret;
--
2.43.0