[PATCH v3 2/3] x86/mm/pat: Clean up unused parameter in follow_phys
From: Wupeng Ma
Date: Thu Jan 11 2024 - 07:10:18 EST
From: Ma Wupeng <mawupeng1@xxxxxxxxxx>
The flags parameter is always zero in its only callers, untrack_pfn() and
track_pfn_copy(), so drop it. With no caller ever passing FOLL_WRITE, the
write-permission check in follow_phys() is dead code and is removed as well.
Signed-off-by: Ma Wupeng <mawupeng1@xxxxxxxxxx>
---
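For reference, a sketch of follow_phys() as it looks after this patch.
The lines outside the hunks below (the VM_IO/VM_PFNMAP check, the
follow_pte() lookup and the unlock path) are assumed unchanged from the
current function body and are shown here only for context:

    static int follow_phys(struct vm_area_struct *vma,
                           unsigned long address, unsigned long *prot,
                           resource_size_t *phys)
    {
            int ret = -EINVAL;
            pte_t *ptep, pte;
            spinlock_t *ptl;

            /* Only VM_IO/VM_PFNMAP mappings carry a raw PFN to report. */
            if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                    goto out;

            /* Look up the PTE and take the page-table lock. */
            if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
                    goto out;

            pte = ptep_get(ptep);

            /* The FOLL_WRITE check is gone: no caller ever passed it. */
            *prot = pgprot_val(pte_pgprot(pte));
            *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;

            ret = 0;
            pte_unmap_unlock(ptep, ptl);
    out:
            return ret;
    }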
arch/x86/mm/pat/memtype.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index a6a88679c92e..94bcba399701 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -951,8 +951,8 @@ static void free_pfn_range(u64 paddr, unsigned long size)
}
static int follow_phys(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned long *prot, resource_size_t *phys)
+ unsigned long address, unsigned long *prot,
+ resource_size_t *phys)
{
int ret = -EINVAL;
pte_t *ptep, pte;
@@ -965,9 +965,6 @@ static int follow_phys(struct vm_area_struct *vma,
goto out;
pte = ptep_get(ptep);
- if ((flags & FOLL_WRITE) && !pte_write(pte))
- goto unlock;
-
*prot = pgprot_val(pte_pgprot(pte));
*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
@@ -997,7 +994,7 @@ int track_pfn_copy(struct vm_area_struct *vma)
* reserve the whole chunk covered by vma. We need the
* starting address and protection from pte.
*/
- if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+ if (follow_phys(vma, vma->vm_start, &prot, &paddr)) {
WARN_ON_ONCE(1);
return -EINVAL;
}
@@ -1084,7 +1081,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
/* free the chunk starting from pfn or the whole chunk */
paddr = (resource_size_t)pfn << PAGE_SHIFT;
if (!paddr && !size) {
- if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+ if (follow_phys(vma, vma->vm_start, &prot, &paddr)) {
WARN_ON_ONCE(1);
return;
}
--
2.25.1