[PATCH 1/2] mm/hugetlb: Match behavior on read-only private mappings

From: Peter Xu
Date: Thu Apr 11 2024 - 16:23:50 EST


The default behavior when reading a private file mapping is to fill in the
page cache and map the page read-only. Hugetlb didn't follow that; make it
behave the same way.
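
For illustration, here is a minimal userspace sketch of the case this patch
changes (the hugetlbfs mount point, file name, and 2M huge page size are
assumptions; error handling is trimmed):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Assumes a hugetlbfs mount at /dev/hugepages with 2M pages */
	size_t len = 2UL << 20;
	int fd = open("/dev/hugepages/test", O_CREAT | O_RDWR, 0600);
	char *p, c;

	if (fd < 0 || ftruncate(fd, len))
		return 1;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/*
	 * Read fault on the private mapping: with this patch the folio
	 * is added to the page cache and mapped read-only, matching
	 * what other filesystems do by default.
	 */
	c = *(volatile char *)p;

	/*
	 * Write fault: the mapping is private, so the fault is served
	 * by an anonymous copy; the page cache folio stays intact.
	 */
	p[0] = c + 1;

	munmap(p, len);
	close(fd);
	return 0;
}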

Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
mm/hugetlb.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a8536349de13..bc3c97c476d2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6199,6 +6199,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
struct folio *folio;
pte_t new_pte;
bool new_folio, new_pagecache_folio = false;
+ bool is_write = vmf->flags & FAULT_FLAG_WRITE;
u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);

/*
@@ -6276,7 +6277,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
__folio_mark_uptodate(folio);
new_folio = true;

- if (vma->vm_flags & VM_MAYSHARE) {
+ if (!is_write || (vma->vm_flags & VM_MAYSHARE)) {
int err = hugetlb_add_to_page_cache(folio, mapping,
vmf->pgoff);
if (err) {
@@ -6294,6 +6295,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
}
new_pagecache_folio = true;
} else {
+ /* Write fault on a private mapping: map the new folio anonymously */
folio_lock(folio);

ret = vmf_anon_prepare(vmf);
@@ -6333,7 +6335,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* any allocations necessary to record that reservation occur outside
* the spinlock.
*/
- if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+ if (is_write && !(vma->vm_flags & VM_SHARED)) {
if (vma_needs_reservation(h, vma, vmf->address) < 0) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
--
2.44.0