[PATCH] hugetlb: fix irq locking omissions

From: Mike Kravetz
Date: Fri Apr 02 2021 - 16:18:13 EST


The patch "hugetlb: make free_huge_page irq safe" changed spin_*lock
calls to spin_*lock_irq* calls. However, it missed several places
in hugetlb.c. Add the overlooked conversions.
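
For background: once free_huge_page() can be reached from (soft)irq
context, any path that takes hugetlb_lock with interrupts enabled
must disable them for the critical section, otherwise an interrupt
taken while the lock is held could try to take the same lock and
deadlock. A minimal sketch of the pattern (purely illustrative, not
part of this patch):

	/*
	 * Process-context callers know irqs are enabled here, so the
	 * cheaper _irq variants suffice; spin_lock_irqsave() would
	 * only be needed if irqs might already be disabled.
	 */
	spin_lock_irq(&hugetlb_lock);	/* lock + disable local irqs */
	/* ... update hstate counters/free lists ... */
	spin_unlock_irq(&hugetlb_lock);	/* unlock + re-enable irqs */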

Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
---
mm/hugetlb.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c22111f3da20..a6bfc6bcbc81 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2284,7 +2284,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
*/
page_ref_dec(new_page);
retry:
- spin_lock(&hugetlb_lock);
+ spin_lock_irq(&hugetlb_lock);
if (!PageHuge(old_page)) {
/*
* Freed from under us. Drop new_page too.
@@ -2297,7 +2297,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
* Fail with -EBUSY if not possible.
*/
update_and_free_page(h, new_page);
- spin_unlock(&hugetlb_lock);
+ spin_unlock_irq(&hugetlb_lock);
if (!isolate_huge_page(old_page, list))
ret = -EBUSY;
return ret;
@@ -2307,7 +2307,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
* freelist yet. Race window is small, so we can succed here if
* we retry.
*/
- spin_unlock(&hugetlb_lock);
+ spin_unlock_irq(&hugetlb_lock);
cond_resched();
goto retry;
} else {
@@ -2323,7 +2323,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
__enqueue_huge_page(&h->hugepage_freelists[nid], new_page);
}
unlock:
- spin_unlock(&hugetlb_lock);
+ spin_unlock_irq(&hugetlb_lock);

return ret;
}
@@ -2339,15 +2339,15 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
* to carefully check the state under the lock.
* Return success when racing as if we dissolved the page ourselves.
*/
- spin_lock(&hugetlb_lock);
+ spin_lock_irq(&hugetlb_lock);
if (PageHuge(page)) {
head = compound_head(page);
h = page_hstate(head);
} else {
- spin_unlock(&hugetlb_lock);
+ spin_unlock_irq(&hugetlb_lock);
return 0;
}
- spin_unlock(&hugetlb_lock);
+ spin_unlock_irq(&hugetlb_lock);

/*
* Fence off gigantic pages as there is a cyclic dependency between
@@ -2737,7 +2737,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
* pages in hstate via the proc/sysfs interfaces.
*/
mutex_lock(&h->resize_lock);
- spin_lock(&hugetlb_lock);
+ spin_lock_irq(&hugetlb_lock);

/*
* Check for a node specific request.
--
2.30.2