[PATCH v5 8/8] hugetlb: add lockdep_assert_held() calls for hugetlb_lock
From: Mike Kravetz
Date: Fri Apr 09 2021 - 16:54:13 EST
After making hugetlb_lock irq safe and separating some functionality
done under the lock, add some lockdep_assert_held() calls to help
verify locking.
Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Acked-by: Michal Hocko <mhocko@xxxxxxxx>
Reviewed-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Reviewed-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Reviewed-by: Oscar Salvador <osalvador@xxxxxxx>
---
mm/hugetlb.c | 9 +++++++++
1 file changed, 9 insertions(+)
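
For reference, lockdep_assert_held() is a no-op when lockdep is not
enabled (CONFIG_LOCKDEP/CONFIG_PROVE_LOCKING) and otherwise warns if the
current context does not hold the given lock. A minimal sketch of the
pattern the hunks below add, using a hypothetical lock and helper that
are not part of this patch:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

	/* Callers must hold example_lock. */
	static void example_update_counters(void)
	{
		/* Warns under lockdep if example_lock is not held here. */
		lockdep_assert_held(&example_lock);
		/* ... update state protected by example_lock ... */
	}

The asserts document the locking requirement in code and let lockdep
catch any caller that reaches these helpers without hugetlb_lock held.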
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 049ca0bccfcc..5cf2b7e5ca50 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1062,6 +1062,8 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
+
+	lockdep_assert_held(&hugetlb_lock);
 	list_move(&page->lru, &h->hugepage_freelists[nid]);
 	h->free_huge_pages++;
 	h->free_huge_pages_node[nid]++;
@@ -1073,6 +1075,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 	struct page *page;
 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
 
+	lockdep_assert_held(&hugetlb_lock);
 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
 		if (pin && !is_pinnable_page(page))
 			continue;
@@ -1344,6 +1347,7 @@ static void remove_hugetlb_page(struct hstate *h, struct page *page,
 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
 	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
 
+	lockdep_assert_held(&hugetlb_lock);
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
@@ -1694,6 +1698,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 	int nr_nodes, node;
 	struct page *page = NULL;
 
+	lockdep_assert_held(&hugetlb_lock);
 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
 		/*
 		 * If we're returning unused surplus pages, only examine
@@ -1943,6 +1948,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 	long needed, allocated;
 	bool alloc_ok = true;
 
+	lockdep_assert_held(&hugetlb_lock);
 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
 	if (needed <= 0) {
 		h->resv_huge_pages += delta;
@@ -2036,6 +2042,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	struct page *page;
 	LIST_HEAD(page_list);
 
+	lockdep_assert_held(&hugetlb_lock);
 	/* Uncommit the reservation */
 	h->resv_huge_pages -= unused_resv_pages;
 
@@ -2524,6 +2531,7 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 	int i;
 	LIST_HEAD(page_list);
 
+	lockdep_assert_held(&hugetlb_lock);
 	if (hstate_is_gigantic(h))
 		return;
 
@@ -2565,6 +2573,7 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 {
 	int nr_nodes, node;
 
+	lockdep_assert_held(&hugetlb_lock);
 	VM_BUG_ON(delta != -1 && delta != 1);
 
 	if (delta < 0) {
--
2.30.2