[RFC PATCH 19/24] mm/hugetlb: Flush work when dissolving hugetlb page

From: Muchun Song
Date: Tue Sep 15 2020 - 20:41:37 EST


When dissolving a hugetlb page, update_and_free_page() may defer the
actual freeing to the hpage_update_work work item. Flush that work
before dissolve_free_huge_page() returns so that callers can rely on
the page having been released to the buddy allocator.
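
For context, below is a minimal sketch of the deferred-free pattern
that makes the flush necessary (illustrative only: hpage_update_work
and update_and_free_page() match the diff, while the llist handling
and the hpage_node()/hpage_from_node()/__free_hugetlb_page() helpers
are hypothetical stand-ins for code introduced earlier in this
series):

#include <linux/llist.h>
#include <linux/workqueue.h>

/* Hypothetical: dissolved pages waiting to be handed to the buddy. */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node = llist_del_all(&hpage_freelist);

	while (node) {
		/* hpage_from_node(): hypothetical helper that recovers
		 * the struct page from its list node. */
		struct page *page = hpage_from_node(node);

		node = node->next;
		/* __free_hugetlb_page(): hypothetical stand-in for
		 * restoring the vmemmap and freeing to the buddy. */
		__free_hugetlb_page(page);
	}
}
static DECLARE_WORK(hpage_update_work, free_hpage_workfn);

/* Called under the hugetlb_lock spinlock, so the possibly-sleeping
 * vmemmap restore cannot run inline; queue the page instead. */
static void update_and_free_page(struct hstate *h, struct page *page)
{
	/* ... hstate accounting elided ... */
	llist_add(hpage_node(page), &hpage_freelist);	/* hypothetical */
	schedule_work(&hpage_update_work);
}

Since flush_work() waits for the last queueing instance of
hpage_update_work to finish, a page queued before the flush is
guaranteed to have reached the buddy allocator once
flush_free_huge_page_work() returns.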

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
mm/hugetlb.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8666cedf9a7b..56c0bf2370ed 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1848,6 +1848,11 @@ static inline void free_gigantic_page_comm(struct hstate *h, struct page *page)
 	free_gigantic_page(page, huge_page_order(h));
 }
 
+static inline void flush_free_huge_page_work(void)
+{
+	flush_work(&hpage_update_work);
+}
+
 static inline bool subpage_hwpoison(struct page *head, struct page *page)
 {
 	return page_private(head + 4) == page - head;
@@ -1910,6 +1915,10 @@ static inline void free_gigantic_page_comm(struct hstate *h, struct page *page)
 	spin_lock(&hugetlb_lock);
 }
 
+static inline void flush_free_huge_page_work(void)
+{
+}
+
 static inline bool subpage_hwpoison(struct page *head, struct page *page)
 {
 	return true;
@@ -2484,6 +2493,7 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 int dissolve_free_huge_page(struct page *page)
 {
 	int rc = -EBUSY;
+	bool need_flush = false;
 
 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
 	if (!PageHuge(page))
@@ -2515,10 +2525,19 @@ int dissolve_free_huge_page(struct page *page)
 		h->free_huge_pages_node[nid]--;
 		h->max_huge_pages--;
 		update_and_free_page(h, head);
+		need_flush = true;
 		rc = 0;
 	}
 out:
 	spin_unlock(&hugetlb_lock);
+
+	/*
+	 * Flush the work before returning to make sure that the
+	 * hugetlb page has been freed to the buddy allocator.
+	 */
+	if (need_flush)
+		flush_free_huge_page_work();
+
 	return rc;
 }
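
(Note, inferred from the two hunks above: the second definition of
flush_free_huge_page_work() is an empty stub for the configuration in
which update_and_free_page() frees the page synchronously; with no
deferred work in that case, there is nothing to flush.)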

--
2.20.1