Those counts will be wrong as there are no huge pages on the node.
I'll think about this more tomorrow.
Pretty sure this is an issue, but I could be wrong. Just wanted to give
you a heads-up.
--
Mike Kravetz
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * isolate_or_dissolve_huge_page - try to get rid of a free hugetlb page so
+ * its range becomes usable by alloc_contig_range().
+ * @page: any subpage of the candidate hugetlb page.
+ *
+ * Returns true when the page is no longer a huge page: either it was
+ * dissolved from under us, or alloc_and_dissolve_huge_page() replaced it
+ * with a fresh huge page and dissolved the old one. Returns false for
+ * gigantic pages and on failure.
+ */
+bool isolate_or_dissolve_huge_page(struct page *page)
+{
+	struct hstate *h = NULL;
+	struct page *head;
+	bool ret = false;
+
+	/*
+	 * Take hugetlb_lock so the PageHuge() check and the hstate lookup
+	 * are consistent with a concurrent dissolve/free.
+	 */
+	spin_lock(&hugetlb_lock);
+	if (PageHuge(page)) {
+		head = compound_head(page);
+		h = page_hstate(head);
+	}
+	spin_unlock(&hugetlb_lock);
+
+	/*
+	 * The page might have been dissolved from under our feet.
+	 * If that is the case, return success as if we dissolved it ourselves.
+	 */
+	if (!h)
+		return true;
+
+	/*
+	 * Fence off gigantic pages as there is a cyclic dependency
+	 * between alloc_contig_range and them.
+	 */
+	if (hstate_is_gigantic(h))
+		return ret;
+
+	/*
+	 * Only a free (refcount == 0) hugetlb page can be dissolved.
+	 * NOTE(review): head is inspected here after hugetlb_lock was
+	 * dropped, so a concurrent dissolve can race with this check —
+	 * TODO confirm the helper re-validates under the lock.
+	 */
+	if (!page_count(head) && alloc_and_dissolve_huge_page(h, head))
+		ret = true;
+
+	return ret;
+}
+
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{