[PATCH] mm/cma: fix race between pageblock isolation and buddy merging

From: Joonsoo Kim
Date: Thu Mar 03 2016 - 23:28:17 EST


While a pageblock is isolated for CMA, a page freed in an adjacent
pageblock can still be merged with a buddy that lies inside the isolated
pageblock, because page_is_buddy() only checks that the buddy is free,
has the same order and sits in the same zone. The merged high-order page
then spans an isolated and a non-isolated pageblock and ends up on the
wrong free list, breaking the freepage accounting that CMA relies on.

Close the race by passing the zone down to page_is_buddy() and refusing
to treat a page in an isolated pageblock as a buddy for merges at or
above pageblock_order. The new check is compiled in only for CONFIG_CMA
and is taken only while the zone actually has isolated pageblocks, so
the common free path is unaffected.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
---
mm/page_alloc.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c6c38ed..a01c3b5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -620,8 +620,8 @@ static inline void rmv_page_order(struct page *page)
  *
  * For recording page's order, we use page_private(page).
  */
-static inline int page_is_buddy(struct page *page, struct page *buddy,
-					unsigned int order)
+static inline int page_is_buddy(struct zone *zone, struct page *page,
+					struct page *buddy, unsigned int order)
 {
 	if (!pfn_valid_within(page_to_pfn(buddy)))
 		return 0;
@@ -644,6 +644,12 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 		if (page_zone_id(page) != page_zone_id(buddy))
 			return 0;
 
+		if (IS_ENABLED(CONFIG_CMA) &&
+			has_isolate_pageblock(zone) &&
+			order >= pageblock_order &&
+			is_migrate_isolate(get_pageblock_migratetype(buddy)))
+			return 0;
+
 		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 
 		return 1;
@@ -711,7 +717,7 @@ static inline void __free_one_page(struct page *page,
 	while (order < max_order - 1) {
 		buddy_idx = __find_buddy_index(page_idx, order);
 		buddy = page + (buddy_idx - page_idx);
-		if (!page_is_buddy(page, buddy, order))
+		if (!page_is_buddy(zone, page, buddy, order))
 			break;
 		/*
 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
@@ -745,7 +751,7 @@ static inline void __free_one_page(struct page *page,
 		higher_page = page + (combined_idx - page_idx);
 		buddy_idx = __find_buddy_index(combined_idx, order + 1);
 		higher_buddy = higher_page + (buddy_idx - combined_idx);
-		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+		if (page_is_buddy(zone, higher_page, higher_buddy, order + 1)) {
 			list_add_tail(&page->lru,
 				&zone->free_area[order].free_list[migratetype]);
 			goto out;
--
1.9.1
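
For anyone wondering why the new check is gated on order >= pageblock_order:
buddies at order < pageblock_order always live inside the same pageblock, so
only a merge at pageblock_order or above can pull in a buddy from a
neighbouring, possibly isolated, pageblock. The sketch below is illustration
only, not part of the patch; buddy_of(), pageblock_of() and the
PAGEBLOCK_ORDER value of 9 (4KB pages, 2MB pageblocks) are assumptions made
for the example.

#include <stdio.h>

#define PAGEBLOCK_ORDER	9	/* assumed: 4KB pages, 2MB pageblocks */

/* same XOR trick as __find_buddy_index(), applied to pfns for simplicity */
static unsigned long buddy_of(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

static unsigned long pageblock_of(unsigned long pfn)
{
	return pfn >> PAGEBLOCK_ORDER;
}

int main(void)
{
	unsigned long pfn = 0x1000;	/* arbitrary pageblock-aligned pfn */
	unsigned int order;

	for (order = PAGEBLOCK_ORDER - 1; order <= PAGEBLOCK_ORDER; order++) {
		unsigned long buddy = buddy_of(pfn, order);

		printf("order %2u: pfn %#lx in pageblock %lu, buddy %#lx in pageblock %lu\n",
		       order, pfn, pageblock_of(pfn),
		       buddy, pageblock_of(buddy));
	}
	/*
	 * The output shows the buddy stays inside the same pageblock for
	 * order < PAGEBLOCK_ORDER and only crosses into the neighbouring
	 * pageblock at order >= PAGEBLOCK_ORDER, which is exactly the case
	 * the patch refuses to merge when that neighbour is isolated.
	 */
	return 0;
}

Restricting the get_pageblock_migratetype() lookup to that case keeps the
extra work off the common small-order free path.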