diff --git a/mm/swap.c b/mm/swap.c
index c02f936..74118d2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -40,6 +40,25 @@ int page_cluster;
 static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 
+static noinline void SetPageActiveCheck(struct page *page)
+{
+	unsigned long x;
+	while(true) {
+		unsigned long flags = page->flags;
+		x = cmpxchg(&(page)->flags, flags,
+			    flags | (1 << PG_active));
+		if (x == flags) break;
+	}
+	if (~x & (1 << PG_lru)) {
+		char name[sizeof(current->comm)];
+		printk(KERN_ERR "%s %pS: SetPageActive(%p) w/ prev = %lX\n",
+		       get_task_comm(name, current),
+		       __builtin_return_address(0), page, x);
+	}
+}
+
+#define SetPageActive SetPageActiveCheck
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs. But it gets used by networking.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3f44b81..dc417ab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,6 +53,25 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
+static noinline void SetPageActiveCheck(struct page *page)
+{
+	unsigned long x;
+	while(true) {
+		unsigned long flags = page->flags;
+		x = cmpxchg(&(page)->flags, flags,
+			    flags | (1 << PG_active));
+		if (x == flags) break;
+	}
+	if (~x & (1 << PG_lru)) {
+		char name[sizeof(current->comm)];
+		printk(KERN_ERR "%s %pS: SetPageActive(%p) w/ prev = %lX\n",
+		       get_task_comm(name, current),
+		       __builtin_return_address(0), page, x);
+	}
+}
+
+#define SetPageActive SetPageActiveCheck
+
 /*
  * reclaim_mode determines how the inactive list is shrunk
  * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
@@ -729,7 +748,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (!trylock_page(page))
 			goto keep;
 
-		VM_BUG_ON(PageActive(page));
+		if (PageActive(page)) {
+			char name[sizeof(current->comm)];
+			printk(KERN_ERR "%s: shrink_page_list (nr_scanned=%lu nr_reclaimed=%lu nr_to_reclaim=%lu gfp_mask=%X) found inactive page %p with flags=%lX\n",
+			       get_task_comm(name, current),
+			       sc->nr_scanned, sc->nr_reclaimed,
+			       sc->nr_to_reclaim, sc->gfp_mask, page,
+			       page->flags);
+			//VM_BUG_ON(PageActive(page));
+			msleep(1);
+			continue;
+		}
 		VM_BUG_ON(page_zone(page) != zone);
 
 		sc->nr_scanned++;
@@ -2247,6 +2276,10 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
 	unsigned long balanced = 0;
 	bool all_zones_ok = true;
 
+	/* If kswapd has been running too long, just sleep */
+	if (need_resched())
+		return false;
+
 	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
 	if (remaining)
 		return true;
@@ -2282,7 +2315,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
 	 * must be balanced
 	 */
 	if (order)
-		return pgdat_balanced(pgdat, balanced, classzone_idx);
+		return !pgdat_balanced(pgdat, balanced, classzone_idx);
 	else
 		return !all_zones_ok;
 }
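The debug helper added above hinges on a cmpxchg() retry loop: read page->flags, attempt to install the same word with PG_active ORed in, and retry if another CPU has modified the flags in the meantime. The value the successful exchange returns is the flags word exactly as it was before the bit was set, so the PG_lru check is made against that snapshot rather than a later, possibly changed, value. Below is a minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's cmpxchg(); the struct and bit positions are made up for illustration and are not the real struct page or page-flags definitions.

/*
 * Minimal userspace sketch of the cmpxchg() loop in SetPageActiveCheck().
 * C11 atomics stand in for the kernel primitive; fake_page and the bit
 * positions below are illustrative, not the real struct page layout.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PG_ACTIVE 0		/* hypothetical bit numbers */
#define PG_LRU    1

struct fake_page {
	_Atomic unsigned long flags;
};

/* Atomically set PG_ACTIVE and return the flags word as it was just before. */
static unsigned long set_active_and_get_prev(struct fake_page *page)
{
	unsigned long old = atomic_load(&page->flags);

	/*
	 * Retry until the compare-and-swap succeeds.  On failure,
	 * atomic_compare_exchange_weak() reloads 'old' with the current
	 * value, so each attempt works on a fresh snapshot.
	 */
	while (!atomic_compare_exchange_weak(&page->flags, &old,
					     old | (1UL << PG_ACTIVE)))
		;
	return old;
}

int main(void)
{
	struct fake_page page = { .flags = 0 };	/* PG_LRU not set */
	unsigned long prev = set_active_and_get_prev(&page);

	/* Same check as the instrumentation: complain if PG_LRU was clear. */
	if (~prev & (1UL << PG_LRU))
		printf("SetPageActive with prev = %lX (page not on LRU)\n", prev);
	return 0;
}

A plain test_and_set_bit() would only report the previous state of the one bit being set, which is presumably why the instrumentation uses a full cmpxchg() loop: it captures the entire previous flags word so that unrelated bits such as PG_lru can be inspected and printed.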