On Thu, 23 May 2019 10:27:38 +0800 Yang Shi wrote:
> @@ -1642,14 +1650,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
>  	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
>  	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
>  	unsigned long skipped = 0;
> -	unsigned long scan, total_scan, nr_pages;
> +	unsigned long scan, total_scan;
> +	unsigned long nr_pages;

Change for no gain :)

>  	LIST_HEAD(pages_skipped);
>  	isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
>  
> +	total_scan = 0;
>  	scan = 0;
> -	for (total_scan = 0;
> -	     scan < nr_to_scan && nr_taken < nr_to_scan && !list_empty(src);
> -	     total_scan++) {
> +	while (scan < nr_to_scan && !list_empty(src)) {
>  		struct page *page;

AFAICS scan currently prevents us from looping forever, while nr_taken bails
us out once we get what's expected, so I doubt it makes much sense to cut
nr_taken off.
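To make the two exit conditions concrete, here is a side-by-side sketch of
just the loop headers (trimmed from the hunk above, not a compilable unit):

	/* before: scan bounds the walk, nr_taken bails out early */
	for (total_scan = 0;
	     scan < nr_to_scan &&		/* upper bound, no endless loop */
	     nr_taken < nr_to_scan &&	/* stop once we isolated enough */
	     !list_empty(src);
	     total_scan++) {
		...
	}

	/* after: only the scan bound is left */
	while (scan < nr_to_scan && !list_empty(src)) {
		...
	}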
>  		page = lru_to_page(src);
> @@ -1657,9 +1665,12 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
>  		VM_BUG_ON_PAGE(!PageLRU(page), page);
>  
> +		nr_pages = 1 << compound_order(page);
> +		total_scan += nr_pages;
> +
>  		if (page_zonenum(page) > sc->reclaim_idx) {
>  			list_move(&page->lru, &pages_skipped);
> -			nr_skipped[page_zonenum(page)]++;
> +			nr_skipped[page_zonenum(page)] += nr_pages;
>  			continue;
>  		}
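Unless I misread the patch, "1 << compound_order(page)" computes the same
value hpage_nr_pages() used to supply in the success branch below, only now
it is computed up front for every page we look at; on x86_64 with PMD-sized
THP that is

	nr_pages = 1 << compound_order(page);	/* 1 for a base page,
						 * 512 for a THP head */

so the change is about where the size gets accounted, not the size itself.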
> @@ -1669,10 +1680,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
>  		 * ineligible pages. This causes the VM to not reclaim any
>  		 * pages, triggering a premature OOM.
>  		 */
> -		scan++;
> +		scan += nr_pages;
The comment looks to defy the change if we fail to add a huge page to
the dst list; otherwise nr_taken knows how to do the right thing. What
I prefer is to let scan do one thing at a time.
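To illustrate the worry: if __isolate_lru_page() fails on a THP (-EBUSY,
say), scan still jumps by 512 on x86_64 even though nothing reached dst, so
the walk can end early without reclaiming anything. One way to keep scan
doing a single job, sketched only and untested, is to keep the per-attempt
tick and leave the size-aware accounting to nr_taken, as the current code
does:

	scan++;		/* one attempt, one tick: just bounds the walk */

	switch (__isolate_lru_page(page, mode)) {
	case 0:
		nr_pages = hpage_nr_pages(page);
		nr_taken += nr_pages;	/* size-aware where it matters */
		nr_zone_taken[page_zonenum(page)] += nr_pages;
		list_move(&page->lru, dst);
		break;
	...
	}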
>  		switch (__isolate_lru_page(page, mode)) {
>  		case 0:
> -			nr_pages = hpage_nr_pages(page);
>  			nr_taken += nr_pages;
>  			nr_zone_taken[page_zonenum(page)] += nr_pages;
>  			list_move(&page->lru, dst);
> --
> 1.8.3.1

Best Regards
Hillf