--- linux/mm/vmscan.c	Sat Nov 17 22:18:17 2001
+++ linux.mt/mm/vmscan.c	Mon Dec 10 19:55:23 2001
@@ -531,13 +531,14 @@
  * We move them the other way when we see the
  * reference bit on the page.
  */
-static void refill_inactive(int nr_pages)
+static int refill_inactive(int nr_pages)
 {
 	struct list_head * entry;
+	int count = 0;
 
 	spin_lock(&pagemap_lru_lock);
 	entry = active_list.prev;
-	while (nr_pages-- && entry != &active_list) {
+	while (nr_pages && entry != &active_list) {
 		struct page * page;
 
 		page = list_entry(entry, struct page, lru);
@@ -545,14 +546,16 @@
 		if (PageTestandClearReferenced(page)) {
 			list_del(&page->lru);
 			list_add(&page->lru, &active_list);
-			continue;
+		} else {
+			del_page_from_active_list(page);
+			add_page_to_inactive_list(page);
+			SetPageReferenced(page);
+			nr_pages--;
+			count++;
 		}
-
-		del_page_from_active_list(page);
-		add_page_to_inactive_list(page);
-		SetPageReferenced(page);
 	}
 	spin_unlock(&pagemap_lru_lock);
+	return count;
 }
 
 static int FASTCALL(shrink_caches(zone_t * classzone, int priority, unsigned int gfp_mask, int nr_pages));
@@ -564,22 +567,24 @@
 	nr_pages -= kmem_cache_reap(gfp_mask);
 	if (nr_pages <= 0)
 		return 0;
 
+	shrink_dcache_memory(priority, gfp_mask);
+	shrink_icache_memory(priority, gfp_mask);
+#ifdef CONFIG_QUOTA
+	shrink_dqcache_memory(DEF_PRIORITY, gfp_mask);
+#endif
 	nr_pages = chunk_size;
 	/* try to keep the active list 2/3 of the size of the cache */
 	ratio = (unsigned long) nr_pages * nr_active_pages / ((nr_inactive_pages + 1) * 2);
+	if(!ratio)
+		ratio = nr_pages;
+
 	refill_inactive(ratio);
 
 	nr_pages = shrink_cache(nr_pages, classzone, gfp_mask, priority);
+
 	if (nr_pages <= 0)
 		return 0;
-
-	shrink_dcache_memory(priority, gfp_mask);
-	shrink_icache_memory(priority, gfp_mask);
-#ifdef CONFIG_QUOTA
-	shrink_dqcache_memory(DEF_PRIORITY, gfp_mask);
-#endif
-
 	return nr_pages;
 }
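
For anyone who wants to play with the idea outside the kernel, below is a
minimal userspace sketch of the patched refill_inactive() logic.  It is an
illustration only: struct lru, lru_add_head() and lru_del() are simplified
stand-ins invented here for the kernel's list_head/LRU machinery, the
"referenced" flag stands in for PG_referenced, and there is no
pagemap_lru_lock equivalent.

/*
 * Userspace sketch of the patched refill_inactive().  The list and
 * page types below are illustrative stand-ins, not kernel interfaces.
 */
#include <stdio.h>

struct page {
	int id;
	int referenced;			/* stand-in for PG_referenced */
	struct page *prev, *next;	/* prev points toward the hot end */
};

struct lru {
	struct page *head, *tail;	/* head = hottest, tail = coldest */
	int nr;
};

static void lru_add_head(struct lru *l, struct page *p)
{
	p->prev = NULL;
	p->next = l->head;
	if (l->head)
		l->head->prev = p;
	else
		l->tail = p;
	l->head = p;
	l->nr++;
}

static void lru_del(struct lru *l, struct page *p)
{
	if (p->prev)
		p->prev->next = p->next;
	else
		l->head = p->next;
	if (p->next)
		p->next->prev = p->prev;
	else
		l->tail = p->prev;
	l->nr--;
}

/* Mirrors the patched loop: scan from the cold end of the active list. */
static int refill_inactive(struct lru *active, struct lru *inactive, int nr_pages)
{
	struct page *entry = active->tail;
	int count = 0;

	while (nr_pages && entry) {
		struct page *page = entry;

		entry = entry->prev;	/* step toward the hot end first */
		if (page->referenced) {
			/* Referenced since the last scan: clear the bit
			 * and rotate the page back to the hot end. */
			page->referenced = 0;
			lru_del(active, page);
			lru_add_head(active, page);
		} else {
			/* Cold page: deactivate it.  Only deactivations
			 * consume nr_pages, and only they are counted. */
			lru_del(active, page);
			lru_add_head(inactive, page);
			page->referenced = 1;
			nr_pages--;
			count++;
		}
	}
	return count;
}

int main(void)
{
	struct lru active = { 0 }, inactive = { 0 };
	struct page pages[6] = { 0 };
	int i, moved;

	for (i = 0; i < 6; i++) {
		pages[i].id = i;
		pages[i].referenced = (i % 2);	/* every other page referenced */
		lru_add_head(&active, &pages[i]);
	}
	moved = refill_inactive(&active, &inactive, 3);
	printf("moved %d page(s); active=%d inactive=%d\n",
	       moved, active.nr, inactive.nr);
	return 0;
}

The sketch shows the behavioural change the patch makes: nr_pages now counts
actual deactivations rather than pages scanned, referenced pages rotate back
to the hot end instead of being skipped with continue, and the function
reports how many pages were really moved to the inactive list.  The
shrink_caches() hunk also moves the dcache/icache/quota shrinking ahead of
shrink_cache() and falls back to ratio = nr_pages when the computed ratio
rounds down to zero.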