--- linux-orig/mm/vmscan.c	Thu Nov  1 15:33:58 2001
+++ linux/mm/vmscan.c	Fri Nov  2 13:50:31 2001
@@ -290,7 +290,7 @@
 static int FASTCALL(swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * classzone));
 static int swap_out(unsigned int priority, unsigned int gfp_mask, zone_t * classzone)
 {
-	int counter, nr_pages = SWAP_CLUSTER_MAX;
+	int counter, nr_pages = SWAP_CLUSTER_MAX * DEF_PRIORITY / priority;
 	struct mm_struct *mm;
 
 	counter = mmlist_nr;
@@ -334,7 +334,7 @@
 {
 	struct list_head * entry;
 	int max_scan = nr_inactive_pages / priority;
-	int max_mapped = nr_pages*10;
+	int max_mapped = SWAP_CLUSTER_MAX * DEF_PRIORITY / priority;
 
 	spin_lock(&pagemap_lru_lock);
 	while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) {
@@ -469,16 +469,10 @@
 			spin_unlock(&pagecache_lock);
 			UnlockPage(page);
 page_mapped:
-			if (--max_mapped >= 0)
-				continue;
+			if (max_mapped > 0)
+				max_mapped--;
+			continue;
 
-			/*
-			 * Alert! We've found too many mapped pages on the
-			 * inactive list, so we start swapping out now!
-			 */
-			spin_unlock(&pagemap_lru_lock);
-			swap_out(priority, gfp_mask, classzone);
-			return nr_pages;
 		}
 
 		/*
@@ -514,6 +508,14 @@
 			break;
 	}
 	spin_unlock(&pagemap_lru_lock);
+
+	/*
+	 * Alert! We've found too many mapped pages on the
+	 * inactive list, so we start swapping out - delayed!
+	 * -skraw
+	 */
+	if (max_mapped==0 && nr_pages>0)
+		swap_out(priority, gfp_mask, classzone);
 
 	return nr_pages;
 }
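
For readers tracking the behavioural change rather than the raw hunks: the patch does two things. It scales both the reclaim target (nr_pages) and the mapped-page budget (max_mapped) in shrink_cache() by DEF_PRIORITY / priority, so a lighter-priority scan gets a proportionally smaller budget, and it moves the swap_out() call out of the middle of the LRU scan (where exhausting max_mapped used to abort the scan on the spot) to a single deferred call after the scan finishes. Below is a minimal user-space sketch of that control-flow change, assuming the 2.4-era values of DEF_PRIORITY (6) and SWAP_CLUSTER_MAX (32); every identifier in it (scan_old, scan_new, swap_out_sim, the mapped[] array) is invented for illustration and none of it is kernel code.

/*
 * Minimal user-space sketch (NOT kernel code) of the control-flow
 * change in this patch.  All identifiers are hypothetical.
 * Build: cc -o sketch sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY	 6	/* value as in 2.4-era mm headers */
#define SWAP_CLUSTER_MAX 32	/* value as in 2.4-era mm headers */

static void swap_out_sim(const char *when)
{
	printf("swap_out() called %s\n", when);
}

/* Old behaviour: abort the LRU scan the moment the budget underflows. */
static int scan_old(const bool *mapped, int n)
{
	int nr_pages = SWAP_CLUSTER_MAX;	/* fixed, ignores priority */
	int max_mapped = nr_pages * 10;

	for (int i = 0; i < n; i++) {
		if (mapped[i]) {
			if (--max_mapped >= 0)
				continue;
			swap_out_sim("mid-scan (old)");
			return nr_pages;	/* scan aborted here */
		}
		if (--nr_pages == 0)		/* treat unmapped as freed */
			break;
	}
	return nr_pages;
}

/* New behaviour: finish the scan, then swap out once if still needed. */
static int scan_new(const bool *mapped, int n, int priority)
{
	int nr_pages = SWAP_CLUSTER_MAX * DEF_PRIORITY / priority;
	int max_mapped = SWAP_CLUSTER_MAX * DEF_PRIORITY / priority;

	for (int i = 0; i < n; i++) {
		if (mapped[i]) {
			if (max_mapped > 0)
				max_mapped--;
			continue;
		}
		if (--nr_pages == 0)
			break;
	}
	if (max_mapped == 0 && nr_pages > 0)
		swap_out_sim("after the scan (new)");
	return nr_pages;
}

int main(void)
{
	bool mapped[400];
	for (int i = 0; i < 400; i++)
		mapped[i] = true;	/* worst case: all-mapped inactive list */

	printf("old: nr_pages left = %d\n", scan_old(mapped, 400));
	printf("new: nr_pages left = %d\n", scan_new(mapped, 400, DEF_PRIORITY));
	return 0;
}

With an all-mapped inactive list, scan_old() bails out and swaps out as soon as its budget underflows, while scan_new() always completes the scan and only then decides whether swap_out() is needed, which is the "delayed" behaviour the new comment in the patch describes.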