It also contains a trick from Stephen that should make it easier for
shrink_mmap() to free swap cache pages.
Patch against 2.1.123.
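
To illustrate the idea before the patch itself, here is a small user-space
toy model of the swap_map accounting involved (swap_map, SWAP_MAP_MAX and
the in_swap_cache flag below are simplified stand-ins, not the real kernel
structures): once the count drops to 1 and that last reference is the swap
cache alone, the cached page is useless and can be dropped.

#include <stdio.h>

#define SWAP_MAP_MAX 0x7fff

static unsigned short swap_map[1];	/* a single swap entry is enough here */
static int in_swap_cache = 1;		/* entry 0 also has a swap cache page */

static void toy_swap_free(int entry)
{
	if (swap_map[entry] < SWAP_MAP_MAX && --swap_map[entry] == 1
	    && in_swap_cache) {
		/*
		 * The only remaining reference is the swap cache
		 * itself, so the cached copy is no longer useful.
		 */
		in_swap_cache = 0;
		swap_map[entry] = 0;
		printf("entry %d: cache-only swap page dropped\n", entry);
	}
}

int main(void)
{
	swap_map[0] = 3;	/* two process references plus the cache */
	toy_swap_free(0);	/* first process exits: 3 -> 2 */
	toy_swap_free(0);	/* second process exits: 2 -> 1, drop cache */
	return 0;
}
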
diff -urN /home/andrea/devel/kernel-tree/linux-2.1.123/include/linux/swap.h linux/include/linux/swap.h
--- /home/andrea/devel/kernel-tree/linux-2.1.123/include/linux/swap.h Tue Sep 29 01:02:25 1998
+++ linux/include/linux/swap.h Tue Sep 29 01:56:43 1998
@@ -90,6 +90,7 @@
extern struct page * read_swap_cache_async(unsigned long, int);
#define read_swap_cache(entry) read_swap_cache_async(entry, 1);
extern int FASTCALL(swap_count(unsigned long));
+extern void FASTCALL(try_to_free_last_swap_entry(unsigned long));
/*
* Make these inline later once they are working properly.
*/
diff -urN /home/andrea/devel/kernel-tree/linux-2.1.123/mm/swap_state.c linux/mm/swap_state.c
--- /home/andrea/devel/kernel-tree/linux-2.1.123/mm/swap_state.c Tue Sep 29 01:02:26 1998
+++ linux/mm/swap_state.c Tue Sep 29 01:40:11 1998
@@ -250,8 +250,27 @@
/*
* If we are the only user, then free up the swap cache.
*/
- if (PageSwapCache(page) && !is_page_shared(page)) {
- delete_from_swap_cache(page);
+ if (PageSwapCache(page))
+ {
+ if (!is_page_shared(page))
+ delete_from_swap_cache(page);
+ else
+ /*
+ * Special case: if we have still got other
+ * references to this page, but all such references
+ * are to the swap entry rather than to the physical
+ * page, then we are only keeping this page for
+ * caching purposes. If the remaining process
+ * references are killed, we will be left with an
+ * orphaned swap entry from the cache. Make sure
+ * that such pages are easily cleaned out of the page
+ * cache.
+ */
+ if (atomic_read(&page->count) == 1)
+ {
+ clear_bit(PG_referenced, &page->flags);
+ page->age = 0;
+ }
}
free_page(addr);
@@ -284,6 +303,30 @@
printk (KERN_ERR "VM: Found a non-swapper swap page!\n");
__free_page(found);
return 0;
+}
+
+void try_to_free_last_swap_entry(unsigned long entry)
+{
+ struct page * page = lookup_swap_cache(entry);
+ if (page)
+ {
+ /*
+ * The last reference in the swap_map[entry] is caused
+ * by this swap cache page.
+ *
+ * Decrease the page->count increased by __find_page().
+ * -arca
+ */
+ __free_page(page);
+ if (atomic_read(&page->count) == 1)
+ /*
+ * The page is resident in memory only because
+ * it's in the swap cache so we can remove it
+ * because it can't be useful anymore.
+ * -arca
+ */
+ delete_from_swap_cache(page);
+ }
}
/*
diff -urN /home/andrea/devel/kernel-tree/linux-2.1.123/mm/swapfile.c linux/mm/swapfile.c
--- /home/andrea/devel/kernel-tree/linux-2.1.123/mm/swapfile.c Thu Sep 10 23:56:48 1998
+++ linux/mm/swapfile.c Tue Sep 29 01:26:31 1998
@@ -144,10 +144,15 @@
p->highest_bit = offset;
if (!p->swap_map[offset])
goto bad_free;
- if (p->swap_map[offset] < SWAP_MAP_MAX) {
- if (!--p->swap_map[offset])
+ if (p->swap_map[offset] < SWAP_MAP_MAX)
+ switch(--p->swap_map[offset])
+ {
+ case 0:
nr_swap_pages++;
- }
+ break;
+ case 1:
+ try_to_free_last_swap_entry(entry);
+ }
#ifdef DEBUG_SWAP
printk("DebugVM: swap_free(entry %08lx, count now %d)\n",
entry, p->swap_map[offset]);
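
For completeness: the PG_referenced/page->age clearing in the swap_state.c
hunk matters because the page cache scanner prefers old, unreferenced pages,
so a swap cache page whose only remaining user is the cache itself becomes
an easy victim. A rough stand-alone sketch of that decision (a toy model
only, not the real shrink_mmap() of 2.1.123):

/*
 * Toy model of the reclaim decision the patch is aiming at: a page
 * kept alive only by the swap cache (count == 1), with the referenced
 * bit clear and age 0, is an obvious victim.  The struct and helper
 * are illustrative stand-ins, not kernel code.
 */
struct toy_page {
	int count;		/* users of the physical page */
	int referenced;		/* PG_referenced stand-in */
	int age;
	int swap_cache;		/* PageSwapCache() stand-in */
};

static int toy_can_reclaim(struct toy_page *page)
{
	if (page->referenced) {
		page->referenced = 0;	/* recently used: give it another pass */
		return 0;
	}
	if (page->age)
		return 0;
	/* only the swap cache still holds the page */
	return page->swap_cache && page->count == 1;
}

int main(void)
{
	struct toy_page page = { 1, 0, 0, 1 };	/* cache-only swap page */
	return toy_can_reclaim(&page) ? 0 : 1;
}

With the age zeroed and the referenced bit cleared by the patch, such a
page passes this test on the very next scan.
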
Andrea[s] Arcangeli