So for those making scientific calculations (big matrix inversions?) which
swap a lot, this gives some clues ... [many zero pages which are not
zero-mapped should be avoided whenever possible]
The patch is against a clean 2.1.21. [the change in kernel_stat.h causes a
recompile of about 20 files]
On my system (which doesn't do any such calculations), from 'cat
/proc/stat':
swap 5202 10921
zero swapout 96
This means that of the 10921 pages swapped out, 96 were entirely
zero-filled. Curious what the output on a simulation box looks like :)
-- mingo
----------------------------------------------------------------------->
--- linux-2.1.21_orig/mm/page_io.c Mon Dec 23 10:13:52 1996
+++ linux/mm/page_io.c Wed Jan 15 18:24:06 1997
@@ -73,8 +73,24 @@
}
if (rw == READ)
kstat.pswpin++;
- else
+ else {
+ unsigned int i;
+ unsigned long * ptr = (unsigned long *) buf;
kstat.pswpout++;
+
+ /*
+ * Okay this is very inefficient, but good for making
+ * some type of statistics about how many truly zero
+ * content pages are swapped out.
+ */
+ for (i=PAGE_SIZE/sizeof(unsigned long); i; i--) {
+ if (*ptr++)
+ break;
+ }
+ if (!i)
+ kstat.pswpout_zero++;
+ }
+
page = mem_map + MAP_NR(buf);
atomic_inc(&page->count);
wait_on_page(page);
--- linux-2.1.21_orig/fs/proc/array.c Sat Dec 21 13:24:02 1996
+++ linux/fs/proc/array.c Wed Jan 15 18:27:37 1997
@@ -209,6 +209,7 @@
"disk_wblk %u %u %u %u\n"
"page %u %u\n"
"swap %u %u\n"
+ "zero swapout %u\n"
"intr %u",
kstat.cpu_user,
kstat.cpu_nice,
@@ -228,6 +229,7 @@
kstat.pgpgout,
kstat.pswpin,
kstat.pswpout,
+ kstat.pswpout_zero,
sum);
for (i = 0 ; i < NR_IRQS ; i++)
len += sprintf(buffer + len, " %u", kstat.interrupts[i]);
--- linux-2.1.21_orig/include/linux/kernel_stat.h Thu Dec 12 16:12:27 1996
+++ linux/include/linux/kernel_stat.h Wed Jan 15 18:23:27 1997
@@ -25,6 +25,7 @@
unsigned int ierrors, oerrors;
unsigned int collisions;
unsigned int context_swtch;
+ unsigned int pswpout_zero;
};
extern struct kernel_stat kstat;