Page fault scalability patch V18: No page table lock in do_anonymous_page
From: Christoph Lameter
Date: Tue Mar 01 2005 - 23:04:19 EST
Do not use the page_table_lock in do_anonymous_page. This significantly
increases parallelism in the page fault handler on SMP systems. The patch
also changes the definitions of the *_mm_counter() macros so that rss and
anon_rss become atomic when CONFIG_ATOMIC_TABLE_OPS is enabled.
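
For reference, here is a minimal sketch of how the resulting write-fault
path in do_anonymous_page() fits together once the lock is gone. It is an
illustration only, not the exact kernel code: ptep_cmpxchg(),
page_table_atomic_start()/page_table_atomic_stop() and the
cmpxchg_fail_anon_write statistic come from earlier patches in this series,
and the allocation/rmap helpers (alloc_page_vma, clear_user_highpage,
lru_cache_add_active, page_add_anon_rmap) are the usual 2.6 ones.

	#include <linux/mm.h>
	#include <linux/highmem.h>
	#include <linux/swap.h>
	#include <linux/rmap.h>

	/*
	 * Simplified outline of the lockless fault path for an
	 * anonymous vma.  Error handling (OOM etc.) is omitted.
	 */
	static int do_anonymous_page_sketch(struct mm_struct *mm,
			struct vm_area_struct *vma, pte_t *page_table,
			unsigned long addr, pte_t orig_entry)
	{
		struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
		pte_t entry;

		clear_user_highpage(page, addr);
		entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
						vma->vm_page_prot)), vma);

		/* No page_table_lock is taken around the pte update. */
		page_table_atomic_start(mm);

		/*
		 * Install the new pte only if it still matches orig_entry.
		 * If another CPU raced and changed the pte first, drop our
		 * page and return; the fault will simply be retried.
		 */
		if (!ptep_cmpxchg(page_table, orig_entry, entry)) {
			pte_unmap(page_table);
			page_cache_release(page);
			page_table_atomic_stop(mm);
			inc_page_state(cmpxchg_fail_anon_write);
			return VM_FAULT_MINOR;
		}

		/* rss/anon_rss are atomic_t here, so no lock is needed. */
		update_mm_counter(mm, rss, 1);
		update_mm_counter(mm, anon_rss, 1);

		lru_cache_add_active(page);
		page_add_anon_rmap(page, vma, addr);

		update_mmu_cache(vma, addr, entry);
		pte_unmap(page_table);
		page_table_atomic_stop(mm);
		return VM_FAULT_MINOR;
	}
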
Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Index: linux-2.6.10/mm/memory.c
===================================================================
--- linux-2.6.10.orig/mm/memory.c 2005-02-24 19:42:21.000000000 -0800
+++ linux-2.6.10/mm/memory.c 2005-02-24 19:42:25.000000000 -0800
@@ -1832,12 +1832,12 @@ do_anonymous_page(struct mm_struct *mm,
vma->vm_page_prot)),
vma);
- spin_lock(&mm->page_table_lock);
+ page_table_atomic_start(mm);
if (!ptep_cmpxchg(page_table, orig_entry, entry)) {
pte_unmap(page_table);
page_cache_release(page);
- spin_unlock(&mm->page_table_lock);
+ page_table_atomic_stop(mm);
inc_page_state(cmpxchg_fail_anon_write);
return VM_FAULT_MINOR;
}
@@ -1855,7 +1855,7 @@ do_anonymous_page(struct mm_struct *mm,
update_mmu_cache(vma, addr, entry);
pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
+ page_table_atomic_stop(mm);
return VM_FAULT_MINOR;
}
Index: linux-2.6.10/include/linux/sched.h
===================================================================
--- linux-2.6.10.orig/include/linux/sched.h 2005-02-24 19:42:17.000000000 -0800
+++ linux-2.6.10/include/linux/sched.h 2005-02-24 19:42:25.000000000 -0800
@@ -203,10 +203,26 @@ arch_get_unmapped_area_topdown(struct fi
extern void arch_unmap_area(struct vm_area_struct *area);
extern void arch_unmap_area_topdown(struct vm_area_struct *area);
+#ifdef CONFIG_ATOMIC_TABLE_OPS
+/*
+ * Atomic page table operations require that the counters are also
+ * incremented atomically
+ */
+#define set_mm_counter(mm, member, value) atomic_set(&(mm)->member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->member))
+#define update_mm_counter(mm, member, value) atomic_add(value, &(mm)->member)
+#define MM_COUNTER_T atomic_t
+
+#else
+/*
+ * No atomic page table operations. Counters are protected by
+ * the page table lock
+ */
#define set_mm_counter(mm, member, value) (mm)->member = (value)
#define get_mm_counter(mm, member) ((mm)->member)
#define update_mm_counter(mm, member, value) (mm)->member += (value)
#define MM_COUNTER_T unsigned long
+#endif
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
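
Callers are unchanged by the counter conversion: with CONFIG_ATOMIC_TABLE_OPS
the same macros expand to atomic_t operations, without it to the old
lock-protected arithmetic. A hedged illustration, using the rss counter
mentioned above:

	/* Either atomic_add(1, &mm->rss) or mm->rss += 1 under the
	 * page_table_lock, depending on CONFIG_ATOMIC_TABLE_OPS: */
	update_mm_counter(mm, rss, 1);
	printk(KERN_DEBUG "rss=%lu\n", get_mm_counter(mm, rss));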