[rfc: cpuops adv V1 7/8] slub: Add PageSlubPartial

From: Christoph Lameter
Date: Thu Dec 02 2010 - 16:54:49 EST


Whether a page is on the partial list is currently derived from a
combination of values in the page struct (inuse > 0 && freelist != NULL).

With lockless updates, that combination may become temporarily incoherent.
Use an explicit page flag to signal that a page is on a partial list, so
that concurrent or repeated adds and removes of the same page become safe
no-ops.
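
For reference, here is a minimal user-space sketch of the test-and-set
pattern the patch relies on. GCC atomic builtins and a pthread mutex stand
in for the kernel's test_and_set_bit()/test_and_clear_bit() and the node's
list_lock; struct fake_page, PG_PARTIAL and partial_list are illustrative
names, not kernel code:

	#include <pthread.h>
	#include <stdio.h>

	struct fake_page {
		unsigned long flags;	/* bit 0 plays the role of PG_slub_partial */
		struct fake_page *next;	/* stand-in for page->lru linkage */
	};

	#define PG_PARTIAL	0x1UL

	static struct fake_page *partial_list;
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static int nr_partial;

	/* Only the caller that flips the bit 0 -> 1 may touch the list. */
	static void add_partial(struct fake_page *page)
	{
		if (!(__atomic_fetch_or(&page->flags, PG_PARTIAL,
					__ATOMIC_ACQ_REL) & PG_PARTIAL)) {
			pthread_mutex_lock(&list_lock);
			page->next = partial_list;
			partial_list = page;
			nr_partial++;
			pthread_mutex_unlock(&list_lock);
		}
	}

	/* Only the caller that flips the bit 1 -> 0 may touch the list. */
	static void remove_partial(struct fake_page *page)
	{
		if (__atomic_fetch_and(&page->flags, ~PG_PARTIAL,
				       __ATOMIC_ACQ_REL) & PG_PARTIAL) {
			struct fake_page **pp;

			pthread_mutex_lock(&list_lock);
			for (pp = &partial_list; *pp; pp = &(*pp)->next)
				if (*pp == page) {
					*pp = page->next;
					nr_partial--;
					break;
				}
			pthread_mutex_unlock(&list_lock);
		}
	}

	int main(void)
	{
		struct fake_page p = { 0, NULL };

		add_partial(&p);
		add_partial(&p);	/* bit already set: no double insert */
		printf("nr_partial after two adds: %d\n", nr_partial);		/* 1 */

		remove_partial(&p);
		remove_partial(&p);	/* bit already clear: no-op */
		printf("nr_partial after two removes: %d\n", nr_partial);	/* 0 */
		return 0;
	}

The point is that only the caller that actually flips the flag performs
the list manipulation, so duplicate calls degrade to no-ops instead of
double-inserting or double-unlinking the page.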

Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>


---
 include/linux/page-flags.h |    2 ++
 mm/slub.c                  |   25 +++++++++++++++----------
 2 files changed, 17 insertions(+), 10 deletions(-)

Index: linux-2.6/include/linux/page-flags.h
===================================================================
--- linux-2.6.orig/include/linux/page-flags.h 2010-12-02 14:53:16.000000000 -0600
+++ linux-2.6/include/linux/page-flags.h 2010-12-02 15:01:35.000000000 -0600
@@ -128,6 +128,7 @@ enum pageflags {

 	/* SLUB */
 	PG_slub_frozen = PG_active,
+	PG_slub_partial = PG_error,
 };

 #ifndef __GENERATING_BOUNDS_H
@@ -214,6 +215,7 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEAR
 __PAGEFLAG(SlobFree, slob_free)

 __PAGEFLAG(SlubFrozen, slub_frozen)
+__PAGEFLAG(SlubPartial, slub_partial) TESTSCFLAG(SlubPartial, slub_partial)

 /*
  * Private page markings that may be used by the filesystem that owns the page
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2010-12-02 14:58:48.000000000 -0600
+++ linux-2.6/mm/slub.c 2010-12-02 15:29:50.000000000 -0600
@@ -1308,13 +1308,15 @@ static __always_inline int slab_trylock(
 static void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	if (tail)
-		list_add_tail(&page->lru, &n->partial);
-	else
-		list_add(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
+	if (!TestSetPageSlubPartial(page)) {
+		spin_lock(&n->list_lock);
+		n->nr_partial++;
+		if (tail)
+			list_add_tail(&page->lru, &n->partial);
+		else
+			list_add(&page->lru, &n->partial);
+		spin_unlock(&n->list_lock);
+	}
 }

 static inline void __remove_partial(struct kmem_cache_node *n,
@@ -1322,15 +1324,18 @@ static inline void __remove_partial(stru
 {
 	list_del(&page->lru);
 	n->nr_partial--;
+	__ClearPageSlubPartial(page);
 }

 static void remove_partial(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

-	spin_lock(&n->list_lock);
-	__remove_partial(n, page);
-	spin_unlock(&n->list_lock);
+	if (TestClearPageSlubPartial(page)) {
+		spin_lock(&n->list_lock);
+		__remove_partial(n, page);
+		spin_unlock(&n->list_lock);
+	}
 }

 /*
