slub: Add KICKABLE to avoid repeated kick() attempts

From: Christoph Lameter
Date: Fri Jan 29 2010 - 15:51:34 EST


Add a flag KICKABLE that is set on slabs whose cache provides a defragmentation (kick) method.

Clear the flag if a kick action does not succeed in emptying the
slab. This avoids repeated, futile attempts to kick objects out of
the same slab.

The KICKABLE flag is set again when all objects of the slab have been
allocated (this happens when the slab is removed from the partial lists).
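
For reference, below is a minimal sketch of what the
__PAGEFLAG(SlubKickable, slub_kickable) line added in this patch
generates. It is illustrative only; the real helpers come from the
macros in include/linux/page-flags.h.

/*
 * Roughly what __PAGEFLAG(SlubKickable, slub_kickable) expands to.
 * The call sites in this patch either hold the slab lock or operate
 * on a freshly allocated page, so non-atomic bitops are sufficient.
 */
static inline int PageSlubKickable(struct page *page)
{
	return test_bit(PG_slub_kickable, &page->flags);
}

static inline void __SetPageSlubKickable(struct page *page)
{
	__set_bit(PG_slub_kickable, &page->flags);
}

static inline void __ClearPageSlubKickable(struct page *page)
{
	__clear_bit(PG_slub_kickable, &page->flags);
}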

Reviewed-by: Rik van Riel <riel@xxxxxxxxxx>
Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Pekka Enberg <penberg@xxxxxxxxxxxxxx>
Signed-off-by: Christoph Lameter <cl@xxxxxxxxxxxxxxxxxxxx>

---
include/linux/page-flags.h | 2 ++
mm/slub.c | 23 ++++++++++++++++++-----
2 files changed, 20 insertions(+), 5 deletions(-)

Index: slab-2.6/mm/slub.c
===================================================================
--- slab-2.6.orig/mm/slub.c 2010-01-22 15:47:48.000000000 -0600
+++ slab-2.6/mm/slub.c 2010-01-22 15:49:30.000000000 -0600
@@ -1168,6 +1168,9 @@ static struct page *new_slab(struct kmem
SLAB_STORE_USER | SLAB_TRACE))
__SetPageSlubDebug(page);

+ if (s->kick)
+ __SetPageSlubKickable(page);
+
start = page_address(page);

if (unlikely(s->flags & SLAB_POISON))
@@ -1210,6 +1213,7 @@ static void __free_slab(struct kmem_cach
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-pages);

+ __ClearPageSlubKickable(page);
__ClearPageSlab(page);
reset_page_mapcount(page);
if (current->reclaim_state)
@@ -1421,6 +1425,8 @@ static void unfreeze_slab(struct kmem_ca
if (SLABDEBUG && PageSlubDebug(page) &&
(s->flags & SLAB_STORE_USER))
add_full(n, page);
+ if (s->kick)
+ __SetPageSlubKickable(page);
}
slab_unlock(page);
} else {
@@ -2905,12 +2911,12 @@ static int kmem_cache_vacate(struct page
slab_lock(page);

BUG_ON(!PageSlab(page)); /* Must be a slab page */
- BUG_ON(!SlabFrozen(page)); /* Slab must have been frozen earlier */
+ BUG_ON(!PageSlubFrozen(page)); /* Slab must have been frozen earlier */

s = page->slab;
objects = page->objects;
map = scratch + objects * sizeof(void **);
- if (!page->inuse || !s->kick)
+ if (!page->inuse || !s->kick || !PageSlubKickable(page))
goto out;

/* Determine used objects */
@@ -2948,6 +2954,9 @@ out:
* Check the result and unfreeze the slab
*/
leftover = page->inuse;
+ if (leftover)
+ /* Unsuccessful reclaim. Avoid future reclaim attempts. */
+ __ClearPageSlubKickable(page);
unfreeze_slab(s, page, leftover > 0);
local_irq_restore(flags);
return leftover;
@@ -3009,17 +3018,21 @@ static unsigned long __kmem_cache_shrink
continue;

if (page->inuse) {
- if (page->inuse * 100 >=
+ if (!PageSlubKickable(page) || page->inuse * 100 >=
s->defrag_ratio * page->objects) {
slab_unlock(page);
- /* Slab contains enough objects */
+ /*
+ * Slab contains enough objects
+ * or we already tried reclaim before and
+ * it failed. Skip this one.
+ */
continue;
}

list_move(&page->lru, &zaplist);
if (s->kick) {
n->nr_partial--;
- SetSlabFrozen(page);
+ __SetPageSlubFrozen(page);
}
slab_unlock(page);
} else {
Index: slab-2.6/include/linux/page-flags.h
===================================================================
--- slab-2.6.orig/include/linux/page-flags.h 2010-01-22 15:09:43.000000000 -0600
+++ slab-2.6/include/linux/page-flags.h 2010-01-22 15:49:30.000000000 -0600
@@ -129,6 +129,7 @@ enum pageflags {
/* SLUB */
PG_slub_frozen = PG_active,
PG_slub_debug = PG_error,
+ PG_slub_kickable = PG_dirty,
};

#ifndef __GENERATING_BOUNDS_H
@@ -216,6 +217,7 @@ __PAGEFLAG(SlobFree, slob_free)

__PAGEFLAG(SlubFrozen, slub_frozen)
__PAGEFLAG(SlubDebug, slub_debug)
+__PAGEFLAG(SlubKickable, slub_kickable)

/*
* Private page markings that may be used by the filesystem that owns the page
