[slubllv3 13/21] slub: Pass kmem_cache struct to lock and freeze slab

From: Christoph Lameter
Date: Fri Apr 15 2011 - 16:50:44 EST


The upcoming cmpxchg-based implementation needs access to the kmem_cache
structure when freezing a slab, so pass it down through get_partial_node()
to lock_and_freeze_slab().

Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>

---
mm/slub.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2011-04-15 14:29:01.000000000 -0500
+++ linux-2.6/mm/slub.c 2011-04-15 14:29:35.000000000 -0500
@@ -1424,8 +1424,8 @@ static inline void remove_partial(struct
*
* Must hold list_lock.
*/
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
- struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page)
{
if (slab_trylock(page)) {
remove_partial(n, page);
@@ -1437,7 +1437,8 @@ static inline int lock_and_freeze_slab(s
/*
* Try to allocate a partial slab from a specific node.
*/
-static struct page *get_partial_node(struct kmem_cache_node *n)
+static struct page *get_partial_node(struct kmem_cache *s,
+ struct kmem_cache_node *n)
{
struct page *page;

@@ -1452,7 +1453,7 @@ static struct page *get_partial_node(str

spin_lock(&n->list_lock);
list_for_each_entry(page, &n->partial, lru)
- if (lock_and_freeze_slab(n, page))
+ if (lock_and_freeze_slab(s, n, page))
goto out;
page = NULL;
out:
@@ -1503,7 +1504,7 @@ static struct page *get_any_partial(stru

if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > s->min_partial) {
- page = get_partial_node(n);
+ page = get_partial_node(s, n);
if (page) {
put_mems_allowed();
return page;

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/