[PATCH 1/1] mm/zswap.c: use a local spinlock_t pointer for tree->lock

From: Fabian Frederick
Date: Fri May 02 2014 - 12:33:13 EST


spin_lock(&tree->lock)/spin_unlock(&tree->lock) is spelled out at every locking
site in zswap_writeback_entry(), zswap_frontswap_store(), zswap_frontswap_load(),
zswap_frontswap_invalidate_page() and zswap_frontswap_invalidate_area(). Cache
the lock in a local spinlock_t *lock variable in each of these functions and
pass that to spin_lock()/spin_unlock() instead, which shortens the call sites.
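
As a minimal illustration of the pattern being applied (the struct and
function below are simplified, hypothetical stand-ins, not the real zswap
definitions):

#include <linux/spinlock.h>
#include <linux/rbtree.h>

/* simplified stand-in for struct zswap_tree */
struct example_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static void example_op(struct example_tree *tree)
{
	spinlock_t *lock = &tree->lock;	/* cache &tree->lock once */

	spin_lock(lock);
	/* ... operate on tree->rbroot under the lock ... */
	spin_unlock(lock);
}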

Cc: Seth Jennings <sjenning@xxxxxxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Fabian Frederick <fabf@xxxxxxxxx>
---
mm/zswap.c | 44 +++++++++++++++++++++++++-------------------
1 file changed, 25 insertions(+), 19 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index aeaef0f..581f36f 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -532,6 +532,7 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
struct zswap_tree *tree;
pgoff_t offset;
struct zswap_entry *entry;
+ spinlock_t *lock;
struct page *page;
u8 *src, *dst;
unsigned int dlen;
@@ -545,17 +546,18 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
swpentry = zhdr->swpentry; /* here */
zbud_unmap(pool, handle);
tree = zswap_trees[swp_type(swpentry)];
+ lock = &tree->lock;
offset = swp_offset(swpentry);

/* find and ref zswap entry */
- spin_lock(&tree->lock);
+ spin_lock(lock);
entry = zswap_entry_find_get(&tree->rbroot, offset);
if (!entry) {
/* entry was invalidated */
- spin_unlock(&tree->lock);
+ spin_unlock(lock);
return 0;
}
- spin_unlock(&tree->lock);
+ spin_unlock(lock);
BUG_ON(offset != entry->offset);

/* try to allocate swap cache page */
@@ -595,7 +597,7 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
page_cache_release(page);
zswap_written_back_pages++;

- spin_lock(&tree->lock);
+ spin_lock(lock);
/* drop local reference */
zswap_entry_put(tree, entry);

@@ -608,7 +610,7 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
*/
if (entry == zswap_rb_search(&tree->rbroot, offset))
zswap_entry_put(tree, entry);
- spin_unlock(&tree->lock);
+ spin_unlock(lock);

goto end;

@@ -620,9 +622,9 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
* it it either okay to return !0
*/
fail:
- spin_lock(&tree->lock);
+ spin_lock(lock);
zswap_entry_put(tree, entry);
- spin_unlock(&tree->lock);
+ spin_unlock(lock);

end:
return ret;
@@ -637,6 +639,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
{
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry, *dupentry;
+ spinlock_t *lock = &tree->lock;
int ret;
unsigned int dlen = PAGE_SIZE, len;
unsigned long handle;
@@ -702,7 +705,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
entry->length = dlen;

/* map */
- spin_lock(&tree->lock);
+ spin_lock(lock);
do {
ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
if (ret == -EEXIST) {
@@ -712,7 +715,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
zswap_entry_put(tree, dupentry);
}
} while (ret == -EEXIST);
- spin_unlock(&tree->lock);
+ spin_unlock(lock);

/* update stats */
atomic_inc(&zswap_stored_pages);
@@ -736,19 +739,20 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
{
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry;
+ spinlock_t *lock = &tree->lock;
u8 *src, *dst;
unsigned int dlen;
int ret;

/* find */
- spin_lock(&tree->lock);
+ spin_lock(lock);
entry = zswap_entry_find_get(&tree->rbroot, offset);
if (!entry) {
/* entry was written back */
- spin_unlock(&tree->lock);
+ spin_unlock(lock);
return -1;
}
- spin_unlock(&tree->lock);
+ spin_unlock(lock);

/* decompress */
dlen = PAGE_SIZE;
@@ -761,9 +765,9 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
zbud_unmap(zswap_pool, entry->handle);
BUG_ON(ret);

- spin_lock(&tree->lock);
+ spin_lock(lock);
zswap_entry_put(tree, entry);
- spin_unlock(&tree->lock);
+ spin_unlock(lock);

return 0;
}
@@ -773,13 +777,14 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry;
+ spinlock_t *lock = &tree->lock;

/* find */
- spin_lock(&tree->lock);
+ spin_lock(lock);
entry = zswap_rb_search(&tree->rbroot, offset);
if (!entry) {
/* entry was written back */
- spin_unlock(&tree->lock);
+ spin_unlock(lock);
return;
}

@@ -789,7 +794,7 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
/* drop the initial reference from entry creation */
zswap_entry_put(tree, entry);

- spin_unlock(&tree->lock);
+ spin_unlock(lock);
}

/* frees all zswap entries for the given swap type */
@@ -797,16 +802,17 @@ static void zswap_frontswap_invalidate_area(unsigned type)
{
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry, *n;
+ spinlock_t *lock = &tree->lock;

if (!tree)
return;

/* walk the tree and free everything */
- spin_lock(&tree->lock);
+ spin_lock(lock);
rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
zswap_free_entry(entry);
tree->rbroot = RB_ROOT;
- spin_unlock(&tree->lock);
+ spin_unlock(lock);
kfree(tree);
zswap_trees[type] = NULL;
}
--
1.8.4.5
