[PATCH v10 23/25] mm: zswap: Allocate pool batching resources if the compressor supports batching.

From: Kanchana P Sridhar
Date: Fri Jul 04 2025 - 00:28:55 EST


Set up zswap to allocate per-CPU compression resources optimally for
both non-batching and batching compressors.

A new ZSWAP_MAX_BATCH_SIZE constant is defined as 8U to set an upper
limit on the number of pages of a large folio that will be
batch-compressed.

As per Herbert's comments in [2], in response to the
crypto_acomp_batch_compress() and crypto_acomp_batch_decompress() API
proposed in [1], this series does not add a new crypto_acomp batching
API. Instead, zswap compression batching uses the existing
crypto_acomp_compress() API in combination with the "void *kernel_data"
member added to "struct acomp_req" earlier in this series.
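
As a rough sketch (the descriptor layout and the driver helpers below
are hypothetical, not part of this series), a batching driver can
interpret req->kernel_data however it sees fit, while drivers without
batch support simply ignore it:

	/* Hypothetical driver-side use of req->kernel_data. */
	struct my_batch_desc {
		int nr_pages;			/* pages in this batch */
		struct acomp_req **reqs;	/* per-page requests */
	};

	static int my_acomp_compress(struct acomp_req *req)
	{
		struct my_batch_desc *desc = req->kernel_data;

		if (!desc)
			return my_compress_one(req);	/* sequential */

		return my_compress_parallel(desc);	/* batched */
	}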

It is up to the compressor to manage multiple requests, as needed, to
accomplish batch parallelism. zswap only needs to allocate the per-CPU
dst buffers according to the batch size supported by the compressor.

A "u8 compr_batch_size" member is added to "struct zswap_pool", as per
Yosry's suggestion. pool->compr_batch_size is set to the minimum of the
compressor's maximum batch size and ZSWAP_MAX_BATCH_SIZE. Accordingly,
zswap_cpu_comp_prepare() allocates that many compression dst buffers in
the per-CPU acomp_ctx.

Another "u8 batch_size" member is added to "struct zswap_pool" to store
the unit in which large folio stores are batched: for batching
compressors, this is pool->compr_batch_size; for non-batching
compressors, it is ZSWAP_MAX_BATCH_SIZE/4.
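
For example, with ZSWAP_MAX_BATCH_SIZE of 8: a sequential software
compressor yields pool->compr_batch_size == 1 and pool->batch_size ==
8/4 == 2, whereas a (hypothetical) compressor advertising a batch size
of 16 is clamped to compr_batch_size == batch_size == min(8U, 16) == 8.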

zswap does not use more than one dst buffer yet. Follow-up patches will
use the multiple acomp_ctx buffers for batch compression/decompression
of multiple pages.

Thus, ZSWAP_MAX_BATCH_SIZE limits the amount of extra memory used for
batching. There is a small extra memory overhead in allocating the
acomp_ctx->buffers array for compressors that do not support batching:
on x86_64, the overhead is one pointer per CPU (i.e., 8 bytes).
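
To put numbers on the batching case (assuming x86_64 with 4 KiB pages):
a compressor batching 8 pages allocates 8 dst buffers of 2 * PAGE_SIZE
each, i.e. 64 KiB per CPU, plus a 64-byte buffers[] array, whereas a
non-batching compressor allocates the same single 2 * PAGE_SIZE buffer
as before.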

[1]: https://patchwork.kernel.org/project/linux-mm/patch/20250508194134.28392-11-kanchana.p.sridhar@xxxxxxxxx/
[2]: https://patchwork.kernel.org/comment/26382610

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@xxxxxxxxx>
---
mm/zswap.c | 82 +++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 63 insertions(+), 19 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 688ce7ed39ca8..d4e4475ba5c5d 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -80,6 +80,9 @@ static bool zswap_pool_reached_full;

#define ZSWAP_PARAM_UNSET ""

+/* Cap the batch size to bound per-CPU memory usage for dst buffers. */
+#define ZSWAP_MAX_BATCH_SIZE 8U
+
static int zswap_setup(void);

/* Enable/disable zswap */
@@ -147,7 +150,7 @@ struct crypto_acomp_ctx {
struct crypto_acomp *acomp;
struct acomp_req *req;
struct crypto_wait wait;
- u8 *buffer;
+ u8 **buffers;
struct mutex mutex;
bool is_sleepable;
};
@@ -166,6 +169,8 @@ struct zswap_pool {
struct work_struct release_work;
struct hlist_node node;
char tfm_name[CRYPTO_MAX_ALG_NAME];
+ u8 compr_batch_size;
+ u8 batch_size;
};

/* Global LRU lists shared by all zswap pools. */
@@ -258,8 +263,10 @@ static void __zswap_pool_empty(struct percpu_ref *ref);
* zswap_cpu_comp_prepare(), not others.
* - Cleanup acomp_ctx resources on all cores in zswap_pool_destroy().
*/
-static void acomp_ctx_dealloc(struct crypto_acomp_ctx *acomp_ctx)
+static void acomp_ctx_dealloc(struct crypto_acomp_ctx *acomp_ctx, u8 nr_buffers)
{
+ u8 i;
+
if (IS_ERR_OR_NULL(acomp_ctx))
return;

@@ -269,7 +276,11 @@ static void acomp_ctx_dealloc(struct crypto_acomp_ctx *acomp_ctx)
if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
crypto_free_acomp(acomp_ctx->acomp);

- kfree(acomp_ctx->buffer);
+ if (acomp_ctx->buffers) {
+ for (i = 0; i < nr_buffers; ++i)
+ kfree(acomp_ctx->buffers[i]);
+ kfree(acomp_ctx->buffers);
+ }
}

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
@@ -277,6 +288,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
int ret = -ENOMEM;
+ u8 i;

/*
* The per-CPU pool->acomp_ctx is zero-initialized on allocation.
@@ -289,10 +301,6 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
return 0;

- acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
- if (!acomp_ctx->buffer)
- return ret;
-
acomp_ctx->acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
if (IS_ERR_OR_NULL(acomp_ctx->acomp)) {
pr_err("could not alloc crypto acomp %s : %ld\n",
@@ -305,17 +313,36 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
acomp_ctx->req = acomp_request_alloc(acomp_ctx->acomp);
if (IS_ERR_OR_NULL(acomp_ctx->req)) {
pr_err("could not alloc crypto acomp_request %s\n",
- pool->tfm_name);
+ pool->tfm_name);
goto fail;
}

- crypto_init_wait(&acomp_ctx->wait);
+ /*
+ * Allocate up to ZSWAP_MAX_BATCH_SIZE dst buffers if the
+ * compressor supports batching.
+ */
+ pool->compr_batch_size = min(ZSWAP_MAX_BATCH_SIZE,
+ crypto_acomp_batch_size(acomp_ctx->acomp));
+
+ acomp_ctx->buffers = kcalloc_node(pool->compr_batch_size, sizeof(u8 *),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!acomp_ctx->buffers)
+ goto fail;
+
+ for (i = 0; i < pool->compr_batch_size; ++i) {
+ acomp_ctx->buffers[i] = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!acomp_ctx->buffers[i])
+ goto fail;
+ }

/*
* if the backend of acomp is async zip, crypto_req_done() will wakeup
* crypto_wait_req(); if the backend of acomp is scomp, the callback
* won't be called, crypto_wait_req() will return without blocking.
*/
+ crypto_init_wait(&acomp_ctx->wait);
+
acomp_request_set_callback(acomp_ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &acomp_ctx->wait);

@@ -323,7 +350,7 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
return 0;

fail:
- acomp_ctx_dealloc(acomp_ctx);
+ acomp_ctx_dealloc(acomp_ctx, pool->compr_batch_size);
return ret;
}

@@ -345,6 +372,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
return NULL;
}

+ /* Many things rely on the zero-initialization. */
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return NULL;
@@ -407,13 +435,28 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
goto ref_fail;
INIT_LIST_HEAD(&pool->list);

+ /*
+ * Set the unit of compress batching for large folios, for quick
+ * retrieval in the zswap_compress() fast path:
+ * If the compressor is sequential (@pool->compr_batch_size is 1),
+ * large folios will be compressed in batches of ZSWAP_MAX_BATCH_SIZE/4
+ * pages, where each page in the batch is compressed sequentially.
+ * We see better performance when the folio is processed in batches
+ * of this size, due to the cache locality of the working-set
+ * structures.
+ */
+ pool->batch_size = (pool->compr_batch_size > 1) ?
+ pool->compr_batch_size : ZSWAP_MAX_BATCH_SIZE/4;
+
zswap_pool_debug("created", pool);

return pool;

ref_fail:
for_each_possible_cpu(cpu)
- acomp_ctx_dealloc(per_cpu_ptr(pool->acomp_ctx, cpu));
+ acomp_ctx_dealloc(per_cpu_ptr(pool->acomp_ctx, cpu),
+ pool->compr_batch_size);
+
error:
if (pool->acomp_ctx)
free_percpu(pool->acomp_ctx);
@@ -472,7 +515,8 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
zswap_pool_debug("destroying", pool);

for_each_possible_cpu(cpu)
- acomp_ctx_dealloc(per_cpu_ptr(pool->acomp_ctx, cpu));
+ acomp_ctx_dealloc(per_cpu_ptr(pool->acomp_ctx, cpu),
+ pool->compr_batch_size);

free_percpu(pool->acomp_ctx);

@@ -942,7 +986,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,

mutex_lock(&acomp_ctx->mutex);

- dst = acomp_ctx->buffer;
+ dst = acomp_ctx->buffers[0];
sg_init_table(&input, 1);
sg_set_page(&input, page, PAGE_SIZE, 0);

@@ -1003,19 +1047,19 @@ static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)

acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
mutex_lock(&acomp_ctx->mutex);
- obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffer);
+ obj = zpool_obj_read_begin(zpool, entry->handle, acomp_ctx->buffers[0]);

/*
* zpool_obj_read_begin() might return a kmap address of highmem when
- * acomp_ctx->buffer is not used. However, sg_init_one() does not
- * handle highmem addresses, so copy the object to acomp_ctx->buffer.
+ * acomp_ctx->buffers[0] is not used. However, sg_init_one() does not
+ * handle highmem addresses, so copy the object to acomp_ctx->buffers[0].
*/
if (virt_addr_valid(obj)) {
src = obj;
} else {
- WARN_ON_ONCE(obj == acomp_ctx->buffer);
- memcpy(acomp_ctx->buffer, obj, entry->length);
- src = acomp_ctx->buffer;
+ WARN_ON_ONCE(obj == acomp_ctx->buffers[0]);
+ memcpy(acomp_ctx->buffers[0], obj, entry->length);
+ src = acomp_ctx->buffers[0];
}

sg_init_one(&input, src, entry->length);
--
2.27.0