[RFC v4 09/18] page_pool: rename __page_pool_put_page() to __page_pool_put_netmem()

From: Byungchul Park
Date: Tue Jun 03 2025 - 22:53:46 EST


Now that __page_pool_put_page() puts a netmem_ref rather than a struct
page, rename it to __page_pool_put_netmem() to reflect what it actually
does.

Signed-off-by: Byungchul Park <byungchul@xxxxxx>
Reviewed-by: Mina Almasry <almasrymina@xxxxxxxxxx>
---
net/core/page_pool.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
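
A rough sketch of how a struct page caller ends up in the renamed
helper, in case it helps review.  example_put_page() below is a made-up
illustration (not a helper added by this series); page_to_netmem() and
page_pool_put_unrefed_netmem() are the existing entry points, whose real
call chains carry more logic than shown here:

  /* Hypothetical caller, for illustration only: wraps a struct page
   * into a netmem_ref and hands it back to the pool.  The real
   * conversion and put helpers live in include/net/netmem.h and
   * net/core/page_pool.c.
   */
  static void example_put_page(struct page_pool *pool, struct page *page,
                               bool allow_direct)
  {
          /* Treat the page as netmem before entering the put path. */
          netmem_ref netmem = page_to_netmem(page);

          /* Ends up in __page_pool_put_netmem(); dma_sync_size of -1
           * asks the pool to sync the whole buffer for device DMA.
           */
          page_pool_put_unrefed_netmem(pool, netmem, -1, allow_direct);
  }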

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index c31a35621b24..0d6a72a71745 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -790,8 +790,8 @@ static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
  * subsystem.
  */
 static __always_inline netmem_ref
-__page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
-                     unsigned int dma_sync_size, bool allow_direct)
+__page_pool_put_netmem(struct page_pool *pool, netmem_ref netmem,
+                       unsigned int dma_sync_size, bool allow_direct)
 {
         lockdep_assert_no_hardirq();
 
@@ -850,7 +850,7 @@ static bool page_pool_napi_local(const struct page_pool *pool)
         /* Allow direct recycle if we have reasons to believe that we are
          * in the same context as the consumer would run, so there's
          * no possible race.
-         * __page_pool_put_page() makes sure we're not in hardirq context
+         * __page_pool_put_netmem() makes sure we're not in hardirq context
          * and interrupts are enabled prior to accessing the cache.
          */
         cpuid = smp_processor_id();
@@ -868,8 +868,8 @@ void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
         if (!allow_direct)
                 allow_direct = page_pool_napi_local(pool);
 
-        netmem = __page_pool_put_page(pool, netmem, dma_sync_size,
-                                      allow_direct);
+        netmem = __page_pool_put_netmem(pool, netmem, dma_sync_size,
+                                        allow_direct);
         if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
                 /* Cache full, fallback to free pages */
                 recycle_stat_inc(pool, ring_full);
@@ -970,8 +970,8 @@ void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
                         continue;
                 }
 
-                netmem = __page_pool_put_page(pool, netmem, -1,
-                                              allow_direct);
+                netmem = __page_pool_put_netmem(pool, netmem, -1,
+                                                allow_direct);
                 /* Approved for bulk recycling in ptr_ring cache */
                 if (netmem)
                         bulk[bulk_len++] = netmem;
--
2.17.1