[PATCH v2 15/23] mm/slab_common: use same tracepoint in kmalloc and normal caches

From: Hyeonggon Yoo
Date: Thu Apr 14 2022 - 04:59:50 EST


Now that tracepoints print cache names, we can distinguish kmalloc and
normal cache allocations.

Use the same tracepoint for kmalloc and normal cache allocations. After
this patch, there are only two tracepoints left in the slab allocators:
kmem_cache_alloc_node and kmem_cache_free.

Remove all unused tracepoints.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
---
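Not part of the patch, just a reviewer note: below is a minimal sketch of
a probe module that attaches to the remaining kmem_cache_alloc_node
tracepoint, assuming the prototype used in this series (name first, then
call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node). Because the
event now carries the cache name, a single probe sees kmalloc and normal
cache allocations alike. The module and function names are made up for
the example.

  #include <linux/module.h>
  #include <linux/tracepoint.h>
  #include <trace/events/kmem.h>

  /* Probe signature: void *data first, then the TP_PROTO arguments. */
  static void probe_alloc_node(void *data, const char *name,
                               unsigned long call_site, const void *ptr,
                               size_t bytes_req, size_t bytes_alloc,
                               gfp_t gfp_flags, int node)
  {
          /* Noisy; illustration only. The cache name distinguishes kmalloc-* caches. */
          pr_info("alloc: cache=%s ptr=%p req=%zu alloc=%zu node=%d\n",
                  name, ptr, bytes_req, bytes_alloc, node);
  }

  static int __init probe_init(void)
  {
          /* Possible because the tracepoint is exported with EXPORT_TRACEPOINT_SYMBOL(). */
          return register_trace_kmem_cache_alloc_node(probe_alloc_node, NULL);
  }

  static void __exit probe_exit(void)
  {
          unregister_trace_kmem_cache_alloc_node(probe_alloc_node, NULL);
          tracepoint_synchronize_unregister();
  }

  module_init(probe_init);
  module_exit(probe_exit);
  MODULE_LICENSE("GPL");
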
include/trace/events/kmem.h | 79 -------------------------------------
mm/slab.c | 8 ++--
mm/slab_common.c | 9 ++---
mm/slob.c | 14 ++++---
mm/slub.c | 19 +++++----
5 files changed, 27 insertions(+), 102 deletions(-)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 35e6887c6101..ca67ba5fd76a 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -9,56 +9,6 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

-DECLARE_EVENT_CLASS(kmem_alloc,
-
- TP_PROTO(unsigned long call_site,
- const void *ptr,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
-
- TP_STRUCT__entry(
- __field( unsigned long, call_site )
- __field( const void *, ptr )
- __field( size_t, bytes_req )
- __field( size_t, bytes_alloc )
- __field( gfp_t, gfp_flags )
- ),
-
- TP_fast_assign(
- __entry->call_site = call_site;
- __entry->ptr = ptr;
- __entry->bytes_req = bytes_req;
- __entry->bytes_alloc = bytes_alloc;
- __entry->gfp_flags = gfp_flags;
- ),
-
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
- (void *)__entry->call_site,
- __entry->ptr,
- __entry->bytes_req,
- __entry->bytes_alloc,
- show_gfp_flags(__entry->gfp_flags))
-);
-
-DEFINE_EVENT(kmem_alloc, kmalloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
-);
-
DECLARE_EVENT_CLASS(kmem_alloc_node,

TP_PROTO(const char *name,
@@ -101,15 +51,6 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->node)
);

-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
-
- TP_PROTO(const char *name, unsigned long call_site,
- const void *ptr, size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
-
- TP_ARGS(name, call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
-);
-
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

TP_PROTO(const char *name, unsigned long call_site,
@@ -119,26 +60,6 @@ DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
TP_ARGS(name, call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

-TRACE_EVENT(kfree,
-
- TP_PROTO(unsigned long call_site, const void *ptr),
-
- TP_ARGS(call_site, ptr),
-
- TP_STRUCT__entry(
- __field( unsigned long, call_site )
- __field( const void *, ptr )
- ),
-
- TP_fast_assign(
- __entry->call_site = call_site;
- __entry->ptr = ptr;
- ),
-
- TP_printk("call_site=%pS ptr=%p",
- (void *)__entry->call_site, __entry->ptr)
-);
-
TRACE_EVENT(kmem_cache_free,

TP_PROTO(const char *name, unsigned long call_site, const void *ptr),
diff --git a/mm/slab.c b/mm/slab.c
index 3c47d0979706..b9959a6b5c48 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3519,9 +3519,9 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
ret = slab_alloc_node(cachep, NULL, flags, nodeid, size, _RET_IP_);

ret = kasan_kmalloc(cachep, ret, size, flags);
- trace_kmalloc_node(cachep->name, _RET_IP_, ret,
- size, cachep->size,
- flags, nodeid);
+ trace_kmem_cache_alloc_node(cachep->name, _RET_IP_, ret,
+ size, cachep->size,
+ flags, nodeid);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -3657,7 +3657,6 @@ void kfree(const void *objp)
unsigned long flags;
struct folio *folio;

- trace_kfree(_RET_IP_, objp);

if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
@@ -3669,6 +3668,7 @@ void kfree(const void *objp)
}

c = folio_slab(folio)->slab_cache;
+ trace_kmem_cache_free(c->name, _RET_IP_, objp);

local_irq_save(flags);
kfree_debugcheck(objp);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 416f0a1f17a6..3d1569085c54 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -910,6 +910,7 @@ void free_large_kmalloc(struct folio *folio, void *object)
if (WARN_ON_ONCE(order == 0))
pr_warn_once("object pointer: 0x%p\n", object);

+ trace_kmem_cache_free(KMALLOC_LARGE_NAME, _RET_IP_, object);
kmemleak_free(object);
kasan_kfree_large(object);

@@ -956,8 +957,8 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
ptr = kasan_kmalloc_large(ptr, size, flags);
/* As ptr might get tagged, call kmemleak hook after KASAN. */
kmemleak_alloc(ptr, size, 1, flags);
- trace_kmalloc_node(KMALLOC_LARGE_NAME, _RET_IP_, ptr, size,
- PAGE_SIZE << order, flags, node);
+ trace_kmem_cache_alloc_node(KMALLOC_LARGE_NAME, _RET_IP_, ptr, size,
+ PAGE_SIZE << order, flags, node);
return ptr;
}
EXPORT_SYMBOL(kmalloc_large_node);
@@ -1290,11 +1291,7 @@ size_t ksize(const void *objp)
EXPORT_SYMBOL(ksize);

/* Tracepoints definitions. */
-EXPORT_TRACEPOINT_SYMBOL(kmalloc);
-EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
-EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
-EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
diff --git a/mm/slob.c b/mm/slob.c
index 8abde6037d95..b1f291128e94 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -505,8 +505,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
*m = size;
ret = (void *)m + minalign;

- trace_kmalloc_node(KMALLOC_NAME, caller, ret,
- size, size + minalign, gfp, node);
+ trace_kmem_cache_alloc_node(KMALLOC_NAME, caller, ret,
+ size, size + minalign, gfp, node);
} else {
unsigned int order = get_order(size);

@@ -514,8 +514,9 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);

- trace_kmalloc_node(KMALLOC_LARGE_NAME, caller, ret,
- size, PAGE_SIZE << order, gfp, node);
+ trace_kmem_cache_alloc_node(KMALLOC_LARGE_NAME, caller,
+ ret, size, PAGE_SIZE << order,
+ gfp, node);
}

kmemleak_alloc(ret, size, 1, gfp);
@@ -533,8 +534,6 @@ void kfree(const void *block)
{
struct folio *sp;

- trace_kfree(_RET_IP_, block);
-
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
kmemleak_free(block);
@@ -543,10 +542,13 @@ void kfree(const void *block)
if (folio_test_slab(sp)) {
int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
+
+ trace_kmem_cache_free(KMALLOC_NAME, _RET_IP_, block);
slob_free(m, *m + align);
} else {
unsigned int order = folio_order(sp);

+ trace_kmem_cache_free(KMALLOC_LARGE_NAME, _RET_IP_, block);
mod_node_page_state(folio_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
-(PAGE_SIZE << order));
__free_pages(folio_page(sp, 0), order);
diff --git a/mm/slub.c b/mm/slub.c
index de03fa1f5667..d53e9e22d67e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3229,8 +3229,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);

- trace_kmalloc_node(s->name, _RET_IP_, ret,
- size, s->size, gfpflags, node);
+ trace_kmem_cache_alloc_node(s->name, _RET_IP_, ret,
+ size, s->size, gfpflags, node);

ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
@@ -4352,7 +4352,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)

ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);

- trace_kmalloc_node(s->name, _RET_IP_, ret, size, s->size, flags, node);
+ trace_kmem_cache_alloc_node(s->name, _RET_IP_, ret, size,
+ s->size, flags, node);

ret = kasan_kmalloc(s, ret, size, flags);

@@ -4431,8 +4432,7 @@ void kfree(const void *x)
struct folio *folio;
struct slab *slab;
void *object = (void *)x;
-
- trace_kfree(_RET_IP_, x);
+ struct kmem_cache *s;

if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
@@ -4442,8 +4442,12 @@ void kfree(const void *x)
free_large_kmalloc(folio, object);
return;
}
+
slab = folio_slab(folio);
- slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
+ s = slab->slab_cache;
+
+ trace_kmem_cache_free(s->name, _RET_IP_, x);
+ slab_free(s, slab, object, NULL, 1, _RET_IP_);
}
EXPORT_SYMBOL(kfree);

@@ -4811,7 +4815,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);

/* Honor the call site pointer we received. */
- trace_kmalloc_node(s->name, caller, ret, size, s->size, gfpflags, node);
+ trace_kmem_cache_alloc_node(s->name, caller, ret, size,
+ s->size, gfpflags, node);

return ret;
}
--
2.32.0