Re: [RFC 2/3] mm/slub: sort objects in cache by frequency of stack trace

From: Vlastimil Babka
Date: Wed May 26 2021 - 10:06:50 EST


On 5/21/21 2:11 PM, glittao@xxxxxxxxx wrote:
> From: Oliver Glitta <glittao@xxxxxxxxx>
>
> Sort the objects in a slub cache by the frequency of the stack trace
> recorded for each object's location, in the alloc_calls and free_calls
> output in debugfs. The most frequently used stack traces will come first.

That will make the alloc_calls/free_calls output much more convenient to work with.
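With this, and assuming the debugfs layout from patch 1/3, just looking at the
top of /sys/kernel/debug/slab/<cache>/alloc_calls should immediately show the
hottest allocation call sites, instead of having to post-process the whole dump.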

> Signed-off-by: Oliver Glitta <glittao@xxxxxxxxx>

Reviewed-by: Vlastimil Babka <vbabka@xxxxxxx>

> ---
> mm/slub.c | 17 +++++++++++++++++
> 1 file changed, 17 insertions(+)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index d5ed6ed7d68b..247983d647cd 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -37,6 +37,7 @@
> #include <linux/memcontrol.h>
> #include <linux/random.h>
> #include <kunit/test.h>
> +#include <linux/sort.h>
>
> #include <linux/debugfs.h>
> #include <trace/events/kmem.h>
> @@ -5893,6 +5894,17 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
> return NULL;
> }
>
> +static int cmp_loc_by_count(const void *a, const void *b, const void *data)
> +{
> + struct location *loc1 = (struct location *)a;
> + struct location *loc2 = (struct location *)b;
> +
> + if (loc1->count > loc2->count)
> + return -1;
> + else
> + return 1;
> +}
> +
> static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
> {
> struct kmem_cache_node *n;
> @@ -5944,6 +5956,11 @@ static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
> process_slab(&t, s, page, alloc);
> spin_unlock_irqrestore(&n->list_lock, flags);
> }
> +
> + /* Sort locations by count */
> + sort_r(t.loc, t.count, sizeof(struct location),
> + cmp_loc_by_count, NULL, NULL);
> +
> }
>
> if (*ppos < t.count) {
>
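Just to illustrate the effect outside the kernel: below is a minimal userspace
sketch of the same sort-by-count idea, with a stand-in struct location and
plain libc qsort() instead of the kernel's sort_r() (and, unlike the patch,
returning 0 for equal counts, purely for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's struct location, illustration only. */
struct location {
	unsigned long count;
	unsigned long addr;
};

/* Descending order by count; equal counts compare equal. */
static int cmp_loc_by_count(const void *a, const void *b)
{
	const struct location *l1 = a;
	const struct location *l2 = b;

	if (l1->count > l2->count)
		return -1;
	if (l1->count < l2->count)
		return 1;
	return 0;
}

int main(void)
{
	struct location loc[] = {
		{ .count = 3,  .addr = 0x1000 },
		{ .count = 42, .addr = 0x2000 },
		{ .count = 7,  .addr = 0x3000 },
	};
	size_t n = sizeof(loc) / sizeof(loc[0]);

	/* Sort locations so the most frequent one comes first. */
	qsort(loc, n, sizeof(loc[0]), cmp_loc_by_count);

	for (size_t i = 0; i < n; i++)
		printf("%lu %#lx\n", loc[i].count, loc[i].addr);

	return 0;
}

Running it prints the locations in descending count order (42, 7, 3), which is
exactly the ordering the seq_file output gets after this patch.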