making memleak airtight

Michael L. Galbraith (mikeg@weiden.de)
Sat, 29 Aug 1998 09:32:16 +0200 (CEST)


Hello VM wizards,

If someone could examine the appended patch chunks, I'd really appreciate
it. I have a memleak 'customer' who seems to have a real memory leak, but
memleak can't see that darn thing. On advice from Ingo, I'm trying to make
memleak _absolutely_ interrupt-safe. In order to do that, I've moved its
internal accounting into the gizzard of the page allocator, and made
everything dependent upon that.

The most critical portion is..
__get_free_pages() receives a pointer to a memleak allocation point
identifier structure and maps the pages it allocates to this id.
__kmem_cache_alloc() does the same thing to cover kmalloc().
If the way I'm doing that is broken... :-/

All higher-level allocators pass the id down to their underlying allocator,
so the customer of that allocator 'inherits' the pages/chunklet (rough
sketch of the scheme below).
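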
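Since memleak_unwrap.h isn't appended here, this is only a simplified
sketch of how the wrapping is meant to hang together -- the real macros
differ in detail, and MEMLEAK_ID_DECL, memleak_id and the file/line fields
below are purely illustrative names, not the actual header:

/*
 * Simplified sketch only -- not the real memleak_unwrap.h.  Each call
 * site gets a static allocation-point id, and the wrappers thread a
 * pointer to it all the way down to the page allocator, so the pages
 * end up charged to the outermost caller.
 */
struct alloc_struct {
	const char *file;
	int line;
};

#ifdef CONFIG_MEMLEAK

/* declare one id per call site (illustrative name) */
#define MEMLEAK_ID_DECL \
	static struct alloc_struct memleak_id = { __FILE__, __LINE__ }

/* callers are redirected to the _wrap variants and hand their id along */
#define kmalloc(size, flags) \
	({ MEMLEAK_ID_DECL; kmalloc_wrap((size), (flags), &memleak_id); })
#define __get_free_pages(gfp_mask, order) \
	({ MEMLEAK_ID_DECL; __get_free_pages_wrap((gfp_mask), (order), &memleak_id); })

/*
 * Files that define MEMLEAK_PASS_ALLOCATION compile their own wrapped
 * functions with the extra id argument and pass it through unchanged,
 * instead of declaring a new id of their own.
 */

#endif /* CONFIG_MEMLEAK */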

Questions:

So far, I've wrapped..
page allocator
slab allocator
vmalloc
alloc_skb
skb_clone
skb_copy
skb_realloc_headroom
sk_alloc
sock_wmalloc
sock_rmalloc
sock_kmalloc
(also mmap to try to cover userland)
Are there others?

Should I change the ownership when get/free_xxx_fast() is used as well?
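
(By 'change the ownership' I mean roughly the following at the reuse
point -- purely illustrative, using only the two hooks the patch already
has; whether the locking context there really allows the _nolock variant
is exactly the sort of thing I'd like checked.)

	/* illustrative only: hand a recycled page/object to its new owner */
	memleak_free(addr);          /* drop the previous owner's record        */
	memleak_alloc_nolock(addr);  /* re-record under whatever id is in scope */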

I am determined that when I send the next version to my 'customer', it is
either going to identify the stupid %&$# leak, or at the very least, the
two dummies who are chasing a nooooon-problem. (either choice is ok :-)

TIA,

-Mike

this is the critical section of the current gyration..

diff -urN linux-2.1.119.virgin/mm/mmap.c /usr/src/linux/mm/mmap.c
--- linux-2.1.119.virgin/mm/mmap.c Sat Aug 29 08:16:57 1998
+++ /usr/src/linux/mm/mmap.c Sat Aug 29 06:27:26 1998
@@ -3,6 +3,11 @@
*
* Written by obz.
*/
+
+#define MEMLEAK_PASS_ALLOCATION
+#define MEMLEAK_UNWRAP_MMAP
+#define MEMLEAK_UNWRAP_SLAB
+
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -23,6 +28,8 @@
#include <asm/system.h>
#include <asm/pgtable.h>

+#include <linux/memleak_unwrap.h>
+
/* description of effects of mapping type and prot in current implementation.
* this is due to the limited x86 page protection hardware. The expected
* behavior is in parens:
@@ -91,6 +98,7 @@
unsigned long rlim, retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ MEMLEAK_DUMMY_PTR

down(&mm->mmap_sem);
lock_kernel();
@@ -157,8 +165,13 @@
#undef _trans
}

+#ifndef CONFIG_MEMLEAK
unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long off)
+#else
+unsigned long do_mmap_wrap(struct file * file, unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long off, struct alloc_struct *id)
+#endif
{
struct mm_struct * mm = current->mm;
struct vm_area_struct * vma;
@@ -435,7 +448,11 @@
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@sw.oz.au>
*/
+#ifndef CONFIG_MEMLEAK
int do_munmap(unsigned long addr, size_t len)
+#else
+int do_munmap_wrap(unsigned long addr, size_t len, struct alloc_struct *id)
+#endif
{
struct mm_struct * mm;
struct vm_area_struct *mpnt, *free, *extra;
@@ -532,6 +549,7 @@
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
int ret;
+ MEMLEAK_DUMMY_PTR

down(&current->mm->mmap_sem);
lock_kernel();
diff -urN linux-2.1.119.virgin/mm/page_alloc.c /usr/src/linux/mm/page_alloc.c
--- linux-2.1.119.virgin/mm/page_alloc.c Sat Aug 29 08:16:57 1998
+++ /usr/src/linux/mm/page_alloc.c Sat Aug 29 08:17:36 1998
@@ -5,6 +5,7 @@
* Swap reorganised 29.12.95, Stephen Tweedie
*/

+#define MEMLEAK_PASS_ALLOCATION
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
@@ -144,6 +145,7 @@
if (!test_and_change_bit(index, area->map))
break;
remove_mem_queue(list(map_nr ^ -mask));
+ memleak_free(PAGE_OFFSET + (list(map_nr ^ -mask)->map_nr << PAGE_SHIFT));
mask <<= 1;
area++;
index >>= 1;
@@ -224,6 +226,7 @@
area--; high--; size >>= 1; \
add_mem_queue(area, map); \
MARK_USED(index, high, area); \
+ memleak_alloc_nolock(ADDRESS(index)); \
index += size; \
map += size; \
} \
@@ -231,7 +234,11 @@
map->age = PAGE_INITIAL_AGE; \
} while (0)

+#ifndef CONFIG_MEMLEAK
unsigned long __get_free_pages(int gfp_mask, unsigned long order)
+#else
+unsigned long __get_free_pages_wrap(int gfp_mask, unsigned long order, struct alloc_struct *id)
+#endif
{
unsigned long flags;

diff -urN linux-2.1.119.virgin/mm/slab.c /usr/src/linux/mm/slab.c
--- linux-2.1.119.virgin/mm/slab.c Sat Aug 29 08:16:57 1998
+++ /usr/src/linux/mm/slab.c Sat Aug 29 06:27:54 1998
@@ -100,6 +100,10 @@
* is less than 512 (PAGE_SIZE<<3), but greater than 256.
*/

+#define MEMLEAK_PASS_ALLOCATION
+#define MEMLEAK_UNWRAP_SLAB
+
+#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -111,6 +115,8 @@
#include <asm/atomic.h>
#include <asm/spinlock.h>

+#include <linux/memleak_unwrap.h>
+
/* If there is a different PAGE_SIZE around, and it works with this allocator,
* then change the following.
*/
@@ -504,7 +510,11 @@
* of memory is DMAable. No need to hold the cache-lock.
*/
static inline void *
+#ifndef CONFIG_MEMLEAK
kmem_getpages(kmem_cache_t *cachep, unsigned long flags, unsigned int *dma)
+#else
+kmem_getpages_wrap(kmem_cache_t *cachep, unsigned long flags, unsigned int *dma, struct alloc_struct *id)
+#endif
{
void *addr;

@@ -682,9 +692,15 @@
* NOTE: The 'name' is assumed to be memory that is _not_ going to disappear.
*/
kmem_cache_t *
+#ifndef CONFIG_MEMLEAK
kmem_cache_create(const char *name, size_t size, size_t offset,
unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
void (*dtor)(void*, kmem_cache_t *, unsigned long))
+#else
+kmem_cache_create_wrap(const char *name, size_t size, size_t offset,
+ unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
+ void (*dtor)(void*, kmem_cache_t *, unsigned long), struct alloc_struct *id)
+#endif
{
const char *func_nm= KERN_ERR "kmem_create: ";
kmem_cache_t *searchp;
@@ -1044,13 +1060,20 @@
ret = 1;
if (cachep->c_lastp == kmem_slab_end(cachep))
ret--; /* Cache is empty. */
+#ifdef CONFIG_MEMLEAK
+ if(ret == 0) memleak_free(cachep);
+#endif
spin_unlock_irq(&cachep->c_spinlock);
return ret;
}

/* Get the memory for a slab management obj. */
static inline kmem_slab_t *
+#ifndef CONFIG_MEMLEAK
kmem_cache_slabmgmt(kmem_cache_t *cachep, void *objp, int local_flags)
+#else
+kmem_cache_slabmgmt_wrap(kmem_cache_t *cachep, void *objp, int local_flags, struct alloc_struct *id)
+#endif
{
kmem_slab_t *slabp;

@@ -1138,7 +1161,11 @@
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
static int
+#ifndef CONFIG_MEMLEAK
kmem_cache_grow(kmem_cache_t * cachep, int flags)
+#else
+kmem_cache_grow_wrap(kmem_cache_t * cachep, int flags, struct alloc_struct *id)
+#endif
{
kmem_slab_t *slabp;
struct page *page;
@@ -1358,7 +1385,11 @@

/* Returns a ptr to an obj in the given cache. */
static inline void *
+#ifndef CONFIG_MEMLEAK
__kmem_cache_alloc(kmem_cache_t *cachep, int flags)
+#else
+__kmem_cache_alloc_wrap(kmem_cache_t *cachep, int flags, struct alloc_struct *id)
+#endif
{
kmem_slab_t *slabp;
kmem_bufctl_t *bufp;
@@ -1396,6 +1427,7 @@
* obj has been removed from the slab. Should be safe to drop
* the lock here.
*/
+ memleak_alloc_nolock(objp);
spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
#if SLAB_DEBUG_SUPPORT
if (cachep->c_flags & SLAB_RED_ZONE)
@@ -1526,6 +1558,7 @@
kmem_poison_obj(cachep, objp);
}
#endif /* SLAB_DEBUG_SUPPORT */
+ memleak_free(objp);
spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
return;
}
@@ -1601,7 +1634,11 @@
}

void *
+#ifndef CONFIG_MEMLEAK
kmem_cache_alloc(kmem_cache_t *cachep, int flags)
+#else
+kmem_cache_alloc_wrap(kmem_cache_t *cachep, int flags, struct alloc_struct *id)
+#endif
{
return __kmem_cache_alloc(cachep, flags);
}
@@ -1613,7 +1650,11 @@
}

void *
+#ifndef CONFIG_MEMLEAK
kmalloc(size_t size, int flags)
+#else
+kmalloc_wrap(size_t size, int flags, struct alloc_struct *id)
+#endif
{
cache_sizes_t *csizep = cache_sizes;

diff -urN linux-2.1.119.virgin/mm/vmalloc.c /usr/src/linux/mm/vmalloc.c
--- linux-2.1.119.virgin/mm/vmalloc.c Sun Aug 2 10:11:31 1998
+++ /usr/src/linux/mm/vmalloc.c Sat Aug 29 06:28:18 1998
@@ -4,12 +4,19 @@
* Copyright (C) 1993 Linus Torvalds
*/

+#define MEMLEAK_PASS_ALLOCATION
+#define MEMLEAK_UNWRAP_VMALLOC
+#define MEMLEAK_UNWRAP_SLAB
+
+#include <linux/config.h>
#include <linux/malloc.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/system.h>

+#include <linux/memleak_unwrap.h>
+
static struct vm_struct * vmlist = NULL;

static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
@@ -83,7 +90,12 @@
flush_tlb_all();
}

+#ifndef CONFIG_MEMLEAK
static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
+#else
+static inline int alloc_area_pte_wrap(pte_t * pte, unsigned long address,
+ unsigned long size, struct alloc_struct *id)
+#endif
{
unsigned long end;

@@ -105,7 +117,12 @@
return 0;
}

+#ifndef CONFIG_MEMLEAK
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size)
+#else
+static inline int alloc_area_pmd_wrap(pmd_t * pmd, unsigned long address,
+ unsigned long size, struct alloc_struct *id)
+#endif
{
unsigned long end;

@@ -125,7 +142,12 @@
return 0;
}

+#ifndef CONFIG_MEMLEAK
int vmalloc_area_pages(unsigned long address, unsigned long size)
+#else
+int vmalloc_area_pages_wrap(unsigned long address, unsigned long size,
+ struct alloc_struct *id )
+#endif
{
pgd_t * dir;
unsigned long end = address + size;
@@ -150,7 +172,11 @@
return 0;
}

+#ifndef CONFIG_MEMLEAK
struct vm_struct * get_vm_area(unsigned long size)
+#else
+struct vm_struct * get_vm_area_wrap(unsigned long size, struct alloc_struct *id)
+#endif
{
unsigned long addr;
struct vm_struct **p, *tmp, *area;
@@ -194,7 +220,11 @@
printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
}

+#ifndef CONFIG_MEMLEAK
void * vmalloc(unsigned long size)
+#else
+void * vmalloc_wrap(unsigned long size, struct alloc_struct *id)
+#endif
{
void * addr;
struct vm_struct *area;
