[PATCH] memory_save patch, final (2.2.0-pre8) version

Max (max@linuz.sns.it)
Wed, 20 Jan 1999 13:53:43 +0100 (MET)


This is the memory_save patch for 2.2.0-pre8.
Please consider it for inclusion in the standard kernel tree.

On i386 it reduces kernel memory usage by 4k for every 2MB of physical RAM.
It does so by removing the `unused' and `map_nr' fields from `struct page'
in linux/include/linux/mm.h: `struct page' shrinks by 8 bytes, and with 4k
pages, 2MB of RAM needs 2MB / 4k = 512 `mem_map' entries, so 512 * 8 = 4096
bytes are saved.
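
The trick is that `map_nr' is redundant: it is always the page's index in
the mem_map[] array, so it can be recomputed from the pointer itself. Here
is a minimal user-space sketch of the idea (toy `struct page' and array
size for illustration, not actual kernel code):

#include <stdio.h>

/* Toy stand-ins for the kernel's struct page and mem_map[]. */
struct page { unsigned long flags; };
static struct page mem_map[512];

int main(void)
{
	struct page *page = &mem_map[123];

	/* What the patch computes instead of storing page->map_nr:
	 * pointer subtraction recovers the index into mem_map[]. */
	unsigned long map_nr = page - mem_map;

	/* The inverse mapping still works: mem_map + map_nr == page. */
	printf("map_nr = %lu\n", map_nr);	/* prints: map_nr = 123 */
	return 0;
}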

I tested it on 2.2.0-pre7 and found no negative performance impact on i386.
On other archs it _may_ give performance penalties: without the stored field,
map_nr has to be recomputed as the pointer subtraction (page - mem_map), which
divides the byte difference by sizeof(struct page). Since that size is not a
power of two, the division cannot be reduced to a simple shift, and its cost
varies by architecture. So, to be on the safe side, removing `map_nr' is
enabled only on i386 for the moment.

To enable it on other archs, edit line 125 of include/linux/mm.h and change:
#ifdef __i386__
to whatever architecture test one likes.
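
For example, to also drop the stored field on Alpha (purely illustrative
and untested; substitute your own architecture test), the conditional
would become:

#if defined(__i386__) || defined(__alpha__)	/* illustrative, Alpha untested */
#undef USE_STRUCT_PAGE_NR
#else
#define USE_STRUCT_PAGE_NR
#endif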

Andrea Arcangeli and others have tested it as well and found it stable.
Anyway, the patch is straightforward enough, IMHO...

Massimiliano Ghilardi

And here is the patch:
diff -urN v2.2.0-pre8/include/linux/mm.h linux/include/linux/mm.h
--- v2.2.0-pre8/include/linux/mm.h Thu Jan 14 23:08:55 1999
+++ linux/include/linux/mm.h Sun Jan 17 15:20:10 1999
@@ -110,6 +110,35 @@
};

/*
+ * On some architectures, don't add a ->map_nr field in the `struct page'
+ * below, but calculate it on the fly as (page - mem_map).
+ * This reduces kernel memory usage, but may have performance penalties.
+ *
+ * At least on Intel the penalties are completely negligible,
+ * so we go for saving memory. For other architectures, YMMV.
+ *
+ * Maybe this should be moved to include/asm-<arch>/page.h ?
+ *
+ * Massimiliano Ghilardi <max@linuz.sns.it> 01/20/1999
+ *
+ */
+#ifdef __i386__
+#undef USE_STRUCT_PAGE_NR
+#else
+#define USE_STRUCT_PAGE_NR
+#endif
+
+/*
+ * This is the macro to find the map_nr given a struct page pointer.
+ * You should use it instead of doing p->map_nr .
+ */
+#ifdef USE_STRUCT_PAGE_NR
+#define STRUCT_PAGE_NR(page) ((page)->map_nr)
+#else
+#define STRUCT_PAGE_NR(page) ((page) - (mem_map))
+#endif
+
+/*
* Try to keep the most commonly accessed fields in single cache lines
* here (16 bytes or greater). This ordering should be particularly
* beneficial on 32-bit processors.
@@ -125,12 +154,13 @@
unsigned long offset;
struct page *next_hash;
atomic_t count;
- unsigned int unused;
unsigned long flags; /* atomic flags, some possibly updated asynchronously */
struct wait_queue *wait;
struct page **pprev_hash;
struct buffer_head * buffers;
+#ifdef USE_STRUCT_PAGE_NR
unsigned long map_nr; /* page->map_nr == page - mem_map */
+#endif
} mem_map_t;

/* Page flag bit values */
diff -urN v2.2.0-pre8/include/linux/pagemap.h linux/include/linux/pagemap.h
--- v2.2.0-pre8/include/linux/pagemap.h Mon Jan 11 10:02:18 1999
+++ linux/include/linux/pagemap.h Sun Jan 17 15:15:03 1999
@@ -14,7 +14,7 @@

static inline unsigned long page_address(struct page * page)
{
- return PAGE_OFFSET + PAGE_SIZE * page->map_nr;
+ return PAGE_OFFSET + PAGE_SIZE * STRUCT_PAGE_NR(page);
}

#define PAGE_HASH_BITS 11
diff -urN v2.2.0-pre8/mm/filemap.c linux/mm/filemap.c
--- v2.2.0-pre8/mm/filemap.c Mon Jan 11 10:09:30 1999
+++ linux/mm/filemap.c Sun Jan 17 15:14:46 1999
@@ -144,7 +144,7 @@
if (PageSkip(page)) {
/* next_hash is overloaded for PageSkip */
page = page->next_hash;
- clock = page->map_nr;
+ clock = STRUCT_PAGE_NR(page);
}

count--;
diff -urN v2.2.0-pre8/mm/page_alloc.c linux/mm/page_alloc.c
--- v2.2.0-pre8/mm/page_alloc.c Thu Jan 14 23:08:55 1999
+++ linux/mm/page_alloc.c Sun Jan 17 15:15:28 1999
@@ -125,7 +125,7 @@
if (PageSwapCache(page))
panic ("Freeing swap cache page");
page->flags &= ~(1 << PG_referenced);
- free_pages_ok(page->map_nr, 0);
+ free_pages_ok(STRUCT_PAGE_NR(page), 0);
return;
}
}
@@ -163,7 +163,7 @@
if (!dma || CAN_DMA(ret)) { \
unsigned long map_nr; \
(prev->next = ret->next)->prev = prev; \
- map_nr = ret->map_nr; \
+ map_nr = STRUCT_PAGE_NR(ret); \
MARK_USED(map_nr, new_order, area); \
nr_free_pages -= 1 << order; \
EXPAND(ret, map_nr, order, new_order, area); \
@@ -322,7 +322,9 @@
--p;
atomic_set(&p->count, 0);
p->flags = (1 << PG_DMA) | (1 << PG_reserved);
+#ifdef USE_STRUCT_PAGE_NR
p->map_nr = p - mem_map;
+#endif
} while (p > mem_map);

for (i = 0 ; i < NR_MEM_LISTS ; i++) {
diff -urN v2.2.0-pre8/arch/m68k/mm/memory.c linux/arch/m68k/mm/memory.c
--- v2.2.0-pre8/arch/m68k/mm/memory.c Fri Jan 8 23:20:17 1999
+++ linux/arch/m68k/mm/memory.c Sun Jan 17 15:16:00 1999
@@ -92,7 +92,7 @@
static ptable_desc ptable_list = { &ptable_list, &ptable_list };

#define PD_MARKBITS(dp) (*(unsigned char *)&(dp)->offset)
-#define PD_PAGE(dp) (PAGE_OFFSET + ((dp)->map_nr << PAGE_SHIFT))
+#define PD_PAGE(dp) (PAGE_OFFSET + (STRUCT_PAGE_NR(dp) << PAGE_SHIFT))
#define PAGE_PD(page) ((ptable_desc *)&mem_map[MAP_NR(page)])

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
diff -urN v2.2.0-pre8/arch/sparc/mm/srmmu.c linux/arch/sparc/mm/srmmu.c
--- v2.2.0-pre8/arch/sparc/mm/srmmu.c Fri Jan 8 23:19:22 1999
+++ linux/arch/sparc/mm/srmmu.c Sun Jan 17 15:14:25 1999
@@ -470,7 +470,7 @@
(unsigned int)ret->pprev_hash = mask & ~tmp;
if (!(mask & ~tmp))
pte_quicklist = (unsigned long *)ret->next_hash;
- ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off);
+ ret = (struct page *)(PAGE_OFFSET + (STRUCT_PAGE_NR(ret) << PAGE_SHIFT) + off);
pgtable_cache_size--;
}
spin_unlock(&pte_spinlock);
@@ -508,7 +508,7 @@
(unsigned int)ret->pprev_hash = mask & ~tmp;
if (!(mask & ~tmp))
pgd_quicklist = (unsigned long *)ret->next_hash;
- ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off);
+ ret = (struct page *)(PAGE_OFFSET + (STRUCT_PAGE_NR(ret) << PAGE_SHIFT) + off);
pgd_cache_size--;
}
spin_unlock(&pgd_spinlock);
@@ -682,7 +682,7 @@
spin_lock(&pgd_spinlock);
address >>= SRMMU_PGDIR_SHIFT;
for (page = (struct page *)pgd_quicklist; page; page = page->next_hash) {
- pgd_t *pgd = (pgd_t *)(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ pgd_t *pgd = (pgd_t *)(PAGE_OFFSET + (STRUCT_PAGE_NR(page) << PAGE_SHIFT));
unsigned int mask = (unsigned int)page->pprev_hash;

if (mask & 1)
@@ -2817,7 +2817,7 @@
page->next_hash = NULL;
page->pprev_hash = NULL;
pgtable_cache_size -= 16;
- free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ free_page(PAGE_OFFSET + (STRUCT_PAGE_NR(page) << PAGE_SHIFT));
freed++;
if (page2)
page = page2->next_hash;
@@ -2843,7 +2843,7 @@
page->next_hash = NULL;
page->pprev_hash = NULL;
pgd_cache_size -= 4;
- free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ free_page(PAGE_OFFSET + (STRUCT_PAGE_NR(page) << PAGE_SHIFT));
freed++;
if (page2)
page = page2->next_hash;
diff -urN v2.2.0-pre8/arch/sparc64/mm/init.c linux/arch/sparc64/mm/init.c
--- v2.2.0-pre8/arch/sparc64/mm/init.c Fri Jan 8 23:22:00 1999
+++ linux/arch/sparc64/mm/init.c Sun Jan 17 15:17:11 1999
@@ -69,7 +69,7 @@
page->next_hash = NULL;
page->pprev_hash = NULL;
pgd_cache_size -= 2;
- free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ free_page(PAGE_OFFSET + (STRUCT_PAGE_NR(page) << PAGE_SHIFT));
freed++;
if (page2)
page = page2->next_hash;
diff -urN v2.2.0-pre8/include/asm-sparc64/pgtable.h linux/include/asm-sparc64/pgtable.h
--- v2.2.0-pre8/include/asm-sparc64/pgtable.h Wed Jan 20 13:33:27 1999
+++ linux/include/asm-sparc64/pgtable.h Wed Jan 20 13:33:45 1999
@@ -371,7 +371,7 @@
(unsigned long)ret->pprev_hash = mask;
if (!mask)
pgd_quicklist = (unsigned long *)ret->next_hash;
- ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off);
+ ret = (struct page *)(PAGE_OFFSET + (STRUCT_PAGE_NR(ret) << PAGE_SHIFT) + off);
pgd_cache_size--;
} else {
ret = (struct page *) __get_free_page(GFP_KERNEL);

-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.rutgers.edu
Please read the FAQ at http://www.tux.org/lkml/