[PATCH] memory_save patch, updated version

Max (max@Linuz.sns.it)
Sun, 17 Jan 1999 15:56:10 +0100 (MET)


This is the memory_save patch updated for 2.2.0-pre7, including
all arch-specific code.

Andrea Arcangeli already included the old version of this patch
(which broke non-i386 arch) in his latest arca-vm-xx patches.

I tested it and found no significant performance change on i386.
On other archs it *may* give performance penalties, so to be on the safe side
the patch is only enabled for i386 at the moment.

The patch reduces kernel memory usage by 4k every 2MB of physical RAM
(4k every 4MB if you already removed the `unused' field in `struct page',
include/linux/mm.h)

To enable it on other archs, change include/linux/mm.h, line 118 from
#ifdef __i386__
to
#if (defined __i386__ || defined <your favourite arch> || ...)

Note that I already updated both arch-independent and arch-specific code to use
the macro `STRUCT_PAGE_NR(page)' --- which expands to the correct thing ---
instead of the old `page->map_nr'

Massimiliano Ghilardi

----------------------------------------------------------------
| I have yet to meet a person who had a bad experience of Linux. |
| Most have never had an experience. |
----------------------------------------------------------------

And here is the patch:

diff -urN linux-2.2.0-pre7-dist/arch/m68k/mm/memory.c linux-2.2.0-pre7/arch/m68k/mm/memory.c
--- linux-2.2.0-pre7-dist/arch/m68k/mm/memory.c Fri Jan 8 23:20:17 1999
+++ linux-2.2.0-pre7/arch/m68k/mm/memory.c Sun Jan 17 15:16:00 1999
@@ -92,7 +92,7 @@
static ptable_desc ptable_list = { &ptable_list, &ptable_list };

#define PD_MARKBITS(dp) (*(unsigned char *)&(dp)->offset)
-#define PD_PAGE(dp) (PAGE_OFFSET + ((dp)->map_nr << PAGE_SHIFT))
+#define PD_PAGE(dp) (PAGE_OFFSET + (STRUCT_PAGE_NR(dp) << PAGE_SHIFT))
#define PAGE_PD(page) ((ptable_desc *)&mem_map[MAP_NR(page)])

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
diff -urN linux-2.2.0-pre7-dist/arch/sparc/mm/srmmu.c linux-2.2.0-pre7/arch/sparc/mm/srmmu.c
--- linux-2.2.0-pre7-dist/arch/sparc/mm/srmmu.c Fri Jan 8 23:19:22 1999
+++ linux-2.2.0-pre7/arch/sparc/mm/srmmu.c Sun Jan 17 15:14:25 1999
@@ -470,7 +470,7 @@
(unsigned int)ret->pprev_hash = mask & ~tmp;
if (!(mask & ~tmp))
pte_quicklist = (unsigned long *)ret->next_hash;
- ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off);
+ ret = (struct page *)(PAGE_OFFSET + (STRUCT_PAGE_NR(ret) << PAGE_SHIFT) + off);
pgtable_cache_size--;
}
spin_unlock(&pte_spinlock);
@@ -508,7 +508,7 @@
(unsigned int)ret->pprev_hash = mask & ~tmp;
if (!(mask & ~tmp))
pgd_quicklist = (unsigned long *)ret->next_hash;
- ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off);
+ ret = (struct page *)(PAGE_OFFSET + (STRUCT_PAGE_NR(ret) << PAGE_SHIFT) + off);
pgd_cache_size--;
}
spin_unlock(&pgd_spinlock);
@@ -682,7 +682,7 @@
spin_lock(&pgd_spinlock);
address >>= SRMMU_PGDIR_SHIFT;
for (page = (struct page *)pgd_quicklist; page; page = page->next_hash) {
- pgd_t *pgd = (pgd_t *)(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ pgd_t *pgd = (pgd_t *)(PAGE_OFFSET + (STRUCT_PAGE_NR(page) << PAGE_SHIFT));
unsigned int mask = (unsigned int)page->pprev_hash;

if (mask & 1)
@@ -2817,7 +2817,7 @@
page->next_hash = NULL;
page->pprev_hash = NULL;
pgtable_cache_size -= 16;
- free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ free_page(PAGE_OFFSET + (STRUCT_PAGE_NR(page) << PAGE_SHIFT));
freed++;
if (page2)
page = page2->next_hash;
@@ -2843,7 +2843,7 @@
page->next_hash = NULL;
page->pprev_hash = NULL;
pgd_cache_size -= 4;
- free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ free_page(PAGE_OFFSET + (STRUCT_PAGE_NR(page) << PAGE_SHIFT));
freed++;
if (page2)
page = page2->next_hash;
diff -urN linux-2.2.0-pre7-dist/arch/sparc64/mm/init.c linux-2.2.0-pre7/arch/sparc64/mm/init.c
--- linux-2.2.0-pre7-dist/arch/sparc64/mm/init.c Fri Jan 8 23:22:00 1999
+++ linux-2.2.0-pre7/arch/sparc64/mm/init.c Sun Jan 17 15:17:11 1999
@@ -69,7 +69,7 @@
page->next_hash = NULL;
page->pprev_hash = NULL;
pgd_cache_size -= 2;
- free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT));
+ free_page(PAGE_OFFSET + (STRUCT_PAGE_NR(page) << PAGE_SHIFT));
freed++;
if (page2)
page = page2->next_hash;
diff -urN linux-2.2.0-pre7-dist/include/linux/mm.h linux-2.2.0-pre7/include/linux/mm.h
--- linux-2.2.0-pre7-dist/include/linux/mm.h Thu Jan 14 23:08:55 1999
+++ linux-2.2.0-pre7/include/linux/mm.h Sun Jan 17 15:20:10 1999
@@ -103,6 +103,35 @@
};

/*
+ * On some architectures, don't add a ->map_nr field in the `struct page'
+ * below, but calculate it on the fly as (page - mem_map).
+ * This reduces kernel memory usage, but may have performance penalties.
+ *
+ * At least on Intel the penalties are completely negligible,
+ * so we go for saving memory. For other architectures, YMMV.
+ *
+ * Maybe this should be moved to include/asm-<arch>/page.h ?
+ *
+ * Massimiliano Ghilardi <max@linuz.sns.it> 01/17/1999
+ *
+ */
+#ifdef __i386__
+#undef USE_STRUCT_PAGE_NR
+#else
+#define USE_STRUCT_PAGE_NR
+#endif
+
+/*
+ * This is the macro to find the map_nr given a struct page pointer.
+ * You should use it instead of doing p->map_nr .
+ */
+#ifdef USE_STRUCT_PAGE_NR
+#define STRUCT_PAGE_NR(page) ((page)->map_nr)
+#else
+#define STRUCT_PAGE_NR(page) ((page) - (mem_map))
+#endif
+
+/*
* Try to keep the most commonly accessed fields in single cache lines
* here (16 bytes or greater). This ordering should be particularly
* beneficial on 32-bit processors.
@@ -118,12 +147,13 @@
unsigned long offset;
struct page *next_hash;
atomic_t count;
- unsigned int unused;
unsigned long flags; /* atomic flags, some possibly updated asynchronously */
struct wait_queue *wait;
struct page **pprev_hash;
struct buffer_head * buffers;
+#ifdef USE_STRUCT_PAGE_NR
unsigned long map_nr; /* page->map_nr == page - mem_map */
+#endif
} mem_map_t;

/* Page flag bit values */
diff -urN linux-2.2.0-pre7-dist/include/linux/pagemap.h linux-2.2.0-pre7/include/linux/pagemap.h
--- linux-2.2.0-pre7-dist/include/linux/pagemap.h Mon Jan 11 10:02:18 1999
+++ linux-2.2.0-pre7/include/linux/pagemap.h Sun Jan 17 15:15:03 1999
@@ -14,7 +14,7 @@

static inline unsigned long page_address(struct page * page)
{
- return PAGE_OFFSET + PAGE_SIZE * page->map_nr;
+ return PAGE_OFFSET + PAGE_SIZE * STRUCT_PAGE_NR(page);
}

#define PAGE_HASH_BITS 11
diff -urN linux-2.2.0-pre7-dist/mm/filemap.c linux-2.2.0-pre7/mm/filemap.c
--- linux-2.2.0-pre7-dist/mm/filemap.c Mon Jan 11 10:09:30 1999
+++ linux-2.2.0-pre7/mm/filemap.c Sun Jan 17 15:14:46 1999
@@ -144,7 +144,7 @@
if (PageSkip(page)) {
/* next_hash is overloaded for PageSkip */
page = page->next_hash;
- clock = page->map_nr;
+ clock = STRUCT_PAGE_NR(page);
}

count--;
diff -urN linux-2.2.0-pre7-dist/mm/page_alloc.c linux-2.2.0-pre7/mm/page_alloc.c
--- linux-2.2.0-pre7-dist/mm/page_alloc.c Thu Jan 14 23:08:55 1999
+++ linux-2.2.0-pre7/mm/page_alloc.c Sun Jan 17 15:15:28 1999
@@ -125,7 +125,7 @@
if (PageSwapCache(page))
panic ("Freeing swap cache page");
page->flags &= ~(1 << PG_referenced);
- free_pages_ok(page->map_nr, 0);
+ free_pages_ok(STRUCT_PAGE_NR(page), 0);
return;
}
}
@@ -163,7 +163,7 @@
if (!dma || CAN_DMA(ret)) { \
unsigned long map_nr; \
(prev->next = ret->next)->prev = prev; \
- map_nr = ret->map_nr; \
+ map_nr = STRUCT_PAGE_NR(ret); \
MARK_USED(map_nr, new_order, area); \
nr_free_pages -= 1 << order; \
EXPAND(ret, map_nr, order, new_order, area); \
@@ -322,7 +322,9 @@
--p;
atomic_set(&p->count, 0);
p->flags = (1 << PG_DMA) | (1 << PG_reserved);
+#ifdef USE_STRUCT_PAGE_NR
p->map_nr = p - mem_map;
+#endif
} while (p > mem_map);

for (i = 0 ; i < NR_MEM_LISTS ; i++) {

-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.rutgers.edu
Please read the FAQ at http://www.tux.org/lkml/