Re: low memory buffer cache bug fix, need some testers...

Dr. Werner Fink (werner@suse.de)
Tue, 10 Jun 1997 15:14:41 +0200


> Aha, good eyes. I've put this into my tree.
>
> The only remaining issue is how try_to_free_page() does not try hard
> enough, this is what causes gcc to die with a signal. My old "sleep
> instead of fail" hack is not going back in, it is a kludge and can
> create other problems.
>
> I will look into a better fix for that one.

Hmmm ... there is another problem detected by Gerard Roudier. By calling
grow_buffers in refill_freelist we will get a competition between
try_to_free_buffer in shrink_mmap which is called by __get_free_pages
in grow_buffers ...
Therefore shrink_mmap should not free buffers but only shared pages
if it is used to get buffer pages.

Maybe the following patch helps for this and the `try harder'
problem? It's against pre-patch-2.0.31-2 and your given patch
... without the switch in __get_free_pages :-)

Werner

---------------------------------------------------------------------
diff -U8 -rN linux-2.0.31-davem/include/linux/mm.h linux-2.0.31/include/linux/mm.h
--- linux-2.0.31-davem/include/linux/mm.h Sat Mar 29 01:08:17 1997
+++ linux-2.0.31/include/linux/mm.h Tue Jun 10 13:08:59 1997
@@ -290,17 +290,17 @@
extern void remove_shared_vm_struct(struct vm_area_struct *);
extern void build_mmap_avl(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);
extern int do_munmap(unsigned long, size_t);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(unsigned long);
-extern int shrink_mmap(int, int);
+extern int shrink_mmap(int, int, int);
extern void truncate_inode_pages(struct inode *, unsigned long);

#define GFP_BUFFER 0x00
#define GFP_ATOMIC 0x01
#define GFP_USER 0x02
#define GFP_KERNEL 0x03
#define GFP_NOBUFFER 0x04
#define GFP_NFS 0x05
diff -U8 -rN linux-2.0.31-davem/mm/filemap.c linux-2.0.31/mm/filemap.c
--- linux-2.0.31-davem/mm/filemap.c Tue Jun 10 12:58:48 1997
+++ linux-2.0.31/mm/filemap.c Tue Jun 10 13:46:54 1997
@@ -122,17 +122,17 @@
if (offset < PAGE_SIZE) {
unsigned long address = page_address(page);
memset((void *) (offset + address), 0, PAGE_SIZE - offset);
flush_page_to_ram(address);
}
}
}

-int shrink_mmap(int priority, int dma)
+int shrink_mmap(int priority, int dma, int gfp_level)
{
static int clock = 0;
struct page * page;
unsigned long limit = MAP_NR(high_memory);
struct buffer_head *tmp, *bh;
int count_max, count_min;

count_max = (limit<<1) >> (priority>>1);
@@ -142,16 +142,19 @@
do {
count_max--;
if (page->inode || page->buffers)
count_min--;

if (PageLocked(page))
goto next;
if (dma && !PageDMA(page))
+ goto next;
+ /* Do not free buffers if we are called for buffers */
+ if (gfp_level == GFP_BUFFER && page->buffers)
goto next;
/* First of all, regenerate the page's referenced bit
from any buffers in the page */
bh = page->buffers;
if (bh) {
tmp = bh;
do {
if (buffer_touched(tmp)) {
diff -U8 -rN linux-2.0.31-davem/mm/page_alloc.c linux-2.0.31/mm/page_alloc.c
--- linux-2.0.31-davem/mm/page_alloc.c Sat Aug 17 20:19:29 1996
+++ linux-2.0.31/mm/page_alloc.c Tue Jun 10 13:49:54 1997
@@ -209,17 +209,17 @@
repeat:
cli();
if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
RMQUEUE(order, dma);
restore_flags(flags);
return 0;
}
restore_flags(flags);
- if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
+ if (try_to_free_page(priority, dma, 1))
goto repeat;
return 0;
}

/*
* Show free area list (used inside shift_scroll-lock stuff)
* We also calculate the percentage fragmentation. We do this by counting the
* memory on each free list with the exception of the first item on the list.
diff -U8 -rN linux-2.0.31-davem/mm/vmscan.c linux-2.0.31/mm/vmscan.c
--- linux-2.0.31-davem/mm/vmscan.c Tue Jun 10 14:46:29 1997
+++ linux-2.0.31/mm/vmscan.c Tue Jun 10 14:56:44 1997
@@ -342,20 +342,20 @@

/* we don't try as hard if we're not waiting.. */
stop = 3;
if (wait)
stop = 0;
switch (state) {
do {
case 0:
- /* Don't worry here for the GFP_BUFFER case, shrink_mmap never
- * tries to write dirty things out...
+ /* Don't worry here for the GFP_BUFFER case, shrink_mmap
+ * never tries to write dirty things out...
*/
- if (shrink_mmap(i, dma))
+ if (shrink_mmap(i, dma, priority))
return 1;
state = 1;
case 1:
/* shm_swap must always perform some I/O if it succeeds
* in finding things to free up, so don't waste any time
* if we are trying to get some buffer heads...
*/
if (priority != GFP_BUFFER && shm_swap(i, dma))
@@ -363,16 +363,34 @@
state = 2;
default:
if (swap_out(i, dma, wait, priority))
return 1;
state = 0;
i--;
} while ((i - stop) >= 0);
}
+
+ if (!wait)
+ return 0;
+ i = 6;
+ do {
+ if (shrink_mmap(i, dma, priority)) {
+ state = 0;
+ if (swap_out((i > 0) ? --i : 6, dma, wait, priority))
+ state = 2;
+ return 1;
+ }
+ if (swap_out(i, dma, wait, priority)) {
+ state = 2;
+ if (shrink_mmap((i > 0) ? --i : 6, dma, priority))
+ state = 0;
+ return 1;
+ }
+ } while (i--);
return 0;
}


/*
* The background pageout daemon.
* Started as a kernel thread from the init process.
*/