Hi,
Linux 2.2.17pre4, which includes Andrea's mm-fix-3 patch, changed
try_to_free_buffers() to flush dirty buffers of pages, making these pages
freeable in case all buffers on them get flushed.
The attached patch changes try_to_free_buffers() to wait after every 64
buffers written to disk, preventing it from starting IO on a big number of
dirty buffers without waiting for them to get flushed.
With Rik's patch, which I posted a while ago, plus this patch, I'm
able to run mmap002 + mmap001 at the same time on an 8MB machine. (not
possible with "stock" 2.2.17pre4)
Note: The magic number 64 may not be perfect for all cases.
As Andrea pointed out, we may wait on buffers to get flushed while
there is freeable cache around.
--- linux-2.2.16.orig/mm/filemap.c Fri Jun 16 15:33:45 2000
+++ linux-2.2.16/mm/filemap.c Fri Jan 3 04:41:45 1997
@@ -141,7 +141,8 @@
unsigned long limit = num_physpages;
struct page * page;
int count;
-
+ int nr_dirty = 0;
+
/* Make sure we scan all pages twice at priority 0. */
count = (limit << 1) >> priority;
@@ -197,13 +198,22 @@
/* Is it a buffer page? */
if (page->buffers) {
+ /*
+ * Wait for async IO to complete
+ * at each 64 buffers
+ */
+
+ int wait = ((gfp_mask & __GFP_IO)
+ && (!(nr_dirty++ % 64)));
+
if (buffer_under_min())
continue;
/*
* We can sleep if we need to do some write
* throttling.
*/
- if (!try_to_free_buffers(page))
+
+ if (!try_to_free_buffers(page, wait))
goto refresh_clock;
return 1;
}
--- linux-2.2.16.orig/fs/buffer.c Fri Jun 16 15:33:45 2000
+++ linux-2.2.16/fs/buffer.c Fri Jan 3 05:13:24 1997
@@ -27,6 +27,10 @@
/* invalidate_buffers/set_blocksize/sync_dev race conditions and
fs corruption fixes, 1999, Andrea Arcangeli <andrea@suse.de> */
+/* Wait for dirty buffers to sync in sync_page_buffers.
+ * 2000, Marcelo Tosatti <marcelo@conectiva.com.br>
+ */
+
#include <linux/malloc.h>
#include <linux/locks.h>
#include <linux/errno.h>
@@ -1642,20 +1646,25 @@
#define BUFFER_BUSY_BITS ((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
#define buffer_busy(bh) ((bh)->b_count || ((bh)->b_state & BUFFER_BUSY_BITS))
-static inline int sync_page_buffers(struct buffer_head * bh)
+static int sync_page_buffers(struct buffer_head *bh, int wait)
{
struct buffer_head * tmp = bh;
do {
- if (buffer_dirty(tmp) && !buffer_locked(tmp))
- ll_rw_block(WRITE, 1, &tmp);
+ struct buffer_head *p = tmp;
tmp = tmp->b_this_page;
+ if (buffer_locked(p)) {
+ if (wait)
+ __wait_on_buffer(p);
+ } else if (buffer_dirty(p))
+ ll_rw_block(WRITE, 1, &p);
} while (tmp != bh);
do {
- if (buffer_busy(tmp))
- return 1;
+ struct buffer_head *p = tmp;
tmp = tmp->b_this_page;
+ if (buffer_busy(p))
+ return 1;
} while (tmp != bh);
return 0;
@@ -1668,7 +1677,7 @@
* Wake up bdflush() if this fails - if we're running low on memory due
* to dirty buffers, we need to flush them out as quickly as possible.
*/
-int try_to_free_buffers(struct page * page_map)
+int try_to_free_buffers(struct page * page_map, int wait)
{
struct buffer_head * tmp, * bh = page_map->buffers;
@@ -1699,7 +1708,7 @@
return 1;
busy:
- if (!sync_page_buffers(bh))
+ if (!sync_page_buffers(bh, wait))
/*
* We can jump after the busy check because
* we rely on the kernel lock.
--- linux/include/linux/fs.h.orig Fri Jan 3 05:18:55 1997
+++ linux/include/linux/fs.h Fri Jan 3 05:19:15 1997
@@ -800,7 +800,7 @@
extern void refile_buffer(struct buffer_head * buf);
extern void set_writetime(struct buffer_head * buf, int flag);
-extern int try_to_free_buffers(struct page *);
+extern int try_to_free_buffers(struct page *, int wait);
extern void cache_drop_behind(struct buffer_head *bh);
extern int nr_buffers;
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.rutgers.edu
Please read the FAQ at http://www.tux.org/lkml/
This archive was generated by hypermail 2b29 : Fri Jun 23 2000 - 21:00:15 EST