Set PG_launder against pages whose writeback was initiated by the VM
(PF_MEMALLOC context), so that page allocators will throttle against them.
PG_launder is now cleared only in end_page_writeback(), not in unlock_page(),
and __sync_single_inode() falls back to generic_writeback_mapping() when the
mapping has no ->writeback_mapping method.
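
For illustration only (not part of this patch): a minimal sketch of how a
reclaim-side caller might throttle on such pages, assuming the helpers that
are visible in this patch (PageLaunder(), PageWriteback(),
wait_on_page_writeback()).  The function name and the include list below are
hypothetical.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Illustrative helper: wait for writeback completion, but only on pages
 * which the VM itself submitted for I/O (PG_launder was set on the
 * PF_MEMALLOC path).  Pages under ordinary background writeback are
 * skipped, so allocators are not stalled behind unrelated I/O.
 */
static void throttle_on_laundered_page(struct page *page)
{
	if (PageLaunder(page) && PageWriteback(page))
		wait_on_page_writeback(page);
}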
=====================================
--- 2.5.13/mm/page-writeback.c~page_launder-fix Sun May 5 13:32:03 2002
+++ 2.5.13-akpm/mm/page-writeback.c Sun May 5 13:32:03 2002
@@ -354,6 +354,8 @@ int generic_writeback_mapping(struct add
lock_page(page);
if (TestClearPageDirty(page)) {
+ if (current->flags & PF_MEMALLOC)
+ SetPageLaunder(page);
err = writepage(page);
if (!ret)
ret = err;
--- 2.5.13/fs/fs-writeback.c~page_launder-fix Sun May 5 13:32:03 2002
+++ 2.5.13-akpm/fs/fs-writeback.c Sun May 5 13:32:03 2002
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
+#include <linux/mm.h>
#include <linux/writeback.h>
/**
@@ -135,7 +136,7 @@ static void __sync_single_inode(struct i
if (mapping->a_ops->writeback_mapping)
mapping->a_ops->writeback_mapping(mapping, nr_to_write);
else
- filemap_fdatawrite(mapping);
+ generic_writeback_mapping(mapping, NULL);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC))
--- 2.5.13/mm/filemap.c~page_launder-fix Sun May 5 13:32:03 2002
+++ 2.5.13-akpm/mm/filemap.c Sun May 5 13:32:03 2002
@@ -659,7 +659,6 @@ EXPORT_SYMBOL(wait_on_page_writeback);
void unlock_page(struct page *page)
{
wait_queue_head_t *waitqueue = page_waitqueue(page);
- clear_bit(PG_launder, &(page)->flags);
smp_mb__before_clear_bit();
if (!TestClearPageLocked(page))
BUG();
@@ -674,7 +673,7 @@ void unlock_page(struct page *page)
void end_page_writeback(struct page *page)
{
wait_queue_head_t *waitqueue = page_waitqueue(page);
- clear_bit(PG_launder, &(page)->flags);
+ ClearPageLaunder(page);
smp_mb__before_clear_bit();
if (!TestClearPageWriteback(page))
BUG();
--- 2.5.13/include/linux/page-flags.h~page_launder-fix Sun May 5 13:32:03 2002
+++ 2.5.13-akpm/include/linux/page-flags.h Sun May 5 13:32:03 2002
@@ -174,6 +174,7 @@ extern void get_page_state(struct page_s
#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags)
#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)