[patch] free_swap_after deadcode

Andrea Arcangeli (andrea@suse.de)
Fri, 20 Aug 1999 18:40:52 +0200 (CEST)


This patch removes the free_swap_after logic. Now everything goes
through the swapcache, so we first account a reference in the swap map
and only then start the real I/O.
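
Just to make the ordering explicit, the writeout path now looks roughly
like this (a sketch from memory, not code taken from this patch; the
exact names and signatures may differ slightly):

	entry = get_swap_page();		/* accounts a swap_map reference */
	add_to_swap_cache(page, entry);		/* page->offset = entry */
	rw_swap_page(WRITE, entry, (char *) page_address(page), 0);

	/* The reference taken by get_swap_page() keeps the swap slot busy
	 * for the whole duration of the async I/O, so the end_io path in
	 * brw_page() no longer needs the PG_free_swap_after + swap_free()
	 * trick to drop a temporary reference. */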

The only exception is swapon, but at that point nobody can do a
get_swap_page() on such a swap device (since the device is not yet
enabled...).
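
In other words (sketch only, not from the patch), swapon does roughly:

	p->flags = SWP_USED;		/* registered but not yet usable */
	/* ... read and verify the swap header, build the swap_map ... */
	p->flags = SWP_WRITEOK;		/* only from here on can
					   get_swap_page() hand out
					   entries on this device */

and the header I/O happens before SWP_WRITEOK is set, so there is no
in-flight reference that would need the free_swap_after handling.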

Here is a patch against 2.3.15-pre1 that removes this code.

diff -ur 2.3.15-pre1/fs/buffer.c tmp/fs/buffer.c
--- 2.3.15-pre1/fs/buffer.c Thu Aug 12 02:53:23 1999
+++ tmp/fs/buffer.c Fri Aug 20 18:28:11 1999
@@ -754,9 +754,6 @@
if (test_and_clear_bit(PG_decr_after, &page->flags))
atomic_dec(&nr_async_pages);

- if (test_and_clear_bit(PG_free_swap_after, &page->flags))
- swap_free(page->offset);
-
free = test_and_clear_bit(PG_free_after, &page->flags);

if (page->owner != (void *)-1)
diff -ur 2.3.15-pre1/include/linux/mm.h tmp/include/linux/mm.h
--- 2.3.15-pre1/include/linux/mm.h Fri Aug 20 18:31:23 1999
+++ tmp/include/linux/mm.h Fri Aug 20 18:32:19 1999
@@ -147,7 +147,6 @@
#define PG_uptodate 3
#define PG_free_after 4
#define PG_decr_after 5
-#define PG_free_swap_after 6
#define PG_DMA 7
#define PG_Slab 8
#define PG_swap_cache 9
@@ -182,7 +181,6 @@
#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
-#define PageSwapUnlockAfter(page) (test_bit(PG_free_swap_after, &(page)->flags))
#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
#define PageSlab(page) (test_bit(PG_Slab, &(page)->flags))
#define PageSwapCache(page) (test_bit(PG_swap_cache, &(page)->flags))
diff -ur 2.3.15-pre1/mm/page_io.c tmp/mm/page_io.c
--- 2.3.15-pre1/mm/page_io.c Fri Aug 20 17:42:39 1999
+++ tmp/mm/page_io.c Fri Aug 20 18:29:15 1999
@@ -33,7 +33,7 @@
* that shared pages stay shared while being swapped.
*/

-static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, int wait, int dolock)
+static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, int wait)
{
unsigned long type, offset;
struct swap_info_struct * p;
@@ -118,10 +118,6 @@
set_bit(PG_decr_after, &page->flags);
atomic_inc(&nr_async_pages);
}
- if (dolock) {
- set_bit(PG_free_swap_after, &page->flags);
- p->swap_map[offset]++;
- }
set_bit(PG_free_after, &page->flags);

/* block_size == PAGE_SIZE/zones_used */
@@ -164,11 +160,10 @@
PAGE_BUG(page);
if (page->inode != &swapper_inode)
PAGE_BUG(page);
- rw_swap_page_base(rw, entry, page, wait, 1);
+ rw_swap_page_base(rw, entry, page, wait);
}

/*
- * shmfs needs a version that doesn't put the page in the page cache!
* The swap lock map insists that pages be in the page cache!
* Therefore we can't use it. Later when we can remove the need for the
* lock map and we can reduce the number of functions exported.
@@ -181,5 +176,5 @@
PAGE_BUG(page);
if (PageSwapCache(page))
PAGE_BUG(page);
- rw_swap_page_base(rw, entry, page, wait, 0);
+ rw_swap_page_base(rw, entry, page, wait);
}

Andrea
