[PATCH] virtual mapping (swap) support for ramfs (for 2.5/2.6)

From: Matt Yourst (yourst@mit.edu)
Date: Sat Jun 03 2000 - 13:58:07 EST


"Juan J. Quintela" wrote
>
> Hi matt
> I am interested in seeing the code. If you can't finish it, please
> send what you have to me.
>
> matt> I have some code that does this quite nicely using a variant of
> matt> ramfs that simply maintains a hash table of any swapped out pages
> matt> (i.e., offset -> swp_entry_t mappings.) The performance of this was
> matt> excellent and the code was simple (less than ~200 lines). However,
> matt> I can't post it right now since it only supports the new shm_open()
> matt> style virtual mappings (not the current SysV IPC-style
> matt> shmget/shmat/etc. system), plus I have not tested it recently and
> matt> will need to update it to work with current kernels. If anyone's
> matt> interested in taking this idea further, I'll try to fix the code
> matt> for 2.4.0-test1 and post it later this week.
>

Here's my partially-working patch that adds "virtual mapping" support to ramfs. This is a *very* early version: it does not yet work correctly when restoring some larger page ranges from the swap file. If you think this might be useful, good luck debugging that problem (I suspect you know the page cache and swap code better than I do.) There are still lots of other improvements and fixes to be made as well, for instance a global hash table like the page cache's, and tighter integration with filemap.c.
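In outline, the per-inode virtual map just shadows the page cache with an offset -> swp_entry_t hash, so all the interesting work happens in the two address_space operations. A simplified sketch of the flow implemented below (glossing over locking, error handling, and the highmem details; lookup/lookup_or_add stand in for find_add_virtual_swap_entry in the real code):

        /* readpage: hand out zeroes, or pull the page back in from swap */
        entry = lookup(map, page->index);
        if (!entry) {
                clear_highpage(page);           /* never swapped: zero-fill */
        } else {
                rw_swap_page_base(READ, entry->entry, page, 0);
                wait_on_page(page);
                swap_free(entry->entry);        /* page cache owns it again */
                unhash(map, entry);
        }

        /* writepage: push the page out and remember where it went */
        entry = lookup_or_add(map, page->index);
        if (!entry->entry.val)
                entry->entry = get_swap_page();
        rw_swap_page_base(WRITE, entry->entry, page, 1);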

Here's what I did to test this (with the kernel limited to 16 MB of physical RAM):

1. Boot Linux with only bash running, but do swapon -a
2. mount -t ramfs test-label /mnt/ram
3. cp /dev/null /mnt/ram/testfile
4. testvirtualmap r /mnt/ram/testfile 20000000
5. testvirtualmap w /mnt/ram/testfile 20000000
6. testvirtualmap r /mnt/ram/testfile 20000000

The testvirtualmap.c program is included below (the syntax is testvirtualmap [r|w] <ramfs file> <size in bytes>.) It may help to toggle the console log level (Alt+SysRq+1..9, etc.) while debugging, and/or to redirect testvirtualmap's output to /dev/null; there are lots of messages!
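For example, a typical session might look like this (the compile line is just my assumption; any reasonably recent gcc should do):

        gcc -O2 -o testvirtualmap testvirtualmap.c
        ./testvirtualmap w /mnt/ram/testfile 20000000 > /dev/null
        ./testvirtualmap r /mnt/ram/testfile 20000000 > /dev/null

Note that the program pauses twice for Enter, so you can watch the console between stages.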

First, the patch against 2.4.0-test1-ac7:

--- linux/mm/Makefile.pre-vmap Thu Jun 1 20:00:58 2000
+++ linux/mm/Makefile Thu Jun 1 20:01:13 2000
@@ -10,7 +10,7 @@
 O_TARGET := mm.o
 O_OBJS := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
             vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
-            page_alloc.o swap_state.o swapfile.o numa.o
+            page_alloc.o swap_state.o swapfile.o numa.o virtualmap.o
 
 ifeq ($(CONFIG_HIGHMEM),y)
 O_OBJS += highmem.o

--- linux/mm/page_io.c.pre-vmap Thu Jun 1 19:48:37 2000
+++ linux/mm/page_io.c Fri Jun 2 18:37:01 2000
@@ -33,7 +33,7 @@
  * that shared pages stay shared while being swapped.
  */
 
-static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page, int wait)
+int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page, int wait)
 {
         unsigned long offset;
         int zones[PAGE_SIZE/512];

--- linux/mm/filemap.c.pre-vmap Thu Jun 1 21:47:55 2000
+++ linux/mm/filemap.c Sat Jun 3 14:21:28 2000
@@ -1788,6 +1788,15 @@
         nopage: filemap_nopage,
 };
 
+/*
+ * Virtual mappings are like shared mappings that never
+ * sync to disk (except when swapping out a page.)
+ */
+static struct vm_operations_struct file_virtual_mmap = {
+        nopage: filemap_nopage,
+        swapout: filemap_swapout,
+};
+
 /* This is used for a general mmap of a disk file */
 
 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -1799,7 +1808,9 @@
         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
                 if (!inode->i_mapping->a_ops->writepage)
                         return -EINVAL;
-                ops = &file_shared_mmap;
+                if (vma->vm_flags & VM_DONTSYNC)
+                        ops = &file_virtual_mmap;
+                else ops = &file_shared_mmap;
         }
         if (!inode->i_sb || !S_ISREG(inode->i_mode))
                 return -EACCES;
@@ -1817,6 +1828,7 @@
 static int msync_interval(struct vm_area_struct * vma,
         unsigned long start, unsigned long end, int flags)
 {
+        if (vma->vm_flags & VM_DONTSYNC) return 0;
         if (vma->vm_file && vma->vm_ops && vma->vm_ops->sync) {
                 int error;
                 error = vma->vm_ops->sync(vma, start, end-start, flags);
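
(For what it's worth, the user-visible effect of VM_DONTSYNC is that msync() on such a mapping becomes a no-op. A minimal sanity check against a patched kernel might look like this; the file path is just an example:

        #include <stdio.h>
        #include <fcntl.h>
        #include <sys/mman.h>

        int main(void)
        {
                int fd = open("/mnt/ram/testfile", O_RDWR);
                char* p;
                if (fd < 0) return 1;
                p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
                if (p == MAP_FAILED) return 1;
                p[0] = 1;
                /* On a VM_DONTSYNC vma, msync_interval() returns 0 up front,
                   so this should succeed without queuing any writeback I/O. */
                return msync(p, 4096, MS_SYNC);
        }
)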

--- linux/fs/ramfs/inode.c.pre-vmap Thu Jun 1 20:30:20 2000
+++ linux/fs/ramfs/inode.c Sat Jun 3 13:59:23 2000
@@ -28,8 +28,9 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/locks.h>
-
+#include <linux/virtualmap.h>
 #include <asm/uaccess.h>
+#include <asm/page.h>
 
 /* some random number */
 #define RAMFS_MAGIC 0x858458f6
@@ -48,6 +49,11 @@
         return 0;
 }
 
+static void ramfs_delete_inode(struct inode* inode)
+{
+        free_virtual_map(&inode->u.virtualmap_i);
+}
+
 /*
  * Lookup the data. This is trivial - if the dentry didn't already
  * exist, we know it is negative.
@@ -64,12 +70,7 @@
  */
 static int ramfs_readpage(struct file *file, struct page * page)
 {
-        if (!Page_Uptodate(page)) {
-                memset((void *) page_address(page), 0, PAGE_CACHE_SIZE);
-                SetPageUptodate(page);
-        }
-        UnlockPage(page);
-        return 0;
+        return virtual_map_readpage(&file->f_dentry->d_inode->u.virtualmap_i, page);
 }
 
 /*
@@ -78,10 +79,9 @@
  */
 static int ramfs_writepage(struct file *file, struct page *page)
 {
-        SetPageDirty(page);
-        return 0;
+        return virtual_map_writepage(&file->f_dentry->d_inode->u.virtualmap_i, page);
 }
-
+/*
 static int ramfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
         void *addr;
@@ -105,7 +105,7 @@
                 inode->i_size = pos;
         return 0;
 }
-
+*/
 struct inode *ramfs_get_inode(struct super_block *sb, int mode, int dev)
 {
         struct inode * inode = get_empty_inode();
@@ -131,6 +131,7 @@
                         break;
                 case S_IFREG:
                         inode->i_fop = &ramfs_file_operations;
+                        if (initialize_virtual_map(&inode->u.virtualmap_i)) return NULL; /* ::MTY FIXME inode leak */
                         break;
                 case S_IFDIR:
                         inode->i_op = &ramfs_dir_inode_operations;
@@ -138,6 +139,7 @@
                         break;
                 case S_IFLNK:
                         inode->i_op = &page_symlink_inode_operations;
+                        if (initialize_virtual_map(&inode->u.virtualmap_i)) return NULL; /* ::MTY FIXME inode leak */
                         break;
                 }
         }
@@ -255,6 +257,18 @@
         return error;
 }
 
+static void ramfs_truncate(struct inode* inode)
+{
+        printk("Truncating ramfs inode to %ld pages\n", (long)(inode->i_size >> PAGE_SHIFT));
+        decommit_virtual_swap_pages(&inode->u.virtualmap_i, PAGE_ALIGN(inode->i_size) >> PAGE_SHIFT, 0xffffffff);
+}
+
+static int ramfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+        vma->vm_flags |= (VM_DONTSYNC | VM_RAND_READ);
+        return generic_file_mmap(file, vma);
+}
+
 static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
 {
         int error;
@@ -271,14 +285,15 @@
 static struct address_space_operations ramfs_aops = {
         readpage: ramfs_readpage,
         writepage: ramfs_writepage,
-        prepare_write: ramfs_prepare_write,
-        commit_write: ramfs_commit_write
+        sync_page: block_sync_page, /* CRITICAL: without this, the disk queue for swap only gets flushed every few seconds! */
+        /* prepare_write: ramfs_prepare_write, */
+        /* commit_write: ramfs_commit_write */
 };
 
 static struct file_operations ramfs_file_operations = {
         read: generic_file_read,
         write: generic_file_write,
-        mmap: generic_file_mmap
+        mmap: ramfs_mmap,
 };
 
 static struct file_operations ramfs_dir_operations = {
@@ -296,6 +311,7 @@
         rmdir: ramfs_rmdir,
         mknod: ramfs_mknod,
         rename: ramfs_rename,
+        truncate: ramfs_truncate,
 };
 
 static void ramfs_put_super(struct super_block *sb)
@@ -307,6 +323,7 @@
 static struct super_operations ramfs_ops = {
         put_super: ramfs_put_super,
         statfs: ramfs_statfs,
+        delete_inode: ramfs_delete_inode,
 };
 
 static struct super_block *ramfs_read_super(struct super_block * sb, void * data, int silent)

--- linux/include/linux/fs.h.pre-vmap Thu Jun 1 19:45:04 2000
+++ linux/include/linux/fs.h Fri Jun 2 18:52:43 2000
@@ -264,6 +264,7 @@
 
 
 #include <linux/pipe_fs_i.h>
+#include <linux/virtualmap.h>
 #include <linux/minix_fs_i.h>
 #include <linux/ext2_fs_i.h>
 #include <linux/hpfs_fs_i.h>
@@ -418,6 +419,7 @@
         unsigned int i_attr_flags;
         __u32 i_generation;
         union {
+                struct virtual_map virtualmap_i;
                 struct minix_inode_info minix_i;
                 struct ext2_inode_info ext2_i;
                 struct hpfs_inode_info hpfs_i;

--- linux/include/linux/mm.h.pre-vmap Thu Jun 1 21:51:27 2000
+++ linux/include/linux/mm.h Fri Jun 2 18:52:43 2000
@@ -91,6 +91,7 @@
 #define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
 
 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
+#define VM_DONTSYNC 0x00040000 /* Do not sync this vma to disk (used for swap-backed virtual maps) */
 
 #define VM_STACK_FLAGS 0x00000177
 

--- /dev/null Wed Dec 31 19:00:00 1969
+++ linux/include/linux/virtualmap.h Thu Jun 1 23:11:58 2000
@@ -0,0 +1,29 @@
+#ifndef _LINUX_VIRTUALMAP_H
+#define _LINUX_VIRTUALMAP_H
+
+struct virtual_swap_entry;
+
+/*
+ * Shared memory section structure that goes inside each
+ * shm inode. We mostly use this to track swapped out pages
+ * (we let the page cache manage all in-memory pages.) The
+ * [template] member is present for future expansion in
+ * case we want to support various file-related transforms
+ * (ideas include transactional msync, unaligned mappings,
+ * various NT-style section types, user-space paging, etc.)
+ */
+struct virtual_map {
+        struct semaphore lock;
+        struct virtual_swap_entry** map;
+        int flags;
+        void* template;
+};
+
+int initialize_virtual_map(struct virtual_map* map);
+int virtual_map_readpage(struct virtual_map* map, struct page* page);
+int virtual_map_writepage(struct virtual_map* map, struct page* page);
+void decommit_virtual_swap_pages(struct virtual_map* map, unsigned int firstindex, unsigned int lastindex);
+void free_virtual_map(struct virtual_map* map);
+void virtual_map_swap_init(void);
+
+#endif
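
To put concrete numbers on the hashing scheme: with 4 KB pages and 4-byte pointers, the root table holds PAGE_SIZE/4 = 1024 buckets (exactly one page), and VIRTUAL_MAP_HASH() just keeps the low 10 bits of the page index; for example, page index 1234567 lands in bucket 1234567 & 1023 = 647. Even a fully swapped-out 4 GB file (2^20 pages) therefore averages about 1024 entries per chain, which is tolerable for now and part of why I mention a global page-cache-style hash as a future improvement.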

--- /dev/null Wed Dec 31 19:00:00 1969
+++ linux/mm/virtualmap.c Sat Jun 3 14:34:42 2000
@@ -0,0 +1,265 @@
+/*
+ * linux/mm/virtualmap.c
+ *
+ * Copyright (c) 2000 Matt Yourst <yourst@mit.edu>
+ *
+ * See linux/include/linux/virtualmap.h.
+ *
+ * This file handles virtual shared memory mapping through the
+ * page cache. Most of the code here deals with swapping of
+ * pages via the virtual mapping inode's address_space_ops.
+ */
+
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+#include <linux/pagemap.h>
+
+extern int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page, int wait);
+
+/*
+ * Where we allocate the 16-byte virtual_swap_entry structs from.
+ */
+static kmem_cache_t* virtual_swap_entry_cachep = NULL;
+
+/*
+ * Every time a shared memory page is written to swap,
+ * a virtual_swap_entry structure is allocated and hashed
+ * into the appropriate inode's virtual_map. This
+ * occurs even if the page is only being pre-swapped out
+ * and still remains in memory.
+ */
+struct virtual_swap_entry {
+        struct virtual_swap_entry *next;
+        struct virtual_swap_entry *prev;
+        int index;
+        swp_entry_t entry;
+};
+
+/*
+ * We use a root hash table the size of a single page for simplicity.
+ * This is very efficient compared to the old way in linux/ipc/shm.c
+ * (i.e., allocating a whole pseudo page table.)
+ */
+#define VIRTUAL_MAP_ENTRIES (PAGE_SIZE / sizeof(struct virtual_swap_entry*))
+#define VIRTUAL_MAP_ENTRIES_HASH_MASK (VIRTUAL_MAP_ENTRIES - 1)
+#define VIRTUAL_MAP_HASH(pageindex) ((pageindex) & VIRTUAL_MAP_ENTRIES_HASH_MASK)
+
+int initialize_virtual_map(struct virtual_map* map)
+{
+        init_MUTEX(&map->lock);
+        map->flags = 0;
+        map->template = NULL;
+        map->map = vmalloc(VIRTUAL_MAP_ENTRIES * sizeof(struct virtual_swap_entry*));
+        if (!map->map) return -ENOMEM;
+        memset(map->map, 0, VIRTUAL_MAP_ENTRIES * sizeof(struct virtual_swap_entry*));
+        return 0;
+}
+
+void free_virtual_map(struct virtual_map* map)
+{
+        decommit_virtual_swap_pages(map, 0, 0xffffffff);
+        vfree(map->map);
+        memset(map, -1, sizeof(struct virtual_map)); /* for debugging */
+}
+
+/*
+ * Looks up a page in the swap map hash table using its index as a key.
+ * If [add] is false, it returns the page if and only if it was found,
+ * or NULL otherwise. If [add] is true, AND the page was NOT found, we
+ * allocate a new swap map entry and insert it into the hash chain. It
+ * is up to the caller to do the appropriate thing if entry is nonzero
+ * (i.e., the page was found instead of added) or zero (it was just
+ * added, so the caller should use get_swap_page to fill this in.)
+ *
+ * IMPORTANT! This function, unhash_virtual_swap_entry and
+ * decommit_virtual_swap_pages MUST BE CALLED WITH THE MAP
+ * SEMAPHORE (map->lock) LOCKED.
+ */
+static struct virtual_swap_entry* find_add_virtual_swap_entry(struct virtual_map* map, int index, int add)
+{
+        struct virtual_swap_entry** rootp = &map->map[VIRTUAL_MAP_HASH(index)];
+        struct virtual_swap_entry* p = *rootp;
+
+        /* Is the page already in the swap map? */
+        while (p && (p->index != index)) p = p->next;
+        if (p || !add) return p;
+
+        /*
+         * Add a new entry to the start of the chain. The caller should
+         * check if p->entry.val == 0; if true, a new swap page must be
+         * allocated (otherwise we're just updating an old one.)
+         */
+        p = kmem_cache_alloc(virtual_swap_entry_cachep, SLAB_KERNEL);
+        if (!p) return NULL; /* out of memory; caller must check for this */
+        p->index = index;
+        p->entry.val = 0;
+        p->next = *rootp;
+        p->prev = NULL;
+        if (*rootp) (*rootp)->prev = p;
+        *rootp = p;
+        return p;
+}
+
+/*
+ * Remove the shared swap entry from its hash chain and free it.
+ */
+static void unhash_virtual_swap_entry(struct virtual_map* map, struct virtual_swap_entry* entry)
+{
+        if (entry->next) entry->next->prev = entry->prev;
+        if (entry->prev)
+                entry->prev->next = entry->next;
+        else map->map[VIRTUAL_MAP_HASH(entry->index)] = entry->next;
+        kmem_cache_free(virtual_swap_entry_cachep, entry);
+}
+
+/*
+ * Decommit any pages in the specified range by walking each hash chain.
+ * Note that this is not a particularly efficient function as it has to
+ * walk the whole hash table, but it's called very rarely (when freeing
+ * pages, or very occasionally by an application.)
+ */
+void decommit_virtual_swap_pages(struct virtual_map* map,
+        unsigned int firstindex, unsigned int lastindex)
+{
+        swp_entry_t entry;
+        struct virtual_swap_entry* p;
+        struct virtual_swap_entry* next;
+        int i;
+
+        down(&map->lock);
+        /* Walk every hash chain, freeing any entry that falls in the range. */
+        for (i = 0; i < VIRTUAL_MAP_ENTRIES; i++) {
+                p = map->map[i];
+                while (p) {
+                        next = p->next; /* save the link now: unhashing frees p */
+                        if (p->index >= firstindex && p->index <= lastindex) {
+                                entry.val = p->entry.val;
+                                printk("Unhashing vmapswap entry 0x%08lx for page %d (at offset %d)\n", entry.val, p->index, p->index*4096);
+                                unhash_virtual_swap_entry(map, p);
+                                swap_free(entry);
+                        }
+                        p = next;
+                }
+        }
+        up(&map->lock);
+}
+
+#ifdef VIRTUAL_MAP_DEBUG
+static void virtual_swap_walk_list(struct virtual_map* map, int firstindex, int lastindex)
+{
+        struct virtual_swap_entry* p;
+        int i;
+
+        printk("{\n");
+        /* Dump every hash chain entry within the given index range. */
+        for (i = 0; i < VIRTUAL_MAP_ENTRIES; i++) {
+                p = map->map[i];
+                while (p) {
+                        if (p->index >= firstindex && p->index <= lastindex) {
+                                printk(" [0x%p]: prev 0x%p, next 0x%p, page %d entry %lu\n", p, p->prev, p->next, p->index, p->entry.val);
+                        }
+                        p = p->next;
+                }
+        }
+        printk("}\n");
+}
+#endif
+
+/*
+ * Provide the specified page to the page cache by either
+ * zero-filling or reading its data from the swap file.
+ */
+int virtual_map_readpage(struct virtual_map* map, struct page* page)
+{
+        struct virtual_swap_entry* entry;
+        swp_entry_t e;
+
+        down(&map->lock);
+        entry = find_add_virtual_swap_entry(map, page->index, 0);
+
+        /* printk("readpage: entry = 0x%p\n", entry); */
+        if (!entry) {
+                /* We've never seen this page before: zero-fill it. */
+                /* dentry->d_inode->i_size = max(dentry->d_inode->i_size, (page->index + 1) << PAGE_SHIFT) & PAGE_MASK; */
+                printk("vmapZERO: Zero page %lu (offset %lu)\n", page->index, page->index*4096);
+                up(&map->lock);
+                clear_highpage(page);
+                SetPageUptodate(page);
+                UnlockPage(page);
+        } else {
+                printk("vmapread: Read page %lu (offset %lu) from entry 0x%08lx\n", page->index, page->index*4096, entry->entry.val);
+                /* We found it in the swap map. Read it in and remove the hash entry and the swap space it used. */
+                /* printk("[current=0x%p] virtual_map_readpage: found in swap map [index %lu] = 0x%08lx\n", current, page->index, entry->entry.val); */
+                rw_swap_page_base(READ, entry->entry, page, 0);
+                wait_on_page(page);
+                e.val = entry->entry.val;
+                unhash_virtual_swap_entry(map, entry);
+                printk("readpage: now freeing swap\n");
+                /* WARNING!!! This should somehow be done AFTER we are sure the page was read in. */
+                swap_free(e);
+                up(&map->lock);
+                SetPageUptodate(page);
+                UnlockPage(page);
+        }
+        /* printk("readpage: returning\n"); */
+        return 0;
+}
+
+/*
+ * Write a page to the swap file. Clustering is handled by the
+ * mmap/filemap code.
+ */
+int virtual_map_writepage(struct virtual_map* map, struct page* page)
+{
+        struct virtual_swap_entry* entry;
+
+        down(&map->lock);
+        entry = find_add_virtual_swap_entry(map, page->index, 1);
+        if (!entry) { up(&map->lock); return -ENOMEM; } /* slab allocation failed */
+
+        /*
+         * Is the page already in the swap file? This might happen if the system
+         * is running low on free pages and the virtual memory manager asks us
+         * to pre-swap [page] (but without freeing it) as an optimization. If
+         * it's already in the swap file, just return - we don't want duplicates.
+         */
+        if (!entry->entry.val) {
+                entry->entry = get_swap_page();
+                if (!entry->entry.val) {
+                        /*
+                         * We're all out of swap space (this is bad.) Free the
+                         * swap map entry we allocated and return an error.
+                         */
+                        unhash_virtual_swap_entry(map, entry);
+                        up(&map->lock);
+                        printk("ALL OUT OF SWAP SPACE!!!!!!!!!!!!\n");
+                        return -ENOMEM;
+                }
+        }
+        /*
+         * We have now allocated a new swap file entry if necessary.
+         * Write out the page; wait=1 makes this synchronous for now,
+         * even though the code in filemap.c would wait on the locked
+         * page anyway.
+         */
+        page = prepare_highmem_swapout(page);
+        rw_swap_page_base(WRITE, entry->entry, page, 1);
+        /* printk("[current=0x%p] virtual_map_writepage: wrote page [index %lu] = 0x%08lx\n", current, page->index, entry->entry.val); */
+        up(&map->lock);
+        SetPageUptodate(page);
+        printk("vmapwrite: Write page %lu (offset %lu) to entry 0x%08lx\n", page->index, page->index*4096, entry->entry.val);
+        return 0;
+}
+
+void __init virtual_map_swap_init(void)
+{
+        virtual_swap_entry_cachep = kmem_cache_create("virtual_swap_entry",
+                sizeof(struct virtual_swap_entry), 0, 0, NULL, NULL);
+        if (!virtual_swap_entry_cachep)
+                panic("virtual_map_swap_init: Failed to allocate slab cache\n");
+}
+

--- linux/init/main.c~ Wed May 31 12:50:47 2000
+++ linux/init/main.c Thu Jun 1 23:10:51 2000
@@ -27,6 +27,7 @@
 #include <linux/hdreg.h>
 #include <linux/iobuf.h>
 #include <linux/bootmem.h>
+#include <linux/virtualmap.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -581,6 +582,7 @@
         filescache_init();
         dcache_init(mempages);
         vma_init();
+        virtual_map_swap_init();
         buffer_init(mempages);
         page_cache_init(mempages);
         kiobuf_setup();

Now the test program:

#include <unistd.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <errno.h>
#include <stdio.h>
#include <sched.h>
#include <asm/mman.h>

void fill_page(void* p, int tag)
{
  int i;
  for (i = 0; i < 1024; i++) ((int*)p)[i] = tag;
}

void check_page(void* p, int tag)
{
  int i, j;
  for (i = 0; i < 1024; i++) {
    j = ((int*)p)[i];
    if (j != tag) {
      printf("PAGE FAILED: p = 0x%08x, i = %d, got = %d, expected = %d\n", p, i, j, tag); fflush(stdout); return;
    }
  }
  printf("OK");
}

/* Syntax is: testvirtualmap [r|w] filename size-in-bytes */
int main(int argc, char* argv[])
{
  int h;
  char* p;
  char* pp;
  char buf[512];
  int i;
  int pagecount;

  if (argc < 4) {
    printf("Syntax: testvirtualmap [r|w] <ramfs file> <size in bytes>\n");
    return -1;
  }
  pagecount = (atoi(argv[3]) + 4095) >> 12;
  printf("Testing virtual mapping using %d pages = %d bytes\n", pagecount, pagecount << 12);

  h = open(argv[2], O_RDWR, 0);
  if (h < 0) {
    printf("ERROR! Could not open specified virtual section file.\n");
    return -1;
  }
  printf("Opened %s as fd %d\n", argv[2], h); fflush(stdout);

  fgets(buf, sizeof(buf), stdin); /* pause: press Enter (so you can watch the console) */

  printf("Resizing virtual map to desired size...\n"); fflush(stdout);
  ftruncate(h, pagecount*4096);

  p = mmap(NULL, pagecount*4096, PROT_READ | PROT_WRITE, MAP_SHARED, h, 0);
  if (!p) {
    printf("ERROR! Could not map the virtual section.\n");
                return -1;
  }
  printf("Mapped virtual section using MAP_SHARED for read and write access at 0x%08x", p); fflush(stdout);

  fgets(buf, sizeof(buf), stdin); /* pause again before touching the pages */
  pp = p;
  for (i = 0; i < pagecount; i++) {
    if (*argv[1] == 'w') {
      printf("\rWriting 0x%08x: to page %d at 0x%08x (offset %d)...", pp, i, i*4096, i*4096); fflush(stdout);
      fill_page(pp, i);
    } else {
      printf("\rReading 0x%08x: from page %d at 0x%08x (offset %d)...", pp, i, i*4096, i*4096); fflush(stdout);
      check_page(pp, i);
    }
    pp += 4096;
  }

  printf("\n\nAll done!\n");
  return 0;
}

Good luck with this; please let me know if it doesn't work at all (aside from the nasty bug I already mentioned but don't know how to solve.)

- Matt Yourst

-------------------------------------------------------------
 Matt T. Yourst Massachusetts Institute of Technology
 yourst@mit.edu 617.225.7690
 513 French House - 476 Memorial Drive - Cambridge, MA 02136
-------------------------------------------------------------



