diff -uNr 2.4.4-mSsu/fs/proc/proc_misc.c 2.4.4-mSsua/fs/proc/proc_misc.c
--- 2.4.4-mSsu/fs/proc/proc_misc.c	Sun Apr 29 20:32:52 2001
+++ 2.4.4-mSsua/fs/proc/proc_misc.c	Mon May  7 13:38:53 2001
@@ -140,6 +140,17 @@
 {
 	struct sysinfo i;
 	int len;
+	unsigned int cached, shmem;
+
+	/*
+	 * There may be some inconsistency because shmem_nrpages
+	 * update is delayed to page_cache_size
+	 * We make sure the cached value does not get below zero
+	 */
+	cached = atomic_read(&page_cache_size);
+	shmem = atomic_read(&shmem_nrpages);
+	if (shmem < cached)
+		cached -= shmem;
 
 /*
  * display in kilobytes.
@@ -153,8 +164,8 @@
 		"Swap: %8lu %8lu %8lu\n",
 		B(i.totalram), B(i.totalram-i.freeram), B(i.freeram),
 		B(i.sharedram), B(i.bufferram),
-		B(atomic_read(&page_cache_size)), B(i.totalswap),
-		B(i.totalswap-i.freeswap), B(i.freeswap));
+		B(cached), B(i.totalswap),
+		B(i.totalswap-i.freeswap), B(i.freeswap));
 	/*
 	 * Tagged format, for easy grepping and expansion.
 	 * The above will go away eventually, once the tools
@@ -180,7 +191,7 @@
 		K(i.freeram),
 		K(i.sharedram),
 		K(i.bufferram),
-		K(atomic_read(&page_cache_size)),
+		K(cached),
 		K(nr_active_pages),
 		K(nr_inactive_dirty_pages),
 		K(nr_inactive_clean_pages()),
diff -uNr 2.4.4-mSsu/include/linux/shmem_fs.h 2.4.4-mSsua/include/linux/shmem_fs.h
--- 2.4.4-mSsu/include/linux/shmem_fs.h	Wed May  2 18:36:05 2001
+++ 2.4.4-mSsua/include/linux/shmem_fs.h	Mon May  7 12:52:00 2001
@@ -17,6 +17,8 @@
 	unsigned long val;
 } swp_entry_t;
 
+extern atomic_t shmem_nrpages;
+
 struct shmem_inode_info {
 	spinlock_t	lock;
 	struct semaphore sem;
diff -uNr 2.4.4-mSsu/mm/mmap.c 2.4.4-mSsua/mm/mmap.c
--- 2.4.4-mSsu/mm/mmap.c	Sun Apr 29 20:33:01 2001
+++ 2.4.4-mSsua/mm/mmap.c	Mon May  7 13:42:03 2001
@@ -55,13 +55,24 @@
 	 */
 
 	long free;
-	
+	unsigned long cached, shmem;
+
+	/*
+	 * There may be some inconsistency because shmem_nrpages
+	 * update is delayed to the page_cache_size
+	 * We make sure the cached value does not get below zero
+	 */
+	cached = atomic_read(&page_cache_size);
+	shmem = atomic_read(&shmem_nrpages);
+	if (cached > shmem)
+		cached -= shmem;
+
 	/* Sometimes we want to use more memory than we have. */
 	if (sysctl_overcommit_memory)
 		return 1;
 
 	free = atomic_read(&buffermem_pages);
-	free += atomic_read(&page_cache_size);
+	free += cached;
 	free += nr_free_pages();
 	free += nr_swap_pages;
diff -uNr 2.4.4-mSsu/mm/shmem.c 2.4.4-mSsua/mm/shmem.c
--- 2.4.4-mSsu/mm/shmem.c	Fri May  4 21:37:34 2001
+++ 2.4.4-mSsua/mm/shmem.c	Mon May  7 11:13:27 2001
@@ -3,7 +3,8 @@
  *
  * Copyright (C) 2000 Linus Torvalds.
  *		 2000 Transmeta Corp.
- *		 2000 Christoph Rohland
+ *		 2000-2001 Christoph Rohland
+ *		 2000-2001 SAP AG
  *
  * This file is released under the GPL.
  */
@@ -45,6 +46,7 @@
 
 LIST_HEAD (shmem_inodes);
 static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED;
+atomic_t shmem_nrpages = ATOMIC_INIT(0);
 
 #define BLOCKS_PER_PAGE (PAGE_SIZE/512)
 
@@ -52,6 +54,7 @@
  * shmem_recalc_inode - recalculate the size of an inode
  *
  * @inode: inode to recalc
+ * @swap: additional swap pages freed externally
  *
  * We have to calculate the free blocks since the mm can drop pages
  * behind our back
@@ -62,12 +65,14 @@
  *
  * So the mm freed
  * inodes->i_blocks/BLOCKS_PER_PAGE -
- * 		(inode->i_mapping->nrpages + info->swapped)
+ *       (inode->i_mapping->nrpages + info->swapped)
  *
  * It has to be called with the spinlock held.
+ *
+ * The swap parameter is a performance hack for truncate.
  */
-static void shmem_recalc_inode(struct inode * inode)
+static void shmem_recalc_inode(struct inode * inode, unsigned long swap)
 {
 	unsigned long freed;
 
@@ -79,6 +84,7 @@
 		spin_lock (&info->stat_lock);
 		info->free_blocks += freed;
 		spin_unlock (&info->stat_lock);
+		atomic_sub(freed-swap, &shmem_nrpages);
 	}
 }
 
@@ -195,7 +201,7 @@
 out:
 	info->max_index = index;
 	info->swapped -= freed;
-	shmem_recalc_inode(inode);
+	shmem_recalc_inode(inode, freed);
 	spin_unlock (&info->lock);
 	up(&info->sem);
 }
@@ -250,14 +256,15 @@
 	entry = shmem_swp_entry(info, page->index);
 	if (IS_ERR(entry))	/* this had been allocted on page allocation */
 		BUG();
-	shmem_recalc_inode(page->mapping->host);
+	shmem_recalc_inode(page->mapping->host, 0);
 	error = -EAGAIN;
 	if (entry->val)
 		BUG();
 
 	*entry = swap;
 	error = 0;
-	/* Remove the from the page cache */
+	/* Remove the page from the page cache */
+	atomic_dec(&shmem_nrpages);
 	lru_cache_del(page);
 	remove_inode_page(page);
@@ -376,6 +383,7 @@
 	}
 
 	/* We have the page */
+	atomic_inc(&shmem_nrpages);
 	SetPageUptodate(page);
 	if (info->locked)
 		page_cache_get(page);
@@ -1275,6 +1283,7 @@
 	return 0;
 found:
 	add_to_page_cache(page, info->inode->i_mapping, offset + idx);
+	atomic_inc(&shmem_nrpages);
 	set_page_dirty(page);
 	SetPageUptodate(page);
 	UnlockPage(page);
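Not part of the patch, just an illustration: the stand-alone sketch below mirrors the clamped subtraction that the proc_misc.c and mmap.c hunks introduce, i.e. "Cached" is computed as page_cache_size minus shmem_nrpages, and the subtraction is skipped entirely when it would underflow (because the shmem_nrpages update lags page_cache_size). The function and variable names here are made up for the example and do not appear in the patch.

/*
 * Hypothetical user-space sketch of the accounting used above.
 * Values are page counts; names are illustrative only.
 */
#include <stdio.h>

static unsigned long cached_minus_shmem(unsigned long page_cache_size,
					unsigned long shmem_nrpages)
{
	unsigned long cached = page_cache_size;

	/* Same guard as in the patch: only subtract when it cannot underflow. */
	if (shmem_nrpages < cached)
		cached -= shmem_nrpages;
	return cached;
}

int main(void)
{
	printf("%lu\n", cached_minus_shmem(25000, 4000));	/* 21000 */
	printf("%lu\n", cached_minus_shmem(3000, 4000));	/* 3000, subtraction skipped */
	return 0;
}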