 mm/memory.c |   34 +++++++++++++++++++++++++++++-----
 1 files changed, 29 insertions(+), 5 deletions(-)

diff -puN mm/memory.c~cow-ahead mm/memory.c
--- 25/mm/memory.c~cow-ahead	Mon Apr 14 20:08:44 2003
+++ 25-wind/mm/memory.c	Mon Apr 14 20:37:42 2003
@@ -1452,7 +1452,7 @@ static int do_file_page(struct mm_struct
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
 	struct vm_area_struct * vma, unsigned long address,
-	int write_access, pte_t *pte, pmd_t *pmd)
+	int write_access, pte_t *pte, pmd_t *pmd, int *cowahead)
 {
 	pte_t entry;
 
@@ -1471,8 +1471,11 @@ static inline int handle_pte_fault(struc
 	}
 
 	if (write_access) {
-		if (!pte_write(entry))
+		if (!pte_write(entry)) {
+			if(!*cowahead)
+				*cowahead = 1;
 			return do_wp_page(mm, vma, address, pte, pmd, entry);
+		}
 
 		entry = pte_mkdirty(entry);
 	}
@@ -1492,6 +1495,17 @@ int handle_mm_fault(struct mm_struct *mm
 	pgd_t *pgd;
 	pmd_t *pmd;
 
+	int cowahead, i;
+	int retval, x;
+
+	/*
+	 * Implement cow-ahead: copy-on-write several
+	 * pages when we fault one of them
+	 */
+
+	i = cowahead = 0;
+
+do_cowahead:
 	__set_current_state(TASK_RUNNING);
 	pgd = pgd_offset(mm, address);
 
@@ -1507,10 +1521,20 @@ int handle_mm_fault(struct mm_struct *mm
 	spin_lock(&mm->page_table_lock);
 	pmd = pmd_alloc(mm, pgd, address);
-	if (pmd) {
+	while (pmd) {
 		pte_t * pte = pte_alloc_map(mm, pmd, address);
-		if (pte)
-			return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
+		if (!pte) break;
+
+		x = handle_pte_fault(mm, vma, address, write_access, pte, pmd, &cowahead);
+		if(!i) retval = x;
+
+		i++;
+		address += PAGE_SIZE;
+
+		if(!cowahead || i >= 0 || address >= vma->vm_end)
+			return retval;
+
+		goto do_cowahead;
 	}
 	spin_unlock(&mm->page_table_lock);
 	return VM_FAULT_OOM;
 }
_
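
A note on the loop above: as posted, the exit test reads "i >= 0", which is
already true after the first pass through handle_pte_fault(), so the function
returns after handling a single page and never actually copies ahead. Below is
a minimal userspace sketch of the apparent intent, with the page-table walk
and locking stubbed out. COWAHEAD_PAGES and fault_one_pte() are hypothetical
stand-ins (the patch defines no window size), and the goto-do_cowahead re-walk
is flattened into a plain loop, so treat this as an illustration of the idea,
not the patch itself.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define COWAHEAD_PAGES	8	/* assumed window; the patch defines none */

/* Stand-in for handle_pte_fault(): pretend every pte is a COW break. */
static int fault_one_pte(unsigned long address, int *cowahead)
{
	*cowahead = 1;	/* write fault on a read-only pte */
	printf("COW page at %#lx\n", address);
	return 1;	/* "minor fault handled" */
}

/* Mirrors the patched handle_mm_fault() loop, minus page tables and locks. */
static int fault_with_cowahead(unsigned long address, unsigned long vm_end)
{
	int cowahead = 0, i = 0;
	int retval = 0, x;

	for (;;) {
		x = fault_one_pte(address, &cowahead);
		if (!i)
			retval = x;	/* report only the original fault's result */

		i++;
		address += PAGE_SIZE;

		/* The patch tests "i >= 0" here, which always stops the loop. */
		if (!cowahead || i >= COWAHEAD_PAGES || address >= vm_end)
			return retval;
	}
}

int main(void)
{
	/* Fault the first page of a 16-page "vma": 8 pages get COW-broken. */
	return !fault_with_cowahead(0x10000UL, 0x10000UL + 16 * PAGE_SIZE);
}

The goto back to do_cowahead, rather than a direct loop over ptes, fits the
locking rules of this era: handle_pte_fault()'s callees (do_wp_page() and
friends) return with mm->page_table_lock dropped, so the pgd/pmd/pte walk has
to be redone from scratch for each successive address. Note also that retval
only captures the first page's fault result, so a failure while copying ahead
cannot turn an already-successful fault into an error report.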