fix for unmap_fixup -- please review

Bill Hawes (whawes@star.net)
Mon, 18 Aug 1997 13:13:36 -0400


This is a multi-part message in MIME format.
--------------B0ED82178323183C3789CC5E
Content-Type: text/plain; charset=us-ascii
Content-Transfer-Encoding: 7bit

A while back I noticed a problem in unmap_fixup in that an allocation
failure causes it to bail out with no error indication, leaving the
mapping in an incorrect state. The attached patch is (I think) a
reasonable way to fix this.

The approach is as follows: Any unmapping will create at most one new
vma struct, for the case in which a hole is created in an existing vma.
For the cases in which we unmap from the start to an intermediate point,
or from an intermediate point to the end, the existing vma can be reused
after adjusting the appropriate values. Therefore, if we allocate one
vma struct before starting, the possible allocation failure can be
reported before modifying the existing mapping.

With the extra vma as a reserve, the code operates as before, except
that unmap_fixup returns a boolean to indicate whether the vma was
reused. If a vma is reused, the close operation is called first to
close the old mapping, and then the open is called and the vma
reinserted.

There also appears to be one other problem with the existing code --
when creating a hole, the vma for the end part has the statement
mpnt->vm_offset += (end - area->vm_start);
Since mpnt hasn't been initialized, this can't be correct, so I've used
mpnt->vm_offset = area->vm_offset + (end - area->vm_start);

I'm not sure what effect this would have or when the current code would
fail.

I'd appreciate it if some of the VM experts would look this over for
problems. (It seems to work OK so far, so I'm using it on my system.)

Regards,
Bill
--------------B0ED82178323183C3789CC5E
Content-Type: text/plain; charset=us-ascii; name="mmap_50-patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline; filename="mmap_50-patch"

--- mm/mmap.c.old Sat Jul 19 08:17:17 1997
+++ mm/mmap.c Mon Aug 18 12:16:24 1997
@@ -373,10 +373,12 @@
* Unmapping between to intermediate points, making a hole.
*
* Case 4 involves the creation of 2 new areas, for each side of
- * the hole.
+ * the hole. If possible, we reuse the existing area rather than
+ * allocate a new one, and the return indicates whether the old
+ * area was reused.
*/
-static void unmap_fixup(struct vm_area_struct *area,
- unsigned long addr, size_t len)
+static int unmap_fixup(struct vm_area_struct *area, unsigned long addr,
+ size_t len, struct vm_area_struct **extra)
{
struct vm_area_struct *mpnt;
unsigned long end = addr + len;
@@ -391,7 +393,7 @@
area->vm_ops->close(area);
if (area->vm_dentry)
dput(area->vm_dentry);
- return;
+ return 0;
}

/* Work out to one of the ends. */
@@ -403,17 +405,18 @@
} else {
/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
/* Add end mapping -- leave beginning for below */
- mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ mpnt = *extra;
+ *extra = NULL;

- if (!mpnt)
- return;
mpnt->vm_mm = area->vm_mm;
mpnt->vm_start = end;
mpnt->vm_end = area->vm_end;
mpnt->vm_page_prot = area->vm_page_prot;
mpnt->vm_flags = area->vm_flags;
mpnt->vm_ops = area->vm_ops;
- mpnt->vm_offset += (end - area->vm_start);
+ /* N.B. what does this do? vm_offset hasn't been initialized */
+ /* mpnt->vm_offset += (end - area->vm_start); */
+ mpnt->vm_offset = area->vm_offset + (end - area->vm_start);
mpnt->vm_dentry = dget(area->vm_dentry);
if (mpnt->vm_ops && mpnt->vm_ops->open)
mpnt->vm_ops->open(mpnt);
@@ -421,18 +424,18 @@
insert_vm_struct(current->mm, mpnt);
}

- /* Construct whatever mapping is needed. */
- mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (!mpnt)
- return;
- *mpnt = *area;
- if (mpnt->vm_ops && mpnt->vm_ops->open)
- mpnt->vm_ops->open(mpnt);
+ /* Close the current area ... */
if (area->vm_ops && area->vm_ops->close) {
+ end = area->vm_end; /* save new end */
area->vm_end = area->vm_start;
area->vm_ops->close(area);
+ area->vm_end = end;
}
- insert_vm_struct(current->mm, mpnt);
+ /* ... then reopen and reinsert. */
+ if (area->vm_ops && area->vm_ops->open)
+ area->vm_ops->open(area);
+ insert_vm_struct(current->mm, area);
+ return 1;
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
@@ -452,7 +455,8 @@
*/
int do_munmap(unsigned long addr, size_t len)
{
- struct vm_area_struct *mpnt, *next, *free;
+ struct vm_area_struct *mpnt, *next, *free, *extra;
+ int freed;

if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
return -EINVAL;
@@ -471,6 +475,14 @@
if (!mpnt)
return 0;

+ /*
+ * We may need one additional vma to fix up the mappings ...
+ * and this is the last chance for an easy error exit.
+ */
+ extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!extra)
+ return -ENOMEM;
+
next = mpnt->vm_next;

/* we have mpnt->vm_next = next and addr < mpnt->vm_end */
@@ -486,19 +498,18 @@
free = mpnt;
mpnt = next;
}
- if (free == NULL)
- return 0;

/* Ok - we have the memory areas we should free on the 'free' list,
* so release them, and unmap the page range..
* If the one of the segments is only being partially unmapped,
* it will put new vm_area_struct(s) into the address space.
*/
- do {
+ freed = 0;
+ while ((mpnt = free) != NULL) {
unsigned long st, end, size;

- mpnt = free;
free = free->vm_next;
+ freed = 1;

remove_shared_vm_struct(mpnt);

@@ -514,12 +525,19 @@
zap_page_range(current->mm, st, size);
flush_tlb_range(current->mm, st, end);

- unmap_fixup(mpnt, st, size);
+ /*
+ * Fix the mapping, and free the old area if it wasn't reused.
+ */
+ if (!unmap_fixup(mpnt, st, size, &extra))
+ kmem_cache_free(vm_area_cachep, mpnt);
+ }

- kmem_cache_free(vm_area_cachep, mpnt);
- } while (free);
+ /* Release the extra vma struct if it wasn't used */
+ if (extra)
+ kmem_cache_free(vm_area_cachep, extra);

- current->mm->mmap_cache = NULL; /* Kill the cache. */
+ if (freed)
+ current->mm->mmap_cache = NULL; /* Kill the cache. */
return 0;
}

--------------B0ED82178323183C3789CC5E--