[RFC] kcore: change kcore_read() to make sure the kernel read is safe

From: yalin wang
Date: Mon Aug 03 2015 - 23:38:11 EST


This changes kcore_read() to use __copy_from_user_inatomic() to
copy data from kernel addresses, because kern_addr_valid() only
guarantees that the page table is valid at the moment it is called;
once it returns, the page table may change.  For example,
set_fixmap() changes kernel page tables, so we could trigger a
kernel crash if we are unlucky enough to hit that window.
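
In essence, the read path now bounces the data through a scratch page
with page faults disabled, so a racing page-table change zeroes the
buffer instead of oopsing.  A minimal sketch of that pattern follows;
kcore_bounce_read() is illustrative only and is not part of the diff:

/*
 * Sketch of the bounce-copy pattern used in the patch below.
 * Relies on <linux/uaccess.h>.  'page' is a scratch page from
 * __get_free_page(), 'src' is the kernel virtual address, and
 * 'tsz' is at most PAGE_SIZE.
 */
static void kcore_bounce_read(unsigned long src, unsigned long page,
			      size_t tsz)
{
	mm_segment_t old_fs = get_fs();
	unsigned long n;

	set_fs(KERNEL_DS);
	pagefault_disable();
	/* Faults are suppressed; n is the number of bytes not copied. */
	n = __copy_from_user_inatomic((void *)page,
			(__force const void __user *)src, tsz);
	pagefault_enable();
	set_fs(old_fs);

	/* Zero whatever could not be copied instead of crashing. */
	if (n)
		memset((void *)page + tsz - n, 0, n);
}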

Signed-off-by: yalin wang <yalin.wang2010@xxxxxxxxx>
---
fs/proc/kcore.c | 30 ++++++++++++++++++++++++------
1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 92e6726..b085fde 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -86,8 +86,8 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
size = try;
*nphdr = *nphdr + 1;
}
- *elf_buflen = sizeof(struct elfhdr) +
- (*nphdr + 2)*sizeof(struct elf_phdr) +
+ *elf_buflen = sizeof(struct elfhdr) +
+ (*nphdr + 2)*sizeof(struct elf_phdr) +
3 * ((sizeof(struct elf_note)) +
roundup(sizeof(CORE_STR), 4)) +
roundup(sizeof(struct elf_prstatus), 4) +
@@ -435,6 +435,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
size_t elf_buflen;
int nphdr;
unsigned long start;
+ unsigned long page = 0;

read_lock(&kclist_lock);
size = get_kcore_size(&nphdr, &elf_buflen);
@@ -485,7 +486,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
start = kc_offset_to_vaddr(*fpos - elf_buflen);
if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
tsz = buflen;
-
+
while (buflen) {
struct kcore_list *m;

@@ -515,15 +516,32 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
} else {
if (kern_addr_valid(start)) {
unsigned long n;
+ mm_segment_t old_fs = get_fs();
+
+ if (page == 0) {
+ page = __get_free_page(GFP_KERNEL);
+ if (page == 0)
+ return -ENOMEM;

- n = copy_to_user(buffer, (char *)start, tsz);
+ }
+ set_fs(KERNEL_DS);
+ pagefault_disable();
+ n = __copy_from_user_inatomic((void *)page,
+ (__force const void __user *)start,
+ tsz);
+ pagefault_enable();
+ set_fs(old_fs);
+ if (n)
+ memset((void *)page + tsz - n, 0, n);
+
+ n = copy_to_user(buffer, (char *)page, tsz);
/*
* We cannot distinguish between fault on source
* and fault on destination. When this happens
* we clear too and hope it will trigger the
* EFAULT again.
*/
- if (n) {
+ if (n) {
if (clear_user(buffer + tsz - n,
n))
return -EFAULT;
@@ -540,7 +558,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
start += tsz;
tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
}
-
+ free_page(page);
return acc;
}

--
1.9.1
