[PATCH 4/7] proc/kcore: hold lock during read

From: Omar Sandoval
Date: Fri Jul 06 2018 - 15:33:50 EST


From: Omar Sandoval <osandov@xxxxxx>

Now that we're using an rwsem, we can hold it during the entirety of
read_kcore() and have a common return path. This is preparation for the
next change.

Signed-off-by: Omar Sandoval <osandov@xxxxxx>
---
fs/proc/kcore.c | 70 ++++++++++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 30 deletions(-)
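
The shape read_kcore() converges on here is: take the lock once, funnel
every failure to a single exit label, and derive the return value from
how much of the buffer was consumed rather than keeping a separate
accumulator. A minimal userspace sketch of that pattern, with a POSIX
rwlock standing in for the kernel rwsem (demo_lock, demo_copy, and
demo_read are illustrative names, not kernel identifiers):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for one copy-out step; fails when dst is NULL. */
static int demo_copy(char *dst, const char *src, size_t len)
{
	if (!dst)
		return -EFAULT;
	memcpy(dst, src, len);
	return 0;
}

static ssize_t demo_read(char *buffer, size_t buflen)
{
	size_t orig_buflen = buflen;
	int ret = 0;

	/* Taken once, like down_read(&kclist_lock), and held throughout. */
	pthread_rwlock_rdlock(&demo_lock);

	while (buflen) {
		size_t tsz = buflen > 8 ? 8 : buflen;

		ret = demo_copy(buffer, "kcoredat", tsz);
		if (ret)
			goto out;	/* every error funnels here */
		buffer += tsz;
		buflen -= tsz;
	}
out:
	/* Single unlock site, like up_read(&kclist_lock) below. */
	pthread_rwlock_unlock(&demo_lock);
	if (ret)
		return ret;
	/* Bytes consumed replace the old "acc" accumulator. */
	return orig_buflen - buflen;
}

int main(void)
{
	char buf[20];

	printf("copied %zd bytes\n", demo_read(buf, sizeof(buf)));
	printf("error: %zd\n", demo_read(NULL, 4));
	return 0;
}

Holding a sleepable rwsem across the whole function is also what lets
the kzalloc() in the first hunk move from GFP_ATOMIC to GFP_KERNEL.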

diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index f335400300d3..b7ff2e2ec350 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -438,19 +438,18 @@ static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
char *buf = file->private_data;
- ssize_t acc = 0;
size_t size, tsz;
size_t elf_buflen;
int nphdr;
unsigned long start;
+ size_t orig_buflen = buflen;
+ int ret = 0;

down_read(&kclist_lock);
size = get_kcore_size(&nphdr, &elf_buflen);

- if (buflen == 0 || *fpos >= size) {
- up_read(&kclist_lock);
- return 0;
- }
+ if (buflen == 0 || *fpos >= size)
+ goto out;

/* trim buflen to not go beyond EOF */
if (buflen > size - *fpos)
@@ -463,28 +462,26 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
tsz = elf_buflen - *fpos;
if (buflen < tsz)
tsz = buflen;
- elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
+ elf_buf = kzalloc(elf_buflen, GFP_KERNEL);
if (!elf_buf) {
- up_read(&kclist_lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
- up_read(&kclist_lock);
if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
kfree(elf_buf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
kfree(elf_buf);
buflen -= tsz;
*fpos += tsz;
buffer += tsz;
- acc += tsz;

/* leave now if filled buffer already */
if (buflen == 0)
- return acc;
- } else
- up_read(&kclist_lock);
+ goto out;
+ }

/*
* Check to see if our file offset matches with any of
@@ -497,25 +494,29 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
while (buflen) {
struct kcore_list *m;

- down_read(&kclist_lock);
list_for_each_entry(m, &kclist_head, list) {
if (start >= m->addr && start < (m->addr+m->size))
break;
}
- up_read(&kclist_lock);

if (&m->list == &kclist_head) {
- if (clear_user(buffer, tsz))
- return -EFAULT;
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else if (m->type == KCORE_VMALLOC) {
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
- if (copy_to_user(buffer, buf, tsz))
- return -EFAULT;
+ if (copy_to_user(buffer, buf, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else if (m->type == KCORE_USER) {
/* User page is handled prior to normal kernel page: */
- if (copy_to_user(buffer, (char *)start, tsz))
- return -EFAULT;
+ if (copy_to_user(buffer, (char *)start, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else {
if (kern_addr_valid(start)) {
/*
@@ -523,26 +524,35 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
* hardened user copy kernel text checks.
*/
if (probe_kernel_read(buf, (void *) start, tsz)) {
- if (clear_user(buffer, tsz))
- return -EFAULT;
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
} else {
- if (copy_to_user(buffer, buf, tsz))
- return -EFAULT;
+ if (copy_to_user(buffer, buf, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
} else {
- if (clear_user(buffer, tsz))
- return -EFAULT;
+ if (clear_user(buffer, tsz)) {
+ ret = -EFAULT;
+ goto out;
+ }
}
}
buflen -= tsz;
*fpos += tsz;
buffer += tsz;
- acc += tsz;
start += tsz;
tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
}

- return acc;
+out:
+ up_read(&kclist_lock);
+ if (ret)
+ return ret;
+ return orig_buflen - buflen;
}

static int open_kcore(struct inode *inode, struct file *filp)

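The loop being re-wrapped in the later hunks has a simple shape: find
the kclist entry covering the current address, copy at most a page per
iteration, and zero-fill anything not backed by an entry. A minimal
userspace sketch of that walk (demo_range, find_range, and DEMO_PAGE
are hypothetical names, and the ranges are made up):

#include <stdio.h>

#define DEMO_PAGE 4096UL

struct demo_range {
	unsigned long addr;
	unsigned long size;
};

/* Two fake "kclist" entries; the gaps between them get zero-filled. */
static const struct demo_range ranges[] = {
	{ 0x1000, 0x2000 },
	{ 0x8000, 0x1000 },
};

/* Mirrors the list_for_each_entry() scan for a covering entry. */
static const struct demo_range *find_range(unsigned long start)
{
	for (size_t i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		if (start >= ranges[i].addr &&
		    start < ranges[i].addr + ranges[i].size)
			return &ranges[i];
	}
	return NULL;	/* analogous to hitting &kclist_head */
}

int main(void)
{
	unsigned long start = 0x0800;
	unsigned long buflen = 3 * DEMO_PAGE;
	/* First chunk may be short so later ones stay page-aligned. */
	unsigned long tsz = DEMO_PAGE - (start & (DEMO_PAGE - 1));

	if (tsz > buflen)
		tsz = buflen;
	while (buflen) {
		const struct demo_range *m = find_range(start);

		printf("%#lx..%#lx -> %s\n", start, start + tsz,
		       m ? "copy from entry" : "zero-fill");
		buflen -= tsz;
		start += tsz;
		tsz = buflen > DEMO_PAGE ? DEMO_PAGE : buflen;
	}
	return 0;
}
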
--
2.18.0