[PATCH 1/2] Massive code cleanup of sys_msync()

From: Anton Salikhmetov
Date: Tue Jan 15 2008 - 11:23:51 EST


Substantial code cleanup of the sys_msync() function:

1) using the PAGE_ALIGN() macro instead of open-coded page alignment
   (a short equivalence note follows the "---" marker below);
2) replacing the goto-based error handling with early returns, which
   improves the readability of the loop traversing the process memory
   regions.

Signed-off-by: Anton Salikhmetov <salikhmetov@xxxxxxxxx>
---
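Note (not part of the commit): PAGE_ALIGN() performs exactly the rounding
that the open-coded expression did. A minimal sketch of the equivalence,
simplified from include/linux/mm.h:

	/* Round addr up to the next page boundary. */
	#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)

	/*
	 * PAGE_MASK == ~(PAGE_SIZE - 1), hence ~PAGE_MASK == PAGE_SIZE - 1,
	 * so the old open-coded form
	 *	len = (len + ~PAGE_MASK) & PAGE_MASK;
	 * computes the same value as the new
	 *	len = PAGE_ALIGN(len);
	 */
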
mm/msync.c | 79 ++++++++++++++++++++++++++++--------------------------------
1 files changed, 37 insertions(+), 42 deletions(-)

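Also for reviewers (not part of the commit): the comment block touched below
documents that MS_ASYNC neither starts I/O nor dirties pages any more, so a
caller that needs the data on disk follows msync() with fsync(). A small
hypothetical userspace sketch of that pattern:

	#include <sys/mman.h>
	#include <unistd.h>

	/*
	 * Assumes addr/len describe a page-aligned MAP_SHARED mapping of the
	 * open file descriptor fd; returns 0 on success, -1 on error.
	 */
	static int flush_mapping(void *addr, size_t len, int fd)
	{
		if (msync(addr, len, MS_ASYNC) == -1)
			return -1;
		return fsync(fd);	/* writes out the dirty pages */
	}
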
diff --git a/mm/msync.c b/mm/msync.c
index 144a757..3270caa 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -1,24 +1,25 @@
/*
* linux/mm/msync.c
*
+ * The msync() system call.
* Copyright (C) 1994-1999 Linus Torvalds
+ *
+ * Massive code cleanup.
+ * Copyright (C) 2008 Anton Salikhmetov <salikhmetov@xxxxxxxxx>
*/

-/*
- * The msync() system call.
- */
+#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/syscalls.h>
#include <linux/sched.h>
+#include <linux/syscalls.h>

/*
* MS_SYNC syncs the entire file - including mappings.
*
* MS_ASYNC does not start I/O (it used to, up to 2.5.67).
- * Nor does it marks the relevant pages dirty (it used to up to 2.6.17).
+ * Nor does it mark the relevant pages dirty (it used to up to 2.6.17).
* Now it doesn't do anything, since dirty pages are properly tracked.
*
* The application may now run fsync() to
@@ -33,71 +34,65 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
unsigned long end;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- int unmapped_error = 0;
- int error = -EINVAL;
+ int error = 0, unmapped_error = 0;

if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
- goto out;
+ return -EINVAL;
if (start & ~PAGE_MASK)
- goto out;
+ return -EINVAL;
if ((flags & MS_ASYNC) && (flags & MS_SYNC))
- goto out;
- error = -ENOMEM;
- len = (len + ~PAGE_MASK) & PAGE_MASK;
+ return -EINVAL;
+
+ len = PAGE_ALIGN(len);
end = start + len;
if (end < start)
- goto out;
- error = 0;
+ return -ENOMEM;
if (end == start)
- goto out;
+ return 0;
+
/*
* If the interval [start,end) covers some unmapped address ranges,
* just ignore them, but return -ENOMEM at the end.
*/
down_read(&mm->mmap_sem);
vma = find_vma(mm, start);
- for (;;) {
+ do {
struct file *file;

- /* Still start < end. */
- error = -ENOMEM;
- if (!vma)
- goto out_unlock;
- /* Here start < vma->vm_end. */
+ if (!vma) {
+ error = -ENOMEM;
+ break;
+ }
if (start < vma->vm_start) {
start = vma->vm_start;
- if (start >= end)
- goto out_unlock;
+ if (start >= end) {
+ error = -ENOMEM;
+ break;
+ }
unmapped_error = -ENOMEM;
}
- /* Here vma->vm_start <= start < vma->vm_end. */
- if ((flags & MS_INVALIDATE) &&
- (vma->vm_flags & VM_LOCKED)) {
+ if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED)) {
error = -EBUSY;
- goto out_unlock;
+ break;
}
- file = vma->vm_file;
start = vma->vm_end;
- if ((flags & MS_SYNC) && file &&
- (vma->vm_flags & VM_SHARED)) {
+
+ file = vma->vm_file;
+ if ((flags & MS_SYNC) && file && (vma->vm_flags & VM_SHARED)) {
get_file(file);
up_read(&mm->mmap_sem);
error = do_fsync(file, 0);
fput(file);
- if (error || start >= end)
- goto out;
+ if (error)
+ return error;
down_read(&mm->mmap_sem);
vma = find_vma(mm, start);
- } else {
- if (start >= end) {
- error = 0;
- goto out_unlock;
- }
- vma = vma->vm_next;
+ continue;
}
- }
-out_unlock:
+
+ vma = vma->vm_next;
+ } while (start < end);
up_read(&mm->mmap_sem);
-out:
+
return error ? : unmapped_error;
}
--
1.4.4.4
