[PATCH] x86: Optimize tail handling for copy_user
From: Vitaly Mayatskikh
Date: Mon Jul 28 2008 - 10:10:41 EST
Reduce the number of protection faults taken in the copy_user_handle_tail
routine by limiting the clear length to the end of the page, as suggested by Linus.
Linus, should I add myself "signed-off-by: you" for patches with your
ideas implemented? The clear-length calculation was also adjusted slightly
so that page-aligned addresses are handled correctly.
I'm using this systemtap script for sanity testing:
http://people.redhat.com/vmayatsk/copy_user_x8664/
It looks very ugly, but works.
Signed-off-by: Vitaly Mayatskikh <v.mayatskih@xxxxxxxxx>
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f4df6e7..42baeca 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -161,23 +161,33 @@ EXPORT_SYMBOL(copy_in_user);
/*
* Try to copy last bytes and clear the rest if needed.
* Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
+ * it is not necessary to do low level optimization of tail handling.
*/
unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+copy_user_handle_tail(char *to, char *from, unsigned len, unsigned clear_rest)
{
char c;
- unsigned zero_len;
+ unsigned clear_len;
- for (; len; --len) {
- if (__get_user_nocheck(c, from++, sizeof(char)))
- break;
- if (__put_user_nocheck(c, to++, sizeof(char)))
+ while (len) {
+ if (__get_user_nocheck(c, from, 1))
break;
+ from++;
+ if (__put_user_nocheck(c, to, 1))
+ /* Fault in destination, nothing to clear */
+ goto out;
+ to++;
+ len--;
}
- for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
- if (__put_user_nocheck(c, to++, sizeof(char)))
- break;
+ if (clear_rest) {
+ unsigned long addr = (unsigned long)to;
+ clear_len = len;
+ /* Limit clear_len to the rest of the page ("!=" binds tighter than "&") */
+ if (((addr + len) & PAGE_MASK) != (addr & PAGE_MASK))
+ clear_len = len - ((addr + len) & ~PAGE_MASK);
+ memset(to, 0, clear_len);
+ }
+out:
return len;
}
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 5cfd295..c5c5af0 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -196,6 +196,6 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
}
unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+copy_user_handle_tail(char *to, char *from, unsigned len, unsigned clear_rest);
#endif /* ASM_X86__UACCESS_64_H */
--
wbr, Vitaly
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/