Re: copy_from_user() fix

Savochkin Andrey Vladimirovich (saw@msu.ru)
Sun, 23 Aug 1998 14:51:42 +0400


Linus,

I've reimplemented the fix according to Richard's suggestions.

On Sat, Aug 22, 1998 at 07:14:15PM -0700, Richard Henderson wrote:
> On Sat, Aug 22, 1998 at 01:58:25PM +0400, Savochkin Andrey Vladimirovich wrote:
> > There are a few places in the kernel where uncleared pages may be exposed
> > to users. I consider it a very important security problem.
> [...]
> > - clears the remaining memory in outline __generic_copy_from_user()
> > in arch/i386/lib/usercopy.c keeping inline __copy_from_user() without
> > the extra code;
> > - makes sure that generic and i386 specific code don't use __copy_from_user()
>
> No good. It _is_ an important problem, and should be treated as such.
> The only way you can be sure that you've closed all of the holes is to
> make sure there are _no_ entry points that could be potential problems.
> Thus you must fix __copy_from_user.

Ok.
The attached patch adds memory clearing to __copy_from_user itself
and leaves the places where __copy_from_user is used unchanged.
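
For clarity, the net effect in plain C (a sketch only; the real code in
the attached patch is inline assembly, and raw_copy() below is just a
stand-in name for the low-level copy loop):

    unsigned long copy_from_user(void *to, const void *from, unsigned long n)
    {
            unsigned long left;

            /* returns the number of bytes that could NOT be copied */
            left = raw_copy(to, from, n);           /* stand-in name */

            /* on a fault, clear the unwritten tail of the kernel buffer
             * so that stale kernel data cannot leak to user space */
            if (left)
                    memset((char *)to + (n - left), 0, left);
            return left;
    }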

>
> Note that this will also ...
>
> > - arch/i386/math-emu/reg_ld_str.c to clear the memory after
> > __copy_from_user() if needed.
>
> ... make this bit of extreme ugliness go away.
>
> > The second part of the patch makes dirty changes to the copy_from_user()
> > implementation for other architectures, except sparc64 where memory
> > clearing has already been implemented.
>
> Please leave other architectures alone, as you clearly don't know
> what's going on here. For instance, you "fixed" Alpha when it
> wasn't broken. Note the last dozen lines of arch/alpha/lib/copy_user.S.

I don't deny that I don't know how user copying is implemented on
every architecture.

It's good that the problem has already been fixed for Alpha.
But there are architectures where things aren't in as good a shape as on
Alpha, Mips, and Sparc64. I even found a place where copy_from_user
returns -EFAULT instead of the number of uncopied bytes.
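
To show why that return value matters (an assumed caller pattern, with
kbuf/ubuf/len as placeholder names): callers do arithmetic on the
result, treating it as a count of uncopied bytes.

    unsigned long left, copied;

    left = copy_from_user(kbuf, ubuf, len);
    /* convention: 'left' bytes were NOT copied, len - left were */
    copied = len - left;
    /* an architecture that returns -EFAULT instead makes 'left' a huge
     * unsigned value here, and 'copied' becomes nonsense */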

The second part of my patch isn't a fix.
It's an optional addition :-) to the i386 fix, implemented
because I care about the problem very much.

Best wishes
Andrey V.
Savochkin

[Attachment 1: copy_from_user.1 -- the i386 fix]

diff -ruN linux-2.1.117.orig/arch/i386/lib/usercopy.c linux-2.1.117/arch/i386/lib/usercopy.c
--- linux-2.1.117.orig/arch/i386/lib/usercopy.c Sat Jan 24 03:34:36 1998
+++ linux-2.1.117/arch/i386/lib/usercopy.c Sun Aug 23 13:24:21 1998
@@ -7,7 +7,7 @@
*/
#include <asm/uaccess.h>

-inline unsigned long
+unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
@@ -15,11 +15,11 @@
return n;
}

-inline unsigned long
+unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
- __copy_user(to,from,n);
+ __copy_user_zeroing(to,from,n);
return n;
}

diff -ruN linux-2.1.117.orig/include/asm-i386/uaccess.h linux-2.1.117/include/asm-i386/uaccess.h
--- linux-2.1.117.orig/include/asm-i386/uaccess.h Mon Aug 3 23:38:10 1998
+++ linux-2.1.117/include/asm-i386/uaccess.h Sun Aug 23 13:24:06 1998
@@ -268,13 +268,38 @@
: "r"(size & 3), "0"(size / 4), "D"(to), "S"(from) \
: "di", "si", "memory")

+#define __copy_user_zeroing(to,from,size) \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ " movl %1,%0\n" \
+ "1: rep; movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: lea 0(%1,%0,4),%0\n" \
+ "4: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosb\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,4b\n" \
+ ".previous" \
+ : "=&c"(size) \
+ : "r"(size & 3), "0"(size / 4), "D"(to), "S"(from) \
+ : "di", "si", "memory");
+
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
*/
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
- __copy_user(to,from,n);
+ __copy_user_zeroing(to,from,n);
return n;
}

@@ -369,6 +394,141 @@
} \
} while (0)

+/* Optimize just a little bit when we know the size of the move. */
+#define __constant_copy_user_zeroing(to, from, size) \
+do { \
+ switch (size & 3) { \
+ default: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "2: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosl\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " shl $2,%0\n" \
+ " jmp 1b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,2b\n" \
+ ".previous" \
+ : "=c"(size) \
+ : "S"(from), "D"(to), "0"(size/4) \
+ : "di", "si", "memory"); \
+ break; \
+ case 1: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsb\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosl\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " shl $2,%0\n" \
+ " incl %0\n" \
+ " jmp 2b\n" \
+ "4: pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " incl %0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,4b\n" \
+ ".previous" \
+ : "=c"(size) \
+ : "S"(from), "D"(to), "0"(size/4) \
+ : "di", "si", "memory"); \
+ break; \
+ case 2: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsw\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosl\n" \
+ " stosw\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " shl $2,%0\n" \
+ " addl $2,%0\n" \
+ " jmp 2b\n" \
+ "4: pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " stosw\n" \
+ " popl %%eax\n" \
+ " addl $2,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,3b\n" \
+ " .long 1b,4b\n" \
+ ".previous" \
+ : "=c"(size) \
+ : "S"(from), "D"(to), "0"(size/4) \
+ : "di", "si", "memory"); \
+ break; \
+ case 3: \
+ __asm__ __volatile__( \
+ "0: rep; movsl\n" \
+ "1: movsw\n" \
+ "2: movsb\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: pushl %0\n" \
+ " pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " rep; stosl\n" \
+ " stosw\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " popl %0\n" \
+ " shl $2,%0\n" \
+ " addl $3,%0\n" \
+ " jmp 2b\n" \
+ "5: pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " stosw\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " addl $3,%0\n" \
+ " jmp 2b\n" \
+ "6: pushl %%eax\n" \
+ " xorl %%eax,%%eax\n" \
+ " stosb\n" \
+ " popl %%eax\n" \
+ " incl %0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 0b,4b\n" \
+ " .long 1b,5b\n" \
+ " .long 2b,6b\n" \
+ ".previous" \
+ : "=c"(size) \
+ : "S"(from), "D"(to), "0"(size/4) \
+ : "di", "si", "memory"); \
+ break; \
+ } \
+} while (0)
+
unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);

@@ -384,7 +544,7 @@
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
- __constant_copy_user(to,from,n);
+ __constant_copy_user_zeroing(to,from,n);
return n;
}

@@ -398,7 +558,7 @@
static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
- __constant_copy_user(to,from,n);
+ __constant_copy_user_zeroing(to,from,n);
return n;
}
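
For anyone who doesn't want to trace the fixup assembly above: when a
fault hits the dword loop, %0 (ECX) holds the longwords still to be
moved and size&3 tail bytes also remain, so the recovery path is, in C
terms (a sketch of the asm, not actual kernel code):

    uncopied = dwords_left * 4 + (size & 3);  /* "lea 0(%1,%0,4),%0" */
    memset(fault_pos, 0, uncopied);           /* xorl %eax; rep; stosb */

where fault_pos is the destination address the copy had reached; EDI
already points there, so the asm needs no extra address computation.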

[Attachment 2: copy_from_user.2 -- changes for other architectures]

diff -ruN linux-2.1.117.orig/include/asm-arm/uaccess.h linux-2.1.117/include/asm-arm/uaccess.h
--- linux-2.1.117.orig/include/asm-arm/uaccess.h Wed Jan 21 03:39:43 1998
+++ linux-2.1.117/include/asm-arm/uaccess.h Sun Aug 23 11:04:56 1998
@@ -60,8 +60,11 @@

static __inline__ unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
+ char *end = (char *)to + n;
+ if (access_ok(VERIFY_READ, from, n)) {
__do_copy_from_user(to, from, n);
+ if (n) memset(end - n, 0, n);
+ }
return n;
}

diff -ruN linux-2.1.117.orig/include/asm-m68k/uaccess.h linux-2.1.117/include/asm-m68k/uaccess.h
--- linux-2.1.117.orig/include/asm-m68k/uaccess.h Fri Aug 21 09:49:43 1998
+++ linux-2.1.117/include/asm-m68k/uaccess.h Sun Aug 23 11:04:56 1998
@@ -712,9 +712,16 @@
}

#define copy_from_user(to, from, n) \
+({ void *__to = (to); \
+ void *__from = (from); \
+ unsigned long __n = (n); \
+ char *__end = (char *)__to + __n; \
+ unsigned long __res = \
(__builtin_constant_p(n) ? \
__constant_copy_from_user(to, from, n) : \
- __generic_copy_from_user(to, from, n))
+ __generic_copy_from_user(to, from, n)); \
+ if (__res) memset(__end - __res, 0, __res); \
+ __res; })

#define copy_to_user(to, from, n) \
(__builtin_constant_p(n) ? \
diff -ruN linux-2.1.117.orig/include/asm-ppc/uaccess.h linux-2.1.117/include/asm-ppc/uaccess.h
--- linux-2.1.117.orig/include/asm-ppc/uaccess.h Tue Jan 13 02:18:13 1998
+++ linux-2.1.117/include/asm-ppc/uaccess.h Sun Aug 23 11:04:56 1998
@@ -211,9 +211,12 @@
extern inline unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
- return __copy_tofrom_user(to, from, n);
- return n? -EFAULT: 0;
+ unsigned long res = n;
+ if (access_ok(VERIFY_READ, from, n)) {
+ res = __copy_tofrom_user(to, from, n);
+ if (res) memset((char *)to + n - res, 0, res);
+ }
+ return res;
}

extern inline unsigned long
@@ -221,7 +224,7 @@
{
if (access_ok(VERIFY_WRITE, to, n))
return __copy_tofrom_user(to, from, n);
- return n? -EFAULT: 0;
+ return n;
}

#define __copy_from_user(to, from, size) \
diff -ruN linux-2.1.117.orig/include/asm-sparc/uaccess.h linux-2.1.117/include/asm-sparc/uaccess.h
--- linux-2.1.117.orig/include/asm-sparc/uaccess.h Wed Apr 15 04:44:24 1998
+++ linux-2.1.117/include/asm-sparc/uaccess.h Sun Aug 23 11:04:56 1998
@@ -318,11 +318,14 @@
})

#define copy_from_user(to,from,n) ({ \
+void *__copy_to = (void *) (to); \
void *__copy_from = (void *) (from); \
__kernel_size_t __copy_size = (__kernel_size_t) (n); \
__kernel_size_t __copy_res; \
if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
-__copy_res = __copy_user((void *) (to), __copy_from, __copy_size); \
+__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
+if(__copy_res) \
+memset((char *)__copy_to + __copy_size - __copy_res, 0, __copy_res); \
} else __copy_res = __copy_size; \
__copy_res; })
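
Every hunk in this second patch applies the same pattern, written out
as plain C (a sketch; __arch_copy_user() is a stand-in name for each
architecture's low-level routine, which returns the number of bytes it
failed to copy):

    unsigned long res = n;

    if (access_ok(VERIFY_READ, from, n)) {
            res = __arch_copy_user(to, from, n);
            /* clear the tail of the buffer that was never written */
            if (res)
                    memset((char *)to + (n - res), 0, res);
    }
    return res;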
