[PATCH]: 4/4GB: Inefficiency in filemap.c

From: Kirill Korotaev
Date: Thu Nov 11 2004 - 11:23:23 EST


This patch fixes an inefficiency in copying user data in some code paths
when the 4GB split is ON:
filemap.c tries to call __copy_XXX_user while in atomic context, but the
4GB split code always fails the copy when in atomic context. So this
optimization for normal kernels doesn't work with the 4GB split and,
moreover, causes lots of unnecessary locks/checks.
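To illustrate the failure mode, here is a minimal sketch of why the atomic
fast path is wasted work under the 4GB split. This is my reading of the
CONFIG_X86_UACCESS_INDIRECT behaviour, not the actual implementation;
indirect_copy_to_user() and real_copy_to_user() are hypothetical names
used only for illustration:

/*
 * Sketch (assumed behaviour, not the real 4GB-split code): the indirect
 * uaccess layer has to switch page tables to reach user space, which can
 * sleep, so any copy attempted in atomic context is simply refused.
 */
static unsigned long indirect_copy_to_user(void __user *to,
					   const void *from,
					   unsigned long n)
{
	if (in_atomic())
		return n;	/* "fail": report all n bytes uncopied */
	return real_copy_to_user(to, from, n);	/* may fault and sleep */
}

So in file_read_actor() the kmap_atomic() fast path always comes back with
left == size, and the function falls through to the kmap() slow path
anyway; the atomic attempt only adds locks/checks.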

Signed-off-by: Kirill Korotaev <dev@xxxxx>

Kirill

P.S. These 4GB split patches are against a modified 2.6.8.1 kernel, but should be applicable to recent Fedora kernels.

--- ./mm/filemap.c.4gbcptusr 2004-11-10 18:38:19.000000000 +0300
+++ ./mm/filemap.c 2004-11-11 16:34:24.669739800 +0300
@@ -814,6 +814,7 @@ int file_read_actor(read_descriptor_t *d
if (size > count)
size = count;

+#ifndef CONFIG_X86_UACCESS_INDIRECT
/*
* Faults on the destination of a read are common, so do it before
* taking the kmap.
@@ -822,20 +823,23 @@ int file_read_actor(read_descriptor_t *d
kaddr = kmap_atomic(page, KM_USER0);
left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
kunmap_atomic(kaddr, KM_USER0);
- if (left == 0)
- goto success;
}
+#else
+ left = size;
+#endif

- /* Do it the slow way */
- kaddr = kmap(page);
- left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
- kunmap(page);
-
- if (left) {
- size -= left;
- desc->error = -EFAULT;
+ if (left != 0) {
+ /* Do it the slow way */
+ kaddr = kmap(page);
+ left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
+ kunmap(page);
+
+ if (left) {
+ size -= left;
+ desc->error = -EFAULT;
+ }
}
-success:
+
desc->count = count - size;
desc->written += size;
desc->arg.buf += size;
@@ -1615,9 +1619,13 @@ filemap_copy_from_user(struct page *page
char *kaddr;
int left;

+#ifndef CONFIG_X86_UACCESS_INDIRECT
kaddr = kmap_atomic(page, KM_USER0);
left = __copy_from_user(kaddr + offset, buf, bytes);
kunmap_atomic(kaddr, KM_USER0);
+#else
+ left = bytes;
+#endif

if (left != 0) {
/* Do it the slow way */
@@ -1668,10 +1676,14 @@ filemap_copy_from_user_iovec(struct page
char *kaddr;
size_t copied;

+#ifndef CONFIG_X86_UACCESS_INDIRECT
kaddr = kmap_atomic(page, KM_USER0);
copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
base, bytes);
kunmap_atomic(kaddr, KM_USER0);
+#else
+ copied = 0;
+#endif
if (copied != bytes) {
kaddr = kmap(page);
copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
--- ./mm/shmem.c.4gbcptusr 2004-11-10 18:38:19.000000000 +0300
+++ ./mm/shmem.c 2004-11-11 14:21:39.919565872 +0300
@@ -1322,6 +1322,7 @@ shmem_file_write(struct file *file, cons
break;

left = bytes;
+#ifndef CONFIG_X86_UACCESS_INDIRECT
if (PageHighMem(page)) {
volatile unsigned char dummy;
__get_user(dummy, buf);
@@ -1331,6 +1332,7 @@ shmem_file_write(struct file *file, cons
left = __copy_from_user(kaddr + offset, buf, bytes);
kunmap_atomic(kaddr, KM_USER0);
}
+#endif
if (left) {
kaddr = kmap(page);
left = __copy_from_user(kaddr + offset, buf, bytes);