diff -Naur linux-2.5.44/kernel/ksyms.c linux-2.5.44-ioe/kernel/ksyms.c
--- linux-2.5.44/kernel/ksyms.c	Sat Oct 19 06:01:08 2002
+++ linux-2.5.44-ioe/kernel/ksyms.c	Fri Nov  1 23:12:48 2002
@@ -136,6 +136,7 @@
 EXPORT_SYMBOL(page_address);
 #endif
 EXPORT_SYMBOL(get_user_pages);
+EXPORT_SYMBOL(get_user_pages_sgl);
 
 /* filesystem internal functions */
 EXPORT_SYMBOL(def_blk_fops);
diff -Naur linux-2.5.44/mm/memory.c linux-2.5.44-ioe/mm/memory.c
--- linux-2.5.44/mm/memory.c	Sat Oct 19 06:01:52 2002
+++ linux-2.5.44-ioe/mm/memory.c	Fri Nov  1 23:48:42 2002
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -514,6 +515,85 @@
 }
 
+int get_user_pages_sgl(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, int len, int write,
+		struct scatterlist **sgl)
+{
+	int i;
+	unsigned int flags;
+
+	/* Without this structure, it makes no sense to call this */
+	BUG_ON(!sgl);
+
+	/*
+	 * Require read or write permissions.
+	 */
+	flags = write ? VM_WRITE : VM_READ;
+	i = 0;
+
+	do {
+		struct vm_area_struct *vma;
+
+		vma = find_extend_vma(mm, start);
+
+		if (!vma || (vma->vm_flags & VM_IO)
+				|| !(flags & vma->vm_flags))
+			return i ? : -EFAULT;
+
+		/* Doesn't work with huge pages! */
+		BUG_ON(is_vm_hugetlb_page(vma));
+
+		spin_lock(&mm->page_table_lock);
+		do {
+			struct page *map;
+			while (!(map = follow_page(mm, start, write))) {
+				spin_unlock(&mm->page_table_lock);
+				switch (handle_mm_fault(mm, vma, start, write)) {
+				case VM_FAULT_MINOR:
+					tsk->min_flt++;
+					break;
+				case VM_FAULT_MAJOR:
+					tsk->maj_flt++;
+					break;
+				case VM_FAULT_SIGBUS:
+					return i ? i : -EFAULT;
+				case VM_FAULT_OOM:
+					return i ? i : -ENOMEM;
+				default:
+					BUG();
+				}
+				spin_lock(&mm->page_table_lock);
+			}
+			sgl[i]->page = get_page_map(map);
+			if (!sgl[i]->page) {
+				spin_unlock(&mm->page_table_lock);
+				while (i--)
+					page_cache_release(sgl[i]->page);
+				i = -EFAULT;
+				goto out;
+			}
+			if (!PageReserved(sgl[i]->page))
+				page_cache_get(sgl[i]->page);
+
+			/* TODO: Do coalescing of physically contiguous
+			 * pages here
+			 */
+			sgl[i]->offset = 0;
+			sgl[i]->length = PAGE_SIZE;
+
+			i++;
+			start += PAGE_SIZE;
+			len--;
+		} while (len && start < vma->vm_end);
+		spin_unlock(&mm->page_table_lock);
+	} while (len);
+
+	/* This might be pointless, if start is always aligned to pages */
+	sgl[0]->offset = start & ~PAGE_MASK;
+	sgl[0]->length = PAGE_SIZE - (start & ~PAGE_MASK);
+out:
+	return i;
+}
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, int len, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
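
For anyone wanting to try the new export, here is a minimal caller sketch.
It is not part of the patch: the wrapper name, buffer handling, and error
handling are made up for illustration. It assumes the same locking rules as
get_user_pages(), i.e. mm->mmap_sem held for reading across the call, and it
mirrors the patch's refcounting by releasing only non-reserved pages:

#include <linux/mm.h>
#include <linux/pagemap.h>	/* page_cache_release() */
#include <linux/sched.h>	/* current */
#include <asm/scatterlist.h>

/* Illustrative caller only -- not part of the patch. */
static int map_user_buffer(unsigned long uaddr, int nr_pages,
			   struct scatterlist *sg_entries,
			   struct scatterlist **sgl)
{
	int i, ret;

	/* get_user_pages_sgl() takes an array of pointers to
	 * scatterlist entries, so aim each slot at real storage. */
	for (i = 0; i < nr_pages; i++)
		sgl[i] = &sg_entries[i];

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages_sgl(current, current->mm, uaddr,
				 nr_pages, 1 /* write */, sgl);
	up_read(&current->mm->mmap_sem);

	if (ret < 0)
		return ret;		/* -EFAULT or -ENOMEM */

	/* ... hand sg_entries to pci_map_sg() and do the I/O ... */

	/* Drop the references taken above; like the patch itself,
	 * leave reserved pages alone. */
	for (i = 0; i < ret; i++)
		if (!PageReserved(sgl[i]->page))
			page_cache_release(sgl[i]->page);

	return ret;
}

Note that sgl is an array of pointers to scatterlist entries rather than a
flat array, so each slot has to point at preallocated storage before the
call; on return, sgl[0]->offset and sgl[0]->length already describe any
sub-page offset at the start of the buffer.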