That is how it is presently implemented:
#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
/*
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm (force=0 and doesn't return any vmas).
 *
 * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
 * can be made about locking. get_user_pages_fast is to be implemented in a
 * way that is advantageous (vs get_user_pages()) when the user memory area is
 * already faulted in and present in ptes. However, if the pages have to be
 * faulted in, it may turn out to be slightly slower.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
#else
/*
 * Should probably be moved to asm-generic, and architectures can include it if
 * they don't implement their own get_user_pages_fast.
 */
#define get_user_pages_fast(start, nr_pages, write, pages)		\
({									\
	struct mm_struct *mm = current->mm;				\
	int ret;							\
									\
	down_read(&mm->mmap_sem);					\
	ret = get_user_pages(current, mm, start, nr_pages,		\
			     write, 0, pages, NULL);			\
	up_read(&mm->mmap_sem);						\
									\
	ret;								\
})
#endif
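
To make the calling convention concrete, here is a minimal caller sketch; the function pin_user_buffer() and its error-handling policy are illustrative assumptions, not code from the source. It pins nr_pages pages of a user buffer for write access and, since get_user_pages_fast() may pin fewer pages than requested, releases any partial pin before failing:

/*
 * Hypothetical caller (illustrative only): pin nr_pages pages starting
 * at "start" for write access. get_user_pages_fast() returns the number
 * of pages actually pinned, which may be fewer than requested, or a
 * negative errno.
 */
static int pin_user_buffer(unsigned long start, int nr_pages,
			   struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(start, nr_pages, 1 /* write */, pages);
	if (pinned < 0)
		return pinned;		/* nothing was pinned */
	if (pinned < nr_pages) {
		/* Partial pin: drop the references we did take and fail. */
		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		return -EFAULT;
	}
	return 0;
}

Whichever path is taken (an architecture's fast implementation or the generic fallback above), the caller ends up holding a reference on each returned page and must drop it with put_page() once it is done with the memory.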