diff -Nraup a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
--- a/arch/x86_64/ia32/ia32_binfmt.c	2005-04-27 07:04:33.000000000 +0800
+++ b/arch/x86_64/ia32/ia32_binfmt.c	2005-04-27 07:09:33.000000000 +0800
@@ -46,7 +46,7 @@ struct elf_phdr;
 
 #define IA32_EMULATOR 1
 
-#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_32 + 0x1000000)
+#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
 
 #undef ELF_ARCH
 #define ELF_ARCH EM_386
@@ -307,9 +307,6 @@ MODULE_AUTHOR("Eric Youngdale, Andi Klee
 
 #define elf_addr_t __u32
 
-#undef TASK_SIZE
-#define TASK_SIZE 0xffffffff
-
 static void elf32_init(struct pt_regs *);
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
diff -Nraup a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c
--- a/arch/x86_64/kernel/sys_x86_64.c	2005-04-27 07:04:33.000000000 +0800
+++ b/arch/x86_64/kernel/sys_x86_64.c	2005-04-27 07:09:58.000000000 +0800
@@ -68,13 +68,7 @@ out:
 static void find_start_end(unsigned long flags, unsigned long *begin,
 			   unsigned long *end)
 {
-#ifdef CONFIG_IA32_EMULATION
-	if (test_thread_flag(TIF_IA32)) {
-		*begin = TASK_UNMAPPED_32;
-		*end = IA32_PAGE_OFFSET;
-	} else
-#endif
-	if (flags & MAP_32BIT) {
+	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
 		   it to that.  This means we need to move the
@@ -84,10 +78,10 @@ static void find_start_end(unsigned long
 		   of playground for now. -AK */
 		*begin = 0x40000000;
 		*end = 0x80000000;
-	} else {
-		*begin = TASK_UNMAPPED_64;
+	} else {
+		*begin = TASK_UNMAPPED_BASE;
 		*end = TASK_SIZE;
-	}
+	}
 }
 
 unsigned long
diff -Nraup a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
--- a/arch/x86_64/mm/fault.c	2005-04-27 07:04:33.000000000 +0800
+++ b/arch/x86_64/mm/fault.c	2005-04-27 07:08:39.000000000 +0800
@@ -74,7 +74,7 @@ static noinline int is_prefetch(struct p
 	instr = (unsigned char *)convert_rip_to_linear(current, regs);
 	max_instr = instr + 15;
 
-	if ((regs->cs & 3) != 0 && instr >= (unsigned char *)TASK_SIZE)
+	if ((regs->cs & 3) != 0 && instr >= (unsigned char *)TASK_SIZE64)
 		return 0;
 
 	while (scan_more && instr < max_instr) {
@@ -345,7 +345,7 @@ asmlinkage void do_page_fault(struct pt_
 	 * (error_code & 4) == 0, and that the fault was not a
 	 * protection error (error_code & 1) == 0.
 	 */
-	if (unlikely(address >= TASK_SIZE)) {
+	if (unlikely(address >= TASK_SIZE64)) {
 		if (!(error_code & 5)) {
 			if (vmalloc_fault(address) < 0)
 				goto bad_area_nosemaphore;
@@ -481,7 +481,7 @@ bad_area_nosemaphore:
 
 		tsk->thread.cr2 = address;
 		/* Kernel addresses are always protection faults */
-		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+		tsk->thread.error_code = error_code | (address >= TASK_SIZE64);
 		tsk->thread.trap_no = 14;
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
diff -Nraup a/include/asm-x86_64/a.out.h b/include/asm-x86_64/a.out.h
--- a/include/asm-x86_64/a.out.h	2005-04-27 07:04:57.000000000 +0800
+++ b/include/asm-x86_64/a.out.h	2005-04-27 07:10:31.000000000 +0800
@@ -21,7 +21,7 @@ struct exec
 
 #ifdef __KERNEL__
 #include <linux/thread_info.h>
-#define STACK_TOP (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE)
+#define STACK_TOP TASK_SIZE
 #endif
 
 #endif /* __A_OUT_GNU_H__ */
diff -Nraup a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
--- a/include/asm-x86_64/processor.h	2005-04-27 07:04:57.000000000 +0800
+++ b/include/asm-x86_64/processor.h	2005-04-27 07:11:12.000000000 +0800
@@ -161,16 +161,15 @@ static inline void clear_in_cr4 (unsigne
 /*
  * User space process size. 47bits.
  */
-#define TASK_SIZE	(0x800000000000UL)
+
+#define TASK_SIZE64	(0x800000000000UL)
+#define TASK_SIZE	(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
-#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
-#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3)
-#define TASK_UNMAPPED_BASE	\
-	(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
 
 /*
  * Size of io_bitmap.
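
For reference, the following standalone sketch (not part of the patch, and not kernel code) mimics what the reworked macros evaluate to: test_thread_flag(TIF_IA32) and the ADDR_LIMIT_3GB personality bit are replaced by plain ints, a 4 KiB page size is assumed, and the constants are copied from the processor.h hunk above.

/* Standalone sketch, not kernel code: prints the TASK_SIZE and
 * TASK_UNMAPPED_BASE values a task would see after this patch.
 * TIF_IA32 and ADDR_LIMIT_3GB are simulated with plain ints. */
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed x86-64 page size */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE64	0x800000000000UL

static unsigned long ia32_page_offset(int addr_limit_3gb)
{
	return addr_limit_3gb ? 0xc0000000UL : 0xFFFFe000UL;
}

static unsigned long task_size(int is_ia32, int addr_limit_3gb)
{
	/* mirrors: test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64 */
	return is_ia32 ? ia32_page_offset(addr_limit_3gb) : TASK_SIZE64;
}

int main(void)
{
	int is_ia32;

	for (is_ia32 = 0; is_ia32 <= 1; is_ia32++) {
		unsigned long ts = task_size(is_ia32, 0);

		printf("%-6s task: TASK_SIZE=%#lx  TASK_UNMAPPED_BASE=%#lx\n",
		       is_ia32 ? "ia32" : "64-bit", ts, PAGE_ALIGN(ts / 3));
	}
	return 0;
}

On an x86-64 box this prints the 47-bit limit for 64-bit tasks and the just-under-4GB limit for ia32 tasks, showing how the old TASK_UNMAPPED_32/TASK_UNMAPPED_64 split collapses into the single TASK_UNMAPPED_BASE definition.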