[PATCH 3/9] unify loadsegment macro

From: Glauber de Oliveira Costa
Date: Wed Dec 05 2007 - 12:54:55 EST


This patch unifies the loadsegment() macro, making it identical on
x86_64 and i386. The common version is based on the x86_64 one (the
fixup path loads a zero-valued register instead of i386's pushl
$0/popl sequence); it goes into system.h, and the per-architecture
copies are deleted.
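
As a usage sketch (illustrative only, not part of this patch): a
caller names the segment register and passes a selector, and a
faulting load falls back to the NULL selector via the fixup path:

	/* hypothetical caller, for illustration only */
	static inline void set_user_fs(unsigned short sel)
	{
		loadsegment(fs, sel);	/* loads NULL selector if this faults */
	}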

Signed-off-by: Glauber de Oliveira Costa <gcosta@xxxxxxxxxx>
---
include/asm-x86/system.h | 21 +++++++++++++++++++++
include/asm-x86/system_32.h | 22 ----------------------
include/asm-x86/system_64.h | 20 --------------------
3 files changed, 21 insertions(+), 42 deletions(-)
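
Note: the unified version emits its exception-table entry through the
width-generic _ASM_ALIGN and _ASM_PTR helpers, so the same asm text
assembles on both architectures. Roughly (assumed definitions,
sketching what asm.h provides in this series; not part of this patch):

	/* assumed sketch of include/asm-x86/asm.h */
	#ifdef CONFIG_X86_32
	# define _ASM_ALIGN	" .balign 4 "
	# define _ASM_PTR	" .long "
	#else
	# define _ASM_ALIGN	" .balign 8 "
	# define _ASM_PTR	" .quad "
	#endif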

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 6e9491d..1ac6088 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -39,6 +39,27 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))

/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg, value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %k0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "movl %k1, %%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ _ASM_ALIGN "\n\t" \
+ _ASM_PTR " 1b,3b\n" \
+ ".previous" \
+ : :"r" (value), "r" (0))
+
+
+/*
* Save a segment register away
*/
#define savesegment(seg, value) \
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 717aeb9..a0641a3 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -34,28 +34,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
"2" (prev), "d" (next)); \
} while (0)

-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value) \
- asm volatile("\n" \
- "1:\t" \
- "mov %0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "pushl $0\n\t" \
- "popl %%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 4\n\t" \
- ".long 1b,3b\n" \
- ".previous" \
- : :"rm" (value))
-
-
static inline void native_clts(void)
{
asm volatile ("clts");
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index f340060..da46059 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -43,26 +43,6 @@
extern void load_gs_index(unsigned);

/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value) \
- asm volatile("\n" \
- "1:\t" \
- "movl %k0,%%" #seg "\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3:\t" \
- "movl %1,%%" #seg "\n\t" \
- "jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- ".align 8\n\t" \
- ".quad 1b,3b\n" \
- ".previous" \
- : :"r" (value), "r" (0))
-
-/*
* Clear and set 'TS' bit respectively
*/
#define clts() __asm__ __volatile__ ("clts")
--
1.4.4.2
