[PATCH v6 1/4] x86: Clean up extable entry format (and free up a bit)
From: Andy Lutomirski
Date: Sun Jan 03 2016 - 20:26:30 EST
This adds two bits of fixup class information to a fixup entry,
generalizing the uaccess_err hack currently in place. Instead of
flagging the uaccess_err case with a magic 0x7ffffff0 bias on the
fixup offset, the top two bits of the fixup word now name the fixup
class and the remaining bits hold a biased offset to the handler.
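For example (offsets purely illustrative): a default-class entry whose
handler lies 0x100 bytes after its fixup field stores
0x100 + 0x20000000 = 0x20000100, while the same handler with the
uaccess_err class stores 0x100 + 0x20000000 + 0x80000000 = 0xa0000100.
Masking with 0xC0000000 recovers the class, and
(word & 0x3fffffff) - 0x20000000 recovers the signed offset.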
Forward-ported-from-3.9-by: Tony Luck <tony.luck@xxxxxxxxx>
Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
---
arch/x86/include/asm/asm.h | 70 ++++++++++++++++++++++++++++++----------------
arch/x86/mm/extable.c | 21 ++++++++------
2 files changed, 59 insertions(+), 32 deletions(-)
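
As a quick illustration of the new format (not part of the diff below),
_ASM_EXTABLE_EX(1b, 3b) in a .S file now expands to roughly:

        .pushsection "__ex_table","a"
        .balign 8
        .long (1b) - .
        .long (3b) - . + 0x20000000 - 0x80000000
        .popsection

i.e. the 0x7ffffff0 magic constant is gone and the class ends up in the
top bits of the second word.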
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 189679aba703..b64121ffb2da 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -43,19 +43,47 @@
#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
-#ifdef __ASSEMBLY__
-# define _ASM_EXTABLE(from,to) \
- .pushsection "__ex_table","a" ; \
- .balign 8 ; \
- .long (from) - . ; \
- .long (to) - . ; \
- .popsection
-# define _ASM_EXTABLE_EX(from,to) \
- .pushsection "__ex_table","a" ; \
- .balign 8 ; \
- .long (from) - . ; \
- .long (to) - . + 0x7ffffff0 ; \
+/*
+ * An exception table entry is 64 bits. The first 32 bits are the offset
+ * from that entry to the potentially faulting instruction. sortextable
+ * relies on that exact encoding. The second 32 bits encode the fault
+ * handler address, also as an offset from the entry.
+ *
+ * We want to stick two extra bits of handler class into the fault handler
+ * address. All of these are generated by relocations, so we can only
+ * rely on addition. We therefore emit:
+ *
+ * (target - here) + (class) + 0x20000000
+ *
+ * This has the property that the two high bits are the class and the
+ * rest is easy to decode.
+ */
+
+/* There are two bits of extable entry class, added to a signed offset. */
+#define _EXTABLE_CLASS_DEFAULT 0 /* standard uaccess fixup */
+#define _EXTABLE_CLASS_EX 0x80000000 /* uaccess + set uaccess_err */
+
+/*
+ * The biases are the class constants + 0x20000000, as signed integers.
+ * This can't use ordinary arithmetic -- the assembler isn't that smart.
+ */
+#define _EXTABLE_BIAS_DEFAULT 0x20000000
+#define _EXTABLE_BIAS_EX 0x20000000 - 0x80000000
+
+#define _ASM_EXTABLE(from,to) \
+ _ASM_EXTABLE_CLASS(from, to, _EXTABLE_BIAS_DEFAULT)
+
+#define _ASM_EXTABLE_EX(from,to) \
+ _ASM_EXTABLE_CLASS(from, to, _EXTABLE_BIAS_EX)
+
+#ifdef __ASSEMBLY__
+# define _EXPAND_EXTABLE_BIAS(x) x
+# define _ASM_EXTABLE_CLASS(from,to,bias) \
+ .pushsection "__ex_table","a" ; \
+ .balign 8 ; \
+ .long (from) - . ; \
+ .long (to) - . + _EXPAND_EXTABLE_BIAS(bias) ; \
.popsection
# define _ASM_NOKPROBE(entry) \
@@ -89,18 +117,12 @@
.endm
#else
-# define _ASM_EXTABLE(from,to) \
- " .pushsection \"__ex_table\",\"a\"\n" \
- " .balign 8\n" \
- " .long (" #from ") - .\n" \
- " .long (" #to ") - .\n" \
- " .popsection\n"
-
-# define _ASM_EXTABLE_EX(from,to) \
- " .pushsection \"__ex_table\",\"a\"\n" \
- " .balign 8\n" \
- " .long (" #from ") - .\n" \
- " .long (" #to ") - . + 0x7ffffff0\n" \
+# define _EXPAND_EXTABLE_BIAS(x) #x
+# define _ASM_EXTABLE_CLASS(from,to,bias) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 8\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - . + " _EXPAND_EXTABLE_BIAS(bias) "\n" \
" .popsection\n"
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 903ec1e9c326..95e2ede71206 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -8,16 +8,24 @@ ex_insn_addr(const struct exception_table_entry *x)
{
return (unsigned long)&x->insn + x->insn;
}
+static inline unsigned int
+ex_class(const struct exception_table_entry *x)
+{
+ return (unsigned int)x->fixup & 0xC0000000;
+}
+
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
- return (unsigned long)&x->fixup + x->fixup;
+ long offset = (long)((u32)x->fixup & 0x3fffffff) - (long)0x20000000;
+ return (unsigned long)&x->fixup + offset;
}
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
unsigned long new_ip;
+ unsigned int class;
#ifdef CONFIG_PNPBIOS
if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
@@ -35,12 +43,12 @@ int fixup_exception(struct pt_regs *regs)
fixup = search_exception_tables(regs->ip);
if (fixup) {
+ class = ex_class(fixup);
new_ip = ex_fixup_addr(fixup);
- if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
+ if (class == _EXTABLE_CLASS_EX) {
/* Special hack for uaccess_err */
current_thread_info()->uaccess_err = 1;
- new_ip -= 0x7ffffff0;
}
regs->ip = new_ip;
return 1;
@@ -53,18 +61,15 @@ int fixup_exception(struct pt_regs *regs)
int __init early_fixup_exception(unsigned long *ip)
{
const struct exception_table_entry *fixup;
- unsigned long new_ip;
fixup = search_exception_tables(*ip);
if (fixup) {
- new_ip = ex_fixup_addr(fixup);
-
- if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
+ if (ex_class(fixup) == _EXTABLE_CLASS_EX) {
/* uaccess handling not supported during early boot */
return 0;
}
- *ip = new_ip;
+ *ip = ex_fixup_addr(fixup);
return 1;
}
--
2.1.4
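
For anyone who wants to poke at the arithmetic outside the kernel, below
is a small userspace sketch (not part of the patch). It mimics the
ex_class()/ex_fixup_addr() decode above; the fake addresses and the
encode_fixup() helper merely stand in for what the assembler computes
from relocations at build time.

/*
 * Userspace sketch of the extable fixup encoding (illustrative only).
 * Build: gcc -O2 -o extable-sketch extable-sketch.c
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXTABLE_CLASS_DEFAULT   0x00000000u     /* standard fixup */
#define EXTABLE_CLASS_EX        0x80000000u     /* uaccess + set uaccess_err */
#define EXTABLE_BIAS            0x20000000u

/* Same layout as struct exception_table_entry: two 32-bit words. */
struct fake_entry {
        int32_t insn;
        int32_t fixup;
};

/* What ".long (to) - . + bias" computes at build time. */
static int32_t encode_fixup(uint64_t fixup_field, uint64_t handler,
                            uint32_t class)
{
        uint32_t word = (uint32_t)(handler - fixup_field) + class + EXTABLE_BIAS;

        return (int32_t)word;
}

/* Mirrors ex_class() from the patch: the class is the top two bits. */
static uint32_t fake_ex_class(const struct fake_entry *e)
{
        return (uint32_t)e->fixup & 0xC0000000u;
}

/* Mirrors ex_fixup_addr(): strip the class, undo the bias, add back. */
static uint64_t fake_ex_fixup_addr(uint64_t fixup_field,
                                   const struct fake_entry *e)
{
        int64_t offset = (int64_t)((uint32_t)e->fixup & 0x3fffffff) -
                         (int64_t)EXTABLE_BIAS;

        return fixup_field + offset;
}

int main(void)
{
        uint64_t fixup_field = 0xffffffff81000010ull;   /* &entry->fixup */
        uint64_t handler = 0xffffffff81234560ull;       /* fixup target */
        uint32_t classes[] = { EXTABLE_CLASS_DEFAULT, EXTABLE_CLASS_EX };
        struct fake_entry e = { 0, 0 };

        for (unsigned int i = 0; i < 2; i++) {
                e.fixup = encode_fixup(fixup_field, handler, classes[i]);

                assert(fake_ex_class(&e) == classes[i]);
                assert(fake_ex_fixup_addr(fixup_field, &e) == handler);
                printf("word=%#010x class=%#010x handler=%#llx\n",
                       (uint32_t)e.fixup, fake_ex_class(&e),
                       (unsigned long long)fake_ex_fixup_addr(fixup_field, &e));
        }
        return 0;
}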