[PATCH V3 1/2] x86, mce, severities: Add AMD severities function

From: Aravind Gopalakrishnan
Date: Mon Mar 23 2015 - 18:07:16 EST


Add a severities function that caters to AMD processors.
This allows us to do some vendor-specific work within the
function if necessary.

Also, introduce a vendor flags bitfield which contains vendor-specific
flags. The severities code uses this to determine error scope based on
the presence of these flags.
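
For reference, a condensed user-space model of the grading order the new
mce_severity_amd() implements (the bool-parameter interface and names
here are purely illustrative, not kernel API):

#include <stdbool.h>

/* Illustrative model of the AMD grading order in this patch; not kernel code. */
enum sev { SEV_KEEP, SEV_DEFERRED, SEV_UC, SEV_AR, SEV_PANIC };

enum sev amd_grade(bool pcc, bool uc, bool over, bool deferred,
		   bool ripv, bool in_kernel, bool overflow_recov)
{
	if (pcc)				/* Processor Context Corrupt: die */
		return SEV_PANIC;

	if (uc) {
		if (overflow_recov) {
			if (!ripv && in_kernel)	/* cannot return: die */
				return SEV_PANIC;
			return SEV_AR;		/* kill current process */
		}
		if (over)			/* an error was lost, no recovery */
			return SEV_PANIC;
		return SEV_UC;			/* log and exit the #MC handler */
	}

	if (deferred)				/* poll handler -> memory-failure */
		return SEV_DEFERRED;

	return SEV_KEEP;			/* corrected: decoding left to EDAC */
}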

This is based on work by Boris Petkov.

Testing details:
Tested the patch for regressions on:
Fam10h, Model 9h (Greyhound)
Fam15h, Models 00h-0fh (Orochi), 30h-3fh (Kaveri) and 60h-6fh (Carrizo)
Fam16h, Models 00h-0fh (Kabini)

Signed-off-by: Aravind Gopalakrishnan <aravind.gopalakrishnan@xxxxxxx>
---
Changes from V2:
- Rebase on top of latest tip
- Tested patch on more systems and updated commit message appropriately

Changes from V1:
- Test mce_flags.overflow_recov once instead of multiple times
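
Note for testers (not part of the patch): the capability bit which
__mcheck_cpu_init_vendor() reads below, CPUID Fn8000_0007_EBX[0]
(MCA overflow recovery), can also be checked from user space. A minimal
sketch, assuming GCC's <cpuid.h> helpers:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* Extended leaf 0x80000007: power management / RAS capabilities */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x80000007 not available\n");
		return 1;
	}

	printf("overflow_recov: %s\n", (ebx & 0x1) ? "yes" : "no");
	return 0;
}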

arch/x86/include/asm/mce.h | 6 ++++
arch/x86/kernel/cpu/mcheck/mce-severity.c | 53 +++++++++++++++++++++++++++++++
arch/x86/kernel/cpu/mcheck/mce.c | 9 ++++++
3 files changed, 68 insertions(+)

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index fd38a23..b574fbf 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -116,6 +116,12 @@ struct mca_config {
u32 rip_msr;
};

+struct mce_vendor_flags {
+ __u64 overflow_recov : 1, /* cpuid_ebx(0x80000007) */
+ __reserved_0 : 63;
+};
+extern struct mce_vendor_flags mce_flags;
+
extern struct mca_config mca_cfg;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 8bb4330..4f8f87d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -186,12 +186,65 @@ static int error_context(struct mce *m)
return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
}

+/* keep mce_severity_amd() in sync with the AMD error scope hierarchy table */
+static int mce_severity_amd(struct mce *m, enum context ctx)
+{
+
+ /* Processor Context Corrupt, no need to fumble too much, die! */
+ if (m->status & MCI_STATUS_PCC)
+ return MCE_PANIC_SEVERITY;
+
+ if (m->status & MCI_STATUS_UC) {
+ /*
+ * On older systems, where the overflow_recov flag is not
+ * present, we should simply PANIC if an overflow occurs.
+ * If the overflow_recov flag is set, then SW can try
+ * to at least kill the process to salvage system operation.
+ */
+
+ if (mce_flags.overflow_recov) {
+ /* software can try to contain */
+ if (!(m->mcgstatus & MCG_STATUS_RIPV))
+ if (ctx == IN_KERNEL)
+ return MCE_PANIC_SEVERITY;
+
+ /* kill current process */
+ return MCE_AR_SEVERITY;
+ } else {
+ /* at least one error was not logged */
+ if (m->status & MCI_STATUS_OVER)
+ return MCE_PANIC_SEVERITY;
+ }
+ /*
+ * In any other case, return MCE_UC_SEVERITY so that
+ * we log the error and exit the #MC handler.
+ */
+ return MCE_UC_SEVERITY;
+ }
+
+ /*
+ * deferred error: poll handler catches these and adds to mce_ring
+ * so memory-failure can take recovery actions.
+ */
+ if (m->status & MCI_STATUS_DEFERRED)
+ return MCE_DEFERRED_SEVERITY;
+
+ /*
+ * corrected error: poll handler catches these and passes
+ * responsibility of decoding the error to EDAC
+ */
+ return MCE_KEEP_SEVERITY;
+}
+
int mce_severity(struct mce *m, int tolerant, char **msg, bool is_excp)
{
enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
enum context ctx = error_context(m);
struct severity *s;

+ if (m->cpuvendor == X86_VENDOR_AMD)
+ return mce_severity_amd(m, ctx);
+
for (s = severities;; s++) {
if ((m->status & s->mask) != s->result)
continue;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3cc6793..03c7e0a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -65,6 +65,7 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
DEFINE_PER_CPU(unsigned, mce_exception_count);

struct mce_bank *mce_banks __read_mostly;
+struct mce_vendor_flags mce_flags __read_mostly;

struct mca_config mca_cfg __read_mostly = {
.bootlog = -1,
@@ -1533,6 +1534,13 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
mce_banks[0].ctl = 0;

/*
+ * overflow_recov is supported for F15h Models 00h-0fh
+ * even though we don't have a CPUID bit for it
+ */
+ if (c->x86 == 0x15 && c->x86_model <= 0xf)
+ mce_flags.overflow_recov = 1;
+
+ /*
* Turn off MC4_MISC thresholding banks on those models since
* they're not supported there.
*/
@@ -1631,6 +1639,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
break;
case X86_VENDOR_AMD:
mce_amd_feature_init(c);
+ mce_flags.overflow_recov = cpuid_ebx(0x80000007) & 0x1;
break;
default:
break;
--
1.9.1
