[patch 04/14] x86/irq: Make irqstats array based

From: Thomas Gleixner

Date: Wed Mar 04 2026 - 13:55:56 EST


Having the x86 specific interrupt statistics as a data structure with
individual members instead of an array is just stupid as it requires
endless copy and paste in arch_show_interrupts() and arch_irq_stat_cpu(),
where the latter does not even take the latest interrupt additions into
account. The resulting #ifdef orgy is just disgusting.

Convert it to an array of counters, which does not make a difference in the
actual interrupt hotpath increment as the array index is constant and
therefore not any different than the member based access.

But in arch_show_interrupts() and arch_irq_stat_cpu() this just turns into
a loop, which reduces the text size by ~2k (~12%):

text data bss dec hex filename
19643 15250 904 35797 8bd5 ../build/arch/x86/kernel/irq.o
17355 15250 904 33509 82e5 ../build/arch/x86/kernel/irq.o

Adding a new vector or software counter only requires updating the table
and everything just works. Using the core-provided emit function, which
short-cuts the printing of zero-valued counters, makes the output
significantly faster.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxx>
---
arch/x86/events/amd/core.c | 2
arch/x86/events/amd/ibs.c | 2
arch/x86/events/core.c | 2
arch/x86/events/intel/core.c | 2
arch/x86/events/intel/knc.c | 2
arch/x86/events/intel/p4.c | 2
arch/x86/events/zhaoxin/core.c | 2
arch/x86/hyperv/hv_init.c | 2
arch/x86/include/asm/hardirq.h | 69 ++++++----
arch/x86/include/asm/mce.h | 3
arch/x86/kernel/apic/apic.c | 4
arch/x86/kernel/apic/ipi.c | 2
arch/x86/kernel/cpu/acrn.c | 2
arch/x86/kernel/cpu/mce/amd.c | 2
arch/x86/kernel/cpu/mce/core.c | 8 -
arch/x86/kernel/cpu/mce/threshold.c | 2
arch/x86/kernel/cpu/mshyperv.c | 4
arch/x86/kernel/irq.c | 227 ++++++++++--------------------------
arch/x86/kernel/irq_work.c | 2
arch/x86/kernel/kvm.c | 2
arch/x86/kernel/nmi.c | 4
arch/x86/kernel/smp.c | 6
arch/x86/mm/tlb.c | 2
arch/x86/xen/enlighten_hvm.c | 2
arch/x86/xen/enlighten_pv.c | 2
arch/x86/xen/smp.c | 6
arch/x86/xen/smp_pv.c | 2
27 files changed, 135 insertions(+), 232 deletions(-)

--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1032,7 +1032,7 @@ static int amd_pmu_v2_handle_irq(struct
* Unmasking the LVTPC is not required as the Mask (M) bit of the LVT
* PMI entry is not set by the local APIC when a PMC overflow occurs
*/
- inc_irq_stat(apic_perf_irqs);
+ inc_irq_stat(APIC_PERF);

done:
cpuc->enabled = pmu_enabled;
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1403,7 +1403,7 @@ perf_ibs_nmi_handler(unsigned int cmd, s
handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

if (handled)
- inc_irq_stat(apic_perf_irqs);
+ inc_irq_stat(APIC_PERF);

perf_sample_event_took(sched_clock() - stamp);

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1747,7 +1747,7 @@ int x86_pmu_handle_irq(struct pt_regs *r
}

if (handled)
- inc_irq_stat(apic_perf_irqs);
+ inc_irq_stat(APIC_PERF);

return handled;
}
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3504,7 +3504,7 @@ static int handle_pmi_common(struct pt_r
int bit;
int handled = 0;

- inc_irq_stat(apic_perf_irqs);
+ inc_irq_stat(APIC_PERF);

/*
* Ignore a range of extra bits in status that do not indicate
--- a/arch/x86/events/intel/knc.c
+++ b/arch/x86/events/intel/knc.c
@@ -238,7 +238,7 @@ static int knc_pmu_handle_irq(struct pt_
goto done;
}

- inc_irq_stat(apic_perf_irqs);
+ inc_irq_stat(APIC_PERF);

for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -1077,7 +1077,7 @@ static int p4_pmu_handle_irq(struct pt_r
}

if (handled)
- inc_irq_stat(apic_perf_irqs);
+ inc_irq_stat(APIC_PERF);

/*
* When dealing with the unmasking of the LVTPC on P4 perf hw, it has
--- a/arch/x86/events/zhaoxin/core.c
+++ b/arch/x86/events/zhaoxin/core.c
@@ -373,7 +373,7 @@ static int zhaoxin_pmu_handle_irq(struct
else
zhaoxin_pmu_ack_status(status);

- inc_irq_stat(apic_perf_irqs);
+ inc_irq_stat(APIC_PERF);

/*
* CondChgd bit 63 doesn't mean any overflow status. Ignore
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -219,7 +219,7 @@ static inline bool hv_reenlightenment_av
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
{
apic_eoi();
- inc_irq_stat(irq_hv_reenlightenment_count);
+ inc_irq_stat(HYPERV_REENLIGHTENMENT);
schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
}

--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -4,51 +4,60 @@

#include <linux/threads.h>

-typedef struct {
-#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
- u8 kvm_cpu_l1tf_flush_l1d;
-#endif
- unsigned int __nmi_count; /* arch dependent */
+enum {
+ IRQ_COUNT_NMI,
#ifdef CONFIG_X86_LOCAL_APIC
- unsigned int apic_timer_irqs; /* arch dependent */
- unsigned int irq_spurious_count;
- unsigned int icr_read_retry_count;
+ IRQ_COUNT_APIC_TIMER,
+ IRQ_COUNT_SPURIOUS,
+ IRQ_COUNT_APIC_PERF,
+ IRQ_COUNT_IRQ_WORK,
+ IRQ_COUNT_ICR_READ_RETRY,
+ IRQ_COUNT_X86_PLATFORM_IPI,
#endif
-#if IS_ENABLED(CONFIG_KVM)
- unsigned int kvm_posted_intr_ipis;
- unsigned int kvm_posted_intr_wakeup_ipis;
- unsigned int kvm_posted_intr_nested_ipis;
-#endif
-#ifdef CONFIG_GUEST_PERF_EVENTS
- unsigned int perf_guest_mediated_pmis;
-#endif
- unsigned int x86_platform_ipis; /* arch dependent */
- unsigned int apic_perf_irqs;
- unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
- unsigned int irq_resched_count;
- unsigned int irq_call_count;
+ IRQ_COUNT_RESCHEDULE,
+ IRQ_COUNT_CALL_FUNCTION,
+ IRQ_COUNT_TLB,
#endif
- unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
- unsigned int irq_thermal_count;
+ IRQ_COUNT_THERMAL_APIC,
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
- unsigned int irq_threshold_count;
+ IRQ_COUNT_THRESHOLD_APIC,
#endif
#ifdef CONFIG_X86_MCE_AMD
- unsigned int irq_deferred_error_count;
+ IRQ_COUNT_DEFERRED_ERROR,
+#endif
+#ifdef CONFIG_X86_MCE
+ IRQ_COUNT_MCE_EXCEPTION,
+ IRQ_COUNT_MCE_POLL,
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
- unsigned int irq_hv_callback_count;
+ IRQ_COUNT_HYPERVISOR_CALLBACK,
#endif
#if IS_ENABLED(CONFIG_HYPERV)
- unsigned int irq_hv_reenlightenment_count;
- unsigned int hyperv_stimer0_count;
+ IRQ_COUNT_HYPERV_REENLIGHTENMENT,
+ IRQ_COUNT_HYPERV_STIMER0,
+#endif
+#if IS_ENABLED(CONFIG_KVM)
+ IRQ_COUNT_POSTED_INTR,
+ IRQ_COUNT_POSTED_INTR_NESTED,
+ IRQ_COUNT_POSTED_INTR_WAKEUP,
+#endif
+#ifdef CONFIG_GUEST_PERF_EVENTS
+ IRQ_COUNT_PERF_GUEST_MEDIATED_PMI,
#endif
#ifdef CONFIG_X86_POSTED_MSI
- unsigned int posted_msi_notification_count;
+ IRQ_COUNT_POSTED_MSI_NOTIFICATION,
+#endif
+ IRQ_COUNT_MAX,
+};
+
+typedef struct {
+#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
+ u8 kvm_cpu_l1tf_flush_l1d;
#endif
+ unsigned int counts[IRQ_COUNT_MAX];
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
@@ -58,7 +67,7 @@ DECLARE_PER_CPU_ALIGNED(struct pi_desc,
#endif
#define __ARCH_IRQ_STAT

-#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
+#define inc_irq_stat(index) this_cpu_inc(irq_stat.counts[IRQ_COUNT_##index])

extern void ack_bad_irq(unsigned int irq);

--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -291,9 +291,6 @@ bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
bool mce_usable_address(struct mce *m);

-DECLARE_PER_CPU(unsigned, mce_exception_count);
-DECLARE_PER_CPU(unsigned, mce_poll_count);
-
typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);

--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1040,7 +1040,7 @@ static void local_apic_timer_interrupt(v
/*
* the NMI deadlock-detector uses this.
*/
- inc_irq_stat(apic_timer_irqs);
+ inc_irq_stat(APIC_TIMER);

evt->event_handler(evt);
}
@@ -2108,7 +2108,7 @@ static noinline void handle_spurious_int

trace_spurious_apic_entry(vector);

- inc_irq_stat(irq_spurious_count);
+ inc_irq_stat(SPURIOUS);

/*
* If this is a spurious interrupt then do not acknowledge
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -120,7 +120,7 @@ u32 apic_mem_wait_icr_idle_timeout(void)
for (cnt = 0; cnt < 1000; cnt++) {
if (!(apic_read(APIC_ICR) & APIC_ICR_BUSY))
return 0;
- inc_irq_stat(icr_read_retry_count);
+ inc_irq_stat(ICR_READ_RETRY);
udelay(100);
}
return APIC_ICR_BUSY;
--- a/arch/x86/kernel/cpu/acrn.c
+++ b/arch/x86/kernel/cpu/acrn.c
@@ -52,7 +52,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_ca
* HYPERVISOR_CALLBACK_VECTOR.
*/
apic_eoi();
- inc_irq_stat(irq_hv_callback_count);
+ inc_irq_stat(HYPERVISOR_CALLBACK);

if (acrn_intr_handler)
acrn_intr_handler();
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -840,7 +840,7 @@ bool amd_mce_usable_address(struct mce *
DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error)
{
trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
- inc_irq_stat(irq_deferred_error_count);
+ inc_irq_stat(DEFERRED_ERROR);
deferred_error_int_vector();
trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
apic_eoi();
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -67,8 +67,6 @@ static DEFINE_MUTEX(mce_sysfs_mutex);

#define SPINUNIT 100 /* 100ns */

-DEFINE_PER_CPU(unsigned, mce_exception_count);
-
DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
@@ -716,8 +714,6 @@ static noinstr void mce_read_aux(struct
}
}

-DEFINE_PER_CPU(unsigned, mce_poll_count);
-
/*
* We have three scenarios for checking for Deferred errors:
*
@@ -820,7 +816,7 @@ void machine_check_poll(enum mcp_flags f
struct mce *m;
int i;

- this_cpu_inc(mce_poll_count);
+ inc_irq_stat(MCE_POLL);

mce_gather_info(&err, NULL);
m = &err.m;
@@ -1595,7 +1591,7 @@ noinstr void do_machine_check(struct pt_
*/
lmce = 1;

- this_cpu_inc(mce_exception_count);
+ inc_irq_stat(MCE_EXCEPTION);

mce_gather_info(&err, regs);
m = &err.m;
--- a/arch/x86/kernel/cpu/mce/threshold.c
+++ b/arch/x86/kernel/cpu/mce/threshold.c
@@ -37,7 +37,7 @@ void (*mce_threshold_vector)(void) = def
DEFINE_IDTENTRY_SYSVEC(sysvec_threshold)
{
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
- inc_irq_stat(irq_threshold_count);
+ inc_irq_stat(THRESHOLD_APIC);
mce_threshold_vector();
trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
apic_eoi();
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -154,7 +154,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_cal
{
struct pt_regs *old_regs = set_irq_regs(regs);

- inc_irq_stat(irq_hv_callback_count);
+ inc_irq_stat(HYPERVISOR_CALLBACK);
if (mshv_handler)
mshv_handler();

@@ -191,7 +191,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_sti
{
struct pt_regs *old_regs = set_irq_regs(regs);

- inc_irq_stat(hyperv_stimer0_count);
+ inc_irq_stat(HYPERV_STIMER0);
if (hv_stimer0_handler)
hv_stimer0_handler();
add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -62,156 +62,84 @@ void ack_bad_irq(unsigned int irq)
apic_eoi();
}

-/*
- * A helper routine for putting space and decimal number without overhead
- * from rich format of printf().
- */
-static void put_decimal(struct seq_file *p, unsigned long long num)
-{
- const char *delimiter = " ";
- unsigned int width = 10;
+struct irq_stat_info {
+ unsigned int test_vector;
+ const char *symbol;
+ const char *text;
+};

- seq_put_decimal_ull_width(p, delimiter, num, width);
-}
+#define ISS(idx, sym, txt) [IRQ_COUNT_##idx] = { .symbol = sym, .text = txt }

-#define irq_stats(x) (&per_cpu(irq_stat, x))
-/*
- * /proc/interrupts printing for arch specific interrupts
- */
-int arch_show_interrupts(struct seq_file *p, int prec)
-{
- int j;
+#define ITS(idx, sym, txt) [IRQ_COUNT_##idx] = \
+ { .test_vector = idx## _VECTOR, .symbol = sym, .text = txt }

- seq_printf(p, "%*s:", prec, "NMI");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->__nmi_count);
- seq_puts(p, " Non-maskable interrupts\n");
+static const struct irq_stat_info irq_stat_info[IRQ_COUNT_MAX] = {
+ ISS(NMI, "NMI", " Non-maskable interrupts\n"),
#ifdef CONFIG_X86_LOCAL_APIC
- seq_printf(p, "%*s:", prec, "LOC");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->apic_timer_irqs);
- seq_puts(p, " Local timer interrupts\n");
-
- seq_printf(p, "%*s:", prec, "SPU");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->irq_spurious_count);
- seq_puts(p, " Spurious interrupts\n");
- seq_printf(p, "%*s:", prec, "PMI");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->apic_perf_irqs);
- seq_puts(p, " Performance monitoring interrupts\n");
- seq_printf(p, "%*s:", prec, "IWI");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->apic_irq_work_irqs);
- seq_puts(p, " IRQ work interrupts\n");
- seq_printf(p, "%*s:", prec, "RTR");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->icr_read_retry_count);
- seq_puts(p, " APIC ICR read retries\n");
- if (x86_platform_ipi_callback) {
- seq_printf(p, "%*s:", prec, "PLT");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->x86_platform_ipis);
- seq_puts(p, " Platform interrupts\n");
- }
+ ISS(APIC_TIMER, "LOC", " Local timer interrupts\n"),
+ ISS(SPURIOUS, "SPU", " Spurious interrupts\n"),
+ ISS(APIC_PERF, "PMI", " Performance monitoring interrupts\n"),
+ ISS(IRQ_WORK, "IWI", " IRQ work interrupts\n"),
+ ISS(ICR_READ_RETRY, "RTR", " APIC ICR read retries\n"),
+ ISS(X86_PLATFORM_IPI, "PLT", " Platform interrupts\n"),
#endif
#ifdef CONFIG_SMP
- seq_printf(p, "%*s:", prec, "RES");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->irq_resched_count);
- seq_puts(p, " Rescheduling interrupts\n");
- seq_printf(p, "%*s:", prec, "CAL");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->irq_call_count);
- seq_puts(p, " Function call interrupts\n");
- seq_printf(p, "%*s:", prec, "TLB");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->irq_tlb_count);
- seq_puts(p, " TLB shootdowns\n");
+ ISS(RESCHEDULE, "RES", " Rescheduling interrupts\n"),
+ ISS(CALL_FUNCTION, "CAL", " Function call interrupts\n"),
+ ISS(TLB, "TLB", " TLB shootdowns\n"),
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
- seq_printf(p, "%*s:", prec, "TRM");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->irq_thermal_count);
- seq_puts(p, " Thermal event interrupts\n");
+ ISS(THERMAL_APIC, "TRM", " Thermal event interrupts\n"),
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
- seq_printf(p, "%*s:", prec, "THR");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->irq_threshold_count);
- seq_puts(p, " Threshold APIC interrupts\n");
+ ISS(THRESHOLD_APIC, "THR", " Threshold APIC interrupts\n"),
#endif
#ifdef CONFIG_X86_MCE_AMD
- seq_printf(p, "%*s:", prec, "DFR");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->irq_deferred_error_count);
- seq_puts(p, " Deferred Error APIC interrupts\n");
+ ISS(DEFERRED_ERROR, "DFR", " Deferred Error APIC interrupts\n"),
#endif
#ifdef CONFIG_X86_MCE
- seq_printf(p, "%*s:", prec, "MCE");
- for_each_online_cpu(j)
- put_decimal(p, per_cpu(mce_exception_count, j));
- seq_puts(p, " Machine check exceptions\n");
- seq_printf(p, "%*s:", prec, "MCP");
- for_each_online_cpu(j)
- put_decimal(p, per_cpu(mce_poll_count, j));
- seq_puts(p, " Machine check polls\n");
+ ISS(MCE_EXCEPTION, "MCE", " Machine check exceptions\n"),
+ ISS(MCE_POLL, "MCP", " Machine check polls\n"),
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
- if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
- seq_printf(p, "%*s:", prec, "HYP");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->irq_hv_callback_count);
- seq_puts(p, " Hypervisor callback interrupts\n");
- }
+ ITS(HYPERVISOR_CALLBACK, "HYP", " Hypervisor callback interrupts\n"),
#endif
#if IS_ENABLED(CONFIG_HYPERV)
- if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
- seq_printf(p, "%*s:", prec, "HRE");
- for_each_online_cpu(j)
- put_decimal(p,
- irq_stats(j)->irq_hv_reenlightenment_count);
- seq_puts(p, " Hyper-V reenlightenment interrupts\n");
- }
- if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
- seq_printf(p, "%*s:", prec, "HVS");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->hyperv_stimer0_count);
- seq_puts(p, " Hyper-V stimer0 interrupts\n");
- }
-#endif
- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
-#if defined(CONFIG_X86_IO_APIC)
- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
+ ITS(HYPERV_REENLIGHTENMENT, "HRE", " Hyper-V reenlightenment interrupts\n"),
+ ITS(HYPERV_STIMER0, "HVS", " Hyper-V stimer0 interrupts\n"),
#endif
#if IS_ENABLED(CONFIG_KVM)
- seq_printf(p, "%*s:", prec, "PIN");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->kvm_posted_intr_ipis);
- seq_puts(p, " Posted-interrupt notification event\n");
-
- seq_printf(p, "%*s:", prec, "NPI");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->kvm_posted_intr_nested_ipis);
- seq_puts(p, " Nested posted-interrupt event\n");
-
- seq_printf(p, "%*s:", prec, "PIW");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->kvm_posted_intr_wakeup_ipis);
- seq_puts(p, " Posted-interrupt wakeup event\n");
+ ITS(POSTED_INTR, "PIN", " Posted-interrupt notification event\n"),
+ ITS(POSTED_INTR_NESTED, "NPI", " Nested posted-interrupt event\n"),
+ ITS(POSTED_INTR_WAKEUP, "PIW", " Posted-interrupt wakeup event\n"),
#endif
#ifdef CONFIG_GUEST_PERF_EVENTS
- seq_printf(p, "%*s:", prec, "VPMI");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->perf_guest_mediated_pmis);
- seq_puts(p, " Perf Guest Mediated PMI\n");
+ ISS(PERF_GUEST_MEDIATED_PMI, "VPMI", " Perf Guest Mediated PMI\n"),
#endif
#ifdef CONFIG_X86_POSTED_MSI
- seq_printf(p, "%*s:", prec, "PMN");
- for_each_online_cpu(j)
- put_decimal(p, irq_stats(j)->posted_msi_notification_count);
- seq_puts(p, " Posted MSI notification event\n");
+ ISS(POSTED_MSI_NOTIFICATION, "PMN", " Posted MSI notification event\n"),
#endif
+};
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ const struct irq_stat_info *info = irq_stat_info;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(irq_stat_info); i++, info++) {
+ if (info->test_vector && !test_bit(info->test_vector, system_vectors))
+ continue;
+
+ seq_printf(p, "%*s:", prec, info->symbol);
+ irq_proc_emit_counts(p, &irq_stat.counts[i]);
+ seq_puts(p, info->text);
+ }
+
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", (unsigned int) atomic_read(&irq_err_count));
+ if (IS_ENABLED(CONFIG_X86_IO_APIC))
+ seq_printf(p, "%*s: %10u\n", prec, "MIS", (unsigned int) atomic_read(&irq_mis_count));
return 0;
}

@@ -220,38 +148,11 @@ int arch_show_interrupts(struct seq_file
*/
u64 arch_irq_stat_cpu(unsigned int cpu)
{
- u64 sum = irq_stats(cpu)->__nmi_count;
+ irq_cpustat_t *p = per_cpu_ptr(&irq_stat, cpu);
+ u64 sum = 0;

-#ifdef CONFIG_X86_LOCAL_APIC
- sum += irq_stats(cpu)->apic_timer_irqs;
- sum += irq_stats(cpu)->irq_spurious_count;
- sum += irq_stats(cpu)->apic_perf_irqs;
- sum += irq_stats(cpu)->apic_irq_work_irqs;
- sum += irq_stats(cpu)->icr_read_retry_count;
- if (x86_platform_ipi_callback)
- sum += irq_stats(cpu)->x86_platform_ipis;
-#endif
-#ifdef CONFIG_SMP
- sum += irq_stats(cpu)->irq_resched_count;
- sum += irq_stats(cpu)->irq_call_count;
-#endif
-#ifdef CONFIG_X86_THERMAL_VECTOR
- sum += irq_stats(cpu)->irq_thermal_count;
-#endif
-#ifdef CONFIG_X86_MCE_THRESHOLD
- sum += irq_stats(cpu)->irq_threshold_count;
-#endif
-#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
- sum += irq_stats(cpu)->irq_hv_callback_count;
-#endif
-#if IS_ENABLED(CONFIG_HYPERV)
- sum += irq_stats(cpu)->irq_hv_reenlightenment_count;
- sum += irq_stats(cpu)->hyperv_stimer0_count;
-#endif
-#ifdef CONFIG_X86_MCE
- sum += per_cpu(mce_exception_count, cpu);
- sum += per_cpu(mce_poll_count, cpu);
-#endif
+ for (unsigned int i = 0; i < ARRAY_SIZE(irq_stat_info); i++)
+ sum += p->counts[i];
return sum;
}

@@ -354,7 +255,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platfo

apic_eoi();
trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
- inc_irq_stat(x86_platform_ipis);
+ inc_irq_stat(X86_PLATFORM_IPI);
if (x86_platform_ipi_callback)
x86_platform_ipi_callback();
trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
@@ -369,7 +270,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platfo
DEFINE_IDTENTRY_SYSVEC(sysvec_perf_guest_mediated_pmi_handler)
{
apic_eoi();
- inc_irq_stat(perf_guest_mediated_pmis);
+ inc_irq_stat(PERF_GUEST_MEDIATED_PMI);
perf_guest_handle_mediated_pmi();
}
#endif
@@ -395,7 +296,7 @@ EXPORT_SYMBOL_FOR_KVM(kvm_set_posted_int
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
apic_eoi();
- inc_irq_stat(kvm_posted_intr_ipis);
+ inc_irq_stat(POSTED_INTR);
}

/*
@@ -404,7 +305,7 @@ DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
apic_eoi();
- inc_irq_stat(kvm_posted_intr_wakeup_ipis);
+ inc_irq_stat(POSTED_INTR_WAKEUP);
kvm_posted_intr_wakeup_handler();
}

@@ -414,7 +315,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
apic_eoi();
- inc_irq_stat(kvm_posted_intr_nested_ipis);
+ inc_irq_stat(POSTED_INTR_NESTED);
}
#endif

@@ -488,7 +389,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi

/* Mark the handler active for intel_ack_posted_msi_irq() */
__this_cpu_write(posted_msi_handler_active, true);
- inc_irq_stat(posted_msi_notification_count);
+ inc_irq_stat(POSTED_MSI_NOTIFICATION);
irq_enter();

/*
@@ -583,7 +484,7 @@ static void smp_thermal_vector(void)
DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
- inc_irq_stat(irq_thermal_count);
+ inc_irq_stat(THERMAL_APIC);
smp_thermal_vector();
trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
apic_eoi();
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -18,7 +18,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_irq_work)
{
apic_eoi();
trace_irq_work_entry(IRQ_WORK_VECTOR);
- inc_irq_stat(apic_irq_work_irqs);
+ inc_irq_stat(IRQ_WORK);
irq_work_run();
trace_irq_work_exit(IRQ_WORK_VECTOR);
}
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -310,7 +310,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncp

apic_eoi();

- inc_irq_stat(irq_hv_callback_count);
+ inc_irq_stat(HYPERVISOR_CALLBACK);

if (__this_cpu_read(async_pf_enabled)) {
token = __this_cpu_read(apf_reason.token);
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -576,7 +576,7 @@ DEFINE_IDTENTRY_RAW(exc_nmi)

irq_state = irqentry_nmi_enter(regs);

- inc_irq_stat(__nmi_count);
+ inc_irq_stat(NMI);

if (IS_ENABLED(CONFIG_NMI_CHECK_CPU) && ignore_nmis) {
WRITE_ONCE(nsp->idt_ignored, nsp->idt_ignored + 1);
@@ -725,7 +725,7 @@ DEFINE_FREDENTRY_NMI(exc_nmi)

irq_state = irqentry_nmi_enter(regs);

- inc_irq_stat(__nmi_count);
+ inc_irq_stat(NMI);
default_do_nmi(regs);

irqentry_nmi_exit(regs, irq_state);
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -249,7 +249,7 @@ DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_res
{
apic_eoi();
trace_reschedule_entry(RESCHEDULE_VECTOR);
- inc_irq_stat(irq_resched_count);
+ inc_irq_stat(RESCHEDULE);
scheduler_ipi();
trace_reschedule_exit(RESCHEDULE_VECTOR);
}
@@ -258,7 +258,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_call_funct
{
apic_eoi();
trace_call_function_entry(CALL_FUNCTION_VECTOR);
- inc_irq_stat(irq_call_count);
+ inc_irq_stat(CALL_FUNCTION);
generic_smp_call_function_interrupt();
trace_call_function_exit(CALL_FUNCTION_VECTOR);
}
@@ -267,7 +267,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_call_funct
{
apic_eoi();
trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
- inc_irq_stat(irq_call_count);
+ inc_irq_stat(CALL_FUNCTION);
generic_smp_call_function_single_interrupt();
trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
}
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1144,7 +1144,7 @@ static void flush_tlb_func(void *info)
VM_WARN_ON(!irqs_disabled());

if (!local) {
- inc_irq_stat(irq_tlb_count);
+ inc_irq_stat(TLB);
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
}

--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -125,7 +125,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_ca
if (xen_percpu_upcall)
apic_eoi();

- inc_irq_stat(irq_hv_callback_count);
+ inc_irq_stat(HYPERVISOR_CALLBACK);

xen_evtchn_do_upcall();

--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -728,7 +728,7 @@ static void __xen_pv_evtchn_do_upcall(st
{
struct pt_regs *old_regs = set_irq_regs(regs);

- inc_irq_stat(irq_hv_callback_count);
+ inc_irq_stat(HYPERVISOR_CALLBACK);

xen_evtchn_do_upcall();

--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -23,7 +23,7 @@ static irqreturn_t xen_call_function_sin
*/
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
- inc_irq_stat(irq_resched_count);
+ inc_irq_stat(RESCHEDULE);
scheduler_ipi();

return IRQ_HANDLED;
@@ -254,7 +254,7 @@ void xen_send_IPI_allbutself(int vector)
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_interrupt();
- inc_irq_stat(irq_call_count);
+ inc_irq_stat(CALL_FUNCTION);

return IRQ_HANDLED;
}
@@ -262,7 +262,7 @@ static irqreturn_t xen_call_function_int
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_single_interrupt();
- inc_irq_stat(irq_call_count);
+ inc_irq_stat(CALL_FUNCTION);

return IRQ_HANDLED;
}
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -400,7 +400,7 @@ static void xen_pv_stop_other_cpus(int w
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
irq_work_run();
- inc_irq_stat(apic_irq_work_irqs);
+ inc_irq_stat(IRQ_WORK);

return IRQ_HANDLED;
}