Re: [PATCH RFC 1/1] genirq: Make threaded handler use irq affinity for managed interrupt

From: Ming Lei
Date: Sat Jan 04 2020 - 07:04:40 EST


On Fri, Jan 03, 2020 at 11:50:51AM +0000, John Garry wrote:
> On 03/01/2020 11:29, Ming Lei wrote:
> > On Fri, Jan 03, 2020 at 10:41:48AM +0000, John Garry wrote:
> > > On 03/01/2020 00:46, Ming Lei wrote:
> > > > > > > d the
> > > > > > > DMA API more than an architecture-specific problem.
> > > > > > >
> > > > > > > Given that we have so far very little data, I'd hold off any conclusion.
> > > > > > We can start to collect latency data of dma unmapping vs nvme_irq()
> > > > > > on both x86 and arm64.
> > > > > >
> > > > > > I will see if I can get a such box for collecting the latency data.
> > > > > To reiterate what I mentioned before about IOMMU DMA unmap on x86, a key
> > > > > difference is that by default it uses non-strict (lazy) mode unmapping, i.e.
> > > > > we unmap in batches. ARM64 uses the general default, which is strict mode, i.e.
> > > > > every unmap results in an IOTLB flush.
> > > > >
> > > > > In my setup, if I switch to lazy unmap (set iommu.strict=0 on cmdline), then
> > > > > no lockup.
> > > > >
> > > > > Are any special IOMMU setups being used for x86, like enabling strict mode?
> > > > > I don't know...
> > > > BTW, I have run the test on a 224-core ARM64 machine with a 32-hw_queue NVMe;
> > > > the softlockup issue can be triggered within one minute.
> > > >
> > > > nvme_irq() often takes ~5us to complete on this machine, so there is a real
> > > > risk of CPU lockup when IOPS is > 200K.
> > >
> > > Do you have a typical nvme_irq() completion time for a mid-range x86 server?
> >
> > ~1us.
>
> Eh, so ~5x faster on the x86 machine?! Seems like a real issue here.
>
> >
> > It is done via a bcc script, and eBPF itself may introduce some overhead.
> >
>
> Can you share the script/instructions? I would like to test on my machine. I
> assume you tested on a ThunderX2.

This would have been easier to do with bpftrace than bcc, but bpftrace has a bug
when there are too many CPU cores on arm64.

So I used a modified hardirqs.py instead; you can collect the latency histogram
via funclatency too.
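
For example, the stock funclatency tool from bcc should give a similar per-call
latency histogram for nvme_irq() (the path below is just where many distros
install the bcc tools; adjust as needed):

    /usr/share/bcc/tools/funclatency -u nvme_irq

The -u switch prints the histogram in microseconds.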



Thanks,
Ming
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# hardirqs Summarize hard IRQ (interrupt) event time.
# For Linux, uses BCC, eBPF.
#
# USAGE: hardirqs [-h] [-T] [-N] [-C] [-d] [interval] [outputs]
#
# Thanks Amer Ather for help understanding irq behavior.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 19-Oct-2015 Brendan Gregg Created this.

from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse

# arguments
examples = """examples:
./hardirqs # sum hard irq event time
./hardirqs -d # show hard irq event time as histograms
./hardirqs 1 10 # print 1 second summaries, 10 times
./hardirqs -NT 1 # 1s summaries, nanoseconds, and timestamps
"""
parser = argparse.ArgumentParser(
    description="Summarize hard irq event time as histograms",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-T", "--timestamp", action="store_true",
    help="include timestamp on output")
parser.add_argument("-N", "--nanoseconds", action="store_true",
    help="output in nanoseconds")
parser.add_argument("-C", "--count", action="store_true",
    help="show event counts instead of timing")
parser.add_argument("-d", "--dist", action="store_true",
    help="show distributions as histograms")
parser.add_argument("interval", nargs="?", default=99999999,
    help="output interval, in seconds")
parser.add_argument("outputs", nargs="?", default=99999999,
    help="number of outputs")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
countdown = int(args.outputs)
if args.count and (args.dist or args.nanoseconds):
    print("The --count option can't be used with time-based options")
    exit()
if args.count:
    factor = 1
    label = "count"
elif args.nanoseconds:
    factor = 1
    label = "nsecs"
else:
    factor = 1000
    label = "usecs"
debug = 0

# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/interrupt.h>

struct irq_key {
    u64 id;
    u64 slot;
};

BPF_HISTOGRAM(irq_rqs, struct irq_key, 512);

struct irq_info {
    u32 irq;
    u32 rqs;
    u64 start;
};

// at most 256 CPUs
//BPF_ARRAY(curr_irq, struct irq_info, 256);
BPF_HASH(curr_irq, u32, struct irq_info);

struct irq_sum_info {
    u64 total;
    u64 cnt;
    u64 rq_cnt;
};
BPF_HASH(irq, u32, struct irq_sum_info);
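
/*
 * curr_irq: per-CPU in-flight interrupt state, keyed by CPU id
 * irq:      per-IRQ running totals (time, interrupt count, completed requests)
 * irq_rqs:  per-IRQ linear histogram of requests completed per hard interrupt
 */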

// time IRQ
int trace_start(struct pt_regs *ctx, struct irq_desc *desc)
{
    struct irq_info __info = {};
    struct irq_info *info = &__info;
    u32 id = bpf_get_smp_processor_id();

    info->irq = desc->irq_data.irq;
    info->start = bpf_ktime_get_ns();
    info->rqs = 0;

    curr_irq.update(&id, &__info);

    return 0;
}

int trace_completion(struct pt_regs *ctx)
{
    u32 irq_no;
    struct irq_sum_info *sum;
    struct irq_sum_info zero = {};
    u64 delta;
    struct irq_desc *desc = (struct irq_desc *)PT_REGS_PARM1(ctx);
    u32 id = bpf_get_smp_processor_id();
    struct irq_info *info = curr_irq.lookup(&id);

    if (!info)
        return 0;

    irq_no = info->irq;
    delta = bpf_ktime_get_ns() - info->start;

    if (irq_no != desc->irq_data.irq)
        bpf_trace_printk("irq not match: %d %d\\n", irq_no, desc->irq_data.irq);

    sum = irq.lookup_or_init(&irq_no, &zero);
    if (sum) {
        sum->total += delta;
        sum->cnt++;
        sum->rq_cnt += info->rqs;
    }

    struct irq_key ikey = {.id = irq_no, .slot = info->rqs};
    irq_rqs.increment(ikey);
    curr_irq.delete(&id);

    return 0;
}

int trace_rq(struct pt_regs *ctx, struct request *rq)
{
    u32 id = bpf_get_smp_processor_id();
    struct irq_info *info = curr_irq.lookup(&id);

    if (!info)
        return 0;

    info->rqs++;

    return 0;
}
"""

# code substitutions
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# load BPF program
b = BPF(text=bpf_text)

# these should really use irq:irq_handler_entry/exit tracepoints:
b.attach_kprobe(event="handle_irq_event_percpu", fn_name="trace_start")
b.attach_kretprobe(event="handle_irq_event_percpu", fn_name="trace_completion")
b.attach_kprobe(event="blk_mq_complete_request", fn_name="trace_rq")
print("Tracing hard irq event time... Hit Ctrl-C to end.")

# output
exiting = 0 if args.interval else 1
dist = b.get_table("irq")
irq_rqs = b.get_table("irq_rqs")

if args.timestamp:
    print("start time %-8s\n" % strftime("%H:%M:%S"), end="")

while (1):
    try:
        sleep(int(args.interval))
    except KeyboardInterrupt:
        exiting = 1

    print()
    if args.timestamp:
        print("end time %-8s\n" % strftime("%H:%M:%S"), end="")

    if args.dist:
        dist.print_log2_hist(label, "hardirq")
    else:
        print("%-10s %11s %11s %11s %11s %11s" % ("HARDIRQ", "TOTAL_TIME",
              "TOTAL_COUNT", "AVG_TIME", "RQS", "AVG_RQS"))
        for k, v in sorted(dist.items(), key=lambda dist: dist[1].total):
            print("%-10d %11d %11d %11d %11d %11d" % (k.value, v.total / factor,
                  v.cnt, v.total / (factor * v.cnt), v.rq_cnt, v.rq_cnt / v.cnt))
        dist.clear()

    irq_rqs.print_linear_hist("rqs", "irq")
    irq_rqs.clear()

    countdown -= 1
    if exiting or countdown == 0:
        exit()