Re: [PATCH 2/7] perf-events: Add support for supplementary event registers v4

From: Stephane Eranian
Date: Thu Jan 13 2011 - 12:31:19 EST


Hi,

I'd like to suggest that the OFFCORE_RESPONSE extra MSR encoding
be put into a dedicated field in the perf_event_attr instead of in the upper
32-bits of attr->config.

There may not be enough space to encode for future processors.

In fact, given that the Sandy Bridge PMU spec is now available, we
have a first example of this (see Vol3b figure 30.29). OFFCORE_RESPONSE
needs 38 bits. So, instead of having NHM/WSM use attr->config and SNB
use another field, I think it would make sense to have that in a new u64 field
for all processors. Despite the fact that OFFCORE_RESPONSE remains
a model-specific feature, I think it would help user tools and libraries if we
were to use a dedicated field.


On Mon, Dec 27, 2010 at 4:36 PM, Lin Ming <ming.m.lin@xxxxxxxxx> wrote:
> From: Andi Kleen <ak@xxxxxxxxxxxxxxx>
>
> Intel Nehalem/Westmere have a special OFFCORE_RESPONSE event
> that can be used to monitor any offcore accesses from a core.
> This is a very useful event for various tunings, and it's
> also needed to implement the generic LLC-* events correctly.
>
> Unfortunately this event requires programming a mask in a separate
> register. And worse this separate register is per core, not per
> CPU thread.
>
> This patch adds:
> - Teaches perf_events that OFFCORE_RESPONSE needs extra parameters.
> The extra parameters are passed by user space in the unused upper
> 32bits of the config word.
> - Add support to the Intel perf_event core to schedule per
> core resources. This adds fairly generic infrastructure that
> can be also used for other per core resources.
> The basic code has is patterned after the similar AMD northbridge
> constraints code.
>
> Thanks to Stephane Eranian who pointed out some problems
> in the original version and suggested improvements.
>
> Cc: eranian@xxxxxxxxxx
> Signed-off-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
> ---
>  arch/x86/kernel/cpu/perf_event.c       |   70 +++++++++++
>  arch/x86/kernel/cpu/perf_event_intel.c |  198 ++++++++++++++++++++++++++++++++
>  include/linux/perf_event.h             |    2 +
>  3 files changed, 270 insertions(+), 0 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
> index 0a360d1..df971ea 100644
> --- a/arch/x86/kernel/cpu/perf_event.c
> +++ b/arch/x86/kernel/cpu/perf_event.c
> @@ -93,6 +93,8 @@ struct amd_nb {
> Â Â Â Âstruct event_constraint event_constraints[X86_PMC_IDX_MAX];
> Â};
>
> +struct intel_percore;
> +
> Â#define MAX_LBR_ENTRIES Â Â Â Â Â Â Â Â16
>
> Âstruct cpu_hw_events {
> @@ -128,6 +130,13 @@ struct cpu_hw_events {
>    Âstruct perf_branch_entry    Âlbr_entries[MAX_LBR_ENTRIES];
>
> Â Â Â Â/*
> + Â Â Â Â* Intel percore register state.
> + Â Â Â Â* Coordinate shared resources between HT threads.
> + Â Â Â Â*/
> +    int               percore_used; /* Used by this CPU? */
> +    struct intel_percore      Â*per_core;
> +
> + Â Â Â /*
> Â Â Â Â * AMD specific bits
> Â Â Â Â */
>    Âstruct amd_nb      *amd_nb;
> @@ -175,6 +184,32 @@ struct cpu_hw_events {
> Â#define for_each_event_constraint(e, c) Â Â Â Â\
> Â Â Â Âfor ((e) = (c); (e)->weight; (e)++)
>
> +/*
> + * Extra registers for specific events.
> + * Some events need large masks and require external MSRs.
> + * Define a mapping to these extra registers.
> + * The actual contents are still encoded in unused parts of the
> + * original config u64.
> + */
> +struct extra_reg {
> +    unsigned int      Âevent;
> +    unsigned int      Âmsr;
> +    unsigned int      Âextra_shift;
> + Â Â Â u64 Â Â Â Â Â Â Â Â Â Â config_mask;
> + Â Â Â u64 Â Â Â Â Â Â Â Â Â Â valid_mask;
> +};
> +
> +#define EVENT_EXTRA_REG(e, ms, m, vm, es) { Â Â\
> + Â Â Â .event = (e), Â Â Â Â Â \
> + Â Â Â .msr = (ms), Â Â Â Â Â Â\
> + Â Â Â .config_mask = (m), Â Â \
> + Â Â Â .valid_mask = (vm), Â Â \
> + Â Â Â .extra_shift = (es), Â Â\
> + Â Â Â }
> +#define INTEL_EVENT_EXTRA_REG(event, msr, vm, es) Â Â Â\
> + Â Â Â EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, es)
> +#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, 0)
> +
> Âunion perf_capabilities {
> Â Â Â Âstruct {
>        Âu64   lbr_format  Â: 6;
> @@ -219,6 +254,7 @@ struct x86_pmu {
>    Âvoid      Â(*put_event_constraints)(struct cpu_hw_events *cpuc,
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â struct perf_event *event);
> Â Â Â Âstruct event_constraint *event_constraints;
> + Â Â Â struct event_constraint *percore_constraints;
>    Âvoid      Â(*quirks)(void);
>    Âint       perfctr_second_write;
>
> @@ -247,6 +283,11 @@ struct x86_pmu {
> Â Â Â Â */
>    Âunsigned long  lbr_tos, lbr_from, lbr_to; /* MSR base regs    */
>    Âint       lbr_nr;          Â/* hardware stack size */
> +
> + Â Â Â /*
> + Â Â Â Â* Extra registers for events
> + Â Â Â Â*/
> + Â Â Â struct extra_reg *extra_regs;
> Â};
>
> Âstatic struct x86_pmu x86_pmu __read_mostly;
> @@ -321,6 +362,33 @@ again:
> Â Â Â Âreturn new_raw_count;
> Â}
>
> +/*
> + * Find and validate any extra registers to set up.
> + */
> +static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
> +{
> + Â Â Â struct extra_reg *er;
> + Â Â Â u64 extra;
> +
> + Â Â Â event->hw.extra_reg = 0;
> + Â Â Â event->hw.extra_config = 0;
> +
> + Â Â Â if (!x86_pmu.extra_regs)
> + Â Â Â Â Â Â Â return 0;
> +
> + Â Â Â for (er = x86_pmu.extra_regs; er->msr; er++) {
> + Â Â Â Â Â Â Â if (er->event != (config & er->config_mask))
> + Â Â Â Â Â Â Â Â Â Â Â continue;
> + Â Â Â Â Â Â Â event->hw.extra_reg = er->msr;
> + Â Â Â Â Â Â Â extra = config >> er->extra_shift;
> + Â Â Â Â Â Â Â if (extra & ~er->valid_mask)
> + Â Â Â Â Â Â Â Â Â Â Â return -EINVAL;
> + Â Â Â Â Â Â Â event->hw.extra_config = extra;
> + Â Â Â Â Â Â Â break;
> + Â Â Â }
> + Â Â Â return 0;
> +}
> +
> Âstatic atomic_t active_events;
> Âstatic DEFINE_MUTEX(pmc_reserve_mutex);
>
> @@ -918,6 +986,8 @@ static void x86_pmu_enable(struct pmu *pmu)
> Âstatic inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
> Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âu64 enable_mask)
> Â{
> + Â Â Â if (hwc->extra_reg)
> + Â Â Â Â Â Â Â wrmsrl(hwc->extra_reg, hwc->extra_config);
> Â Â Â Âwrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
> Â}
>
> diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
> index 24e390e..32569d4 100644
> --- a/arch/x86/kernel/cpu/perf_event_intel.c
> +++ b/arch/x86/kernel/cpu/perf_event_intel.c
> @@ -1,5 +1,27 @@
> Â#ifdef CONFIG_CPU_SUP_INTEL
>
> +#define MAX_EXTRA_REGS 2
> +
> +/*
> + * Per register state.
> + */
> +struct er_account {
> +    int           ref;      Â/* reference count */
> +    unsigned int      Âextra_reg;   Â/* extra MSR number */
> + Â Â Â u64 Â Â Â Â Â Â Â Â Â Â extra_config; Â /* extra MSR config */
> +};
> +
> +/*
> + * Per core state
> + * This used to coordinate shared registers for HT threads.
> + */
> +struct intel_percore {
> +    raw_spinlock_t     Âlock;      /* protect structure */
> +    struct er_account    regs[MAX_EXTRA_REGS];
> +    int           refcnt;     /* number of threads */
> +    unsigned        Âcore_id;
> +};
> +
> Â/*
> Â* Intel PerfMon, used on Core and later.
> Â*/
> @@ -64,6 +86,18 @@ static struct event_constraint intel_nehalem_event_constraints[] =
> Â Â Â ÂEVENT_CONSTRAINT_END
> Â};
>
> +static struct extra_reg intel_nehalem_extra_regs[] =
> +{
> + Â Â Â INTEL_EVENT_EXTRA_REG(0xb7, 0x1a6, 0xffff, 32), /* OFFCORE_RESPONSE_0 */
> + Â Â Â EVENT_EXTRA_END
> +};
> +
> +static struct event_constraint intel_nehalem_percore_constraints[] =
> +{
> + Â Â Â INTEL_EVENT_CONSTRAINT(0xb7, 0),
> + Â Â Â EVENT_CONSTRAINT_END
> +};
> +
> Âstatic struct event_constraint intel_westmere_event_constraints[] =
> Â{
> Â Â Â ÂFIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
> @@ -76,6 +110,20 @@ static struct event_constraint intel_westmere_event_constraints[] =
> Â Â Â ÂEVENT_CONSTRAINT_END
> Â};
>
> +static struct extra_reg intel_westmere_extra_regs[] =
> +{
> + Â Â Â INTEL_EVENT_EXTRA_REG(0xb7, 0x1a6, 0xffff, 32), /* OFFCORE_RESPONSE_0 */
> + Â Â Â INTEL_EVENT_EXTRA_REG(0xbb, 0x1a7, 0xffff, 32), /* OFFCORE_RESPONSE_1 */
> + Â Â Â EVENT_EXTRA_END
> +};
> +
> +static struct event_constraint intel_westmere_percore_constraints[] =
> +{
> + Â Â Â INTEL_EVENT_CONSTRAINT(0xb7, 0),
> + Â Â Â INTEL_EVENT_CONSTRAINT(0xbb, 0),
> + Â Â Â EVENT_CONSTRAINT_END
> +};
> +
> Âstatic struct event_constraint intel_gen_event_constraints[] =
> Â{
> Â Â Â ÂFIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
> @@ -794,6 +842,66 @@ intel_bts_constraints(struct perf_event *event)
> Â}
>
> Âstatic struct event_constraint *
> +intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
> +{
> + Â Â Â struct hw_perf_event *hwc = &event->hw;
> + Â Â Â unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
> + Â Â Â struct event_constraint *c;
> + Â Â Â struct intel_percore *pc;
> + Â Â Â struct er_account *era;
> + Â Â Â int i;
> + Â Â Â int free_slot;
> + Â Â Â int found;
> +
> + Â Â Â if (!x86_pmu.percore_constraints)
> + Â Â Â Â Â Â Â return NULL;
> +
> + Â Â Â for (c = x86_pmu.percore_constraints; c->cmask; c++) {
> + Â Â Â Â Â Â Â if (e != c->code)
> + Â Â Â Â Â Â Â Â Â Â Â continue;
> +
> + Â Â Â Â Â Â Â /*
> + Â Â Â Â Â Â Â Â* Allocate resource per core.
> + Â Â Â Â Â Â Â Â*/
> + Â Â Â Â Â Â Â c = NULL;
> + Â Â Â Â Â Â Â pc = cpuc->per_core;
> + Â Â Â Â Â Â Â if (!pc)
> + Â Â Â Â Â Â Â Â Â Â Â break;
> + Â Â Â Â Â Â Â c = &emptyconstraint;
> + Â Â Â Â Â Â Â raw_spin_lock(&pc->lock);
> + Â Â Â Â Â Â Â free_slot = -1;
> + Â Â Â Â Â Â Â found = 0;
> + Â Â Â Â Â Â Â for (i = 0; i < MAX_EXTRA_REGS; i++) {
> + Â Â Â Â Â Â Â Â Â Â Â era = &pc->regs[i];
> + Â Â Â Â Â Â Â Â Â Â Â if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â /* Allow sharing same config */
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â if (hwc->extra_config == era->extra_config) {
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â era->ref++;
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â cpuc->percore_used = 1;
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â c = NULL;
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â }
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â /* else conflict */
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â found = 1;
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â break;
> + Â Â Â Â Â Â Â Â Â Â Â } else if (era->ref == 0 && free_slot == -1)
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â free_slot = i;
> + Â Â Â Â Â Â Â }
> + Â Â Â Â Â Â Â if (!found && free_slot != -1) {
> + Â Â Â Â Â Â Â Â Â Â Â era = &pc->regs[free_slot];
> + Â Â Â Â Â Â Â Â Â Â Â era->ref = 1;
> + Â Â Â Â Â Â Â Â Â Â Â era->extra_reg = hwc->extra_reg;
> + Â Â Â Â Â Â Â Â Â Â Â era->extra_config = hwc->extra_config;
> + Â Â Â Â Â Â Â Â Â Â Â cpuc->percore_used = 1;
> + Â Â Â Â Â Â Â Â Â Â Â c = NULL;
> + Â Â Â Â Â Â Â }
> + Â Â Â Â Â Â Â raw_spin_unlock(&pc->lock);
> + Â Â Â Â Â Â Â return c;
> + Â Â Â }
> +
> + Â Â Â return NULL;
> +}
> +
> +static struct event_constraint *
> Âintel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
> Â{
> Â Â Â Âstruct event_constraint *c;
> @@ -806,9 +914,50 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
> Â Â Â Âif (c)
> Â Â Â Â Â Â Â Âreturn c;
>
> + Â Â Â c = intel_percore_constraints(cpuc, event);
> + Â Â Â if (c)
> + Â Â Â Â Â Â Â return c;
> +
> Â Â Â Âreturn x86_get_event_constraints(cpuc, event);
> Â}
>
> +static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â struct perf_event *event)
> +{
> + Â Â Â struct extra_reg *er;
> + Â Â Â struct intel_percore *pc;
> + Â Â Â struct er_account *era;
> + Â Â Â struct hw_perf_event *hwc = &event->hw;
> + Â Â Â int i, allref;
> +
> + Â Â Â if (!cpuc->percore_used)
> + Â Â Â Â Â Â Â return;
> +
> + Â Â Â for (er = x86_pmu.extra_regs; er->msr; er++) {
> + Â Â Â Â Â Â Â if (er->event != (hwc->config & er->config_mask))
> + Â Â Â Â Â Â Â Â Â Â Â continue;
> +
> + Â Â Â Â Â Â Â pc = cpuc->per_core;
> + Â Â Â Â Â Â Â raw_spin_lock(&pc->lock);
> + Â Â Â Â Â Â Â for (i = 0; i < MAX_EXTRA_REGS; i++) {
> + Â Â Â Â Â Â Â Â Â Â Â era = &pc->regs[i];
> + Â Â Â Â Â Â Â Â Â Â Â if (era->ref > 0 &&
> + Â Â Â Â Â Â Â Â Â Â Â Â Â era->extra_config == hwc->extra_config &&
> + Â Â Â Â Â Â Â Â Â Â Â Â Â era->extra_reg == er->msr) {
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â era->ref--;
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â break;
> + Â Â Â Â Â Â Â Â Â Â Â }
> + Â Â Â Â Â Â Â }
> + Â Â Â Â Â Â Â allref = 0;
> + Â Â Â Â Â Â Â for (i = 0; i < MAX_EXTRA_REGS; i++)
> + Â Â Â Â Â Â Â Â Â Â Â allref += pc->regs[i].ref;
> + Â Â Â Â Â Â Â if (allref == 0)
> + Â Â Â Â Â Â Â Â Â Â Â cpuc->percore_used = 0;
> + Â Â Â Â Â Â Â raw_spin_unlock(&pc->lock);
> + Â Â Â Â Â Â Â break;
> + Â Â Â }
> +}
> +
> Âstatic int intel_pmu_hw_config(struct perf_event *event)
> Â{
> Â Â Â Âint ret = x86_pmu_hw_config(event);
> @@ -880,11 +1029,43 @@ static __initconst const struct x86_pmu core_pmu = {
> Â Â Â Â */
>    Â.max_period       = (1ULL << 31) - 1,
> Â Â Â Â.get_event_constraints Â= intel_get_event_constraints,
> + Â Â Â .put_event_constraints Â= intel_put_event_constraints,
>    Â.event_constraints   Â= intel_core_event_constraints,
> Â};
>
> +static int intel_pmu_cpu_prepare(int cpu)
> +{
> + Â Â Â struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> +
> + Â Â Â cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
> + Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â GFP_KERNEL, cpu_to_node(cpu));
> + Â Â Â if (!cpuc->per_core)
> + Â Â Â Â Â Â Â return NOTIFY_BAD;
> +
> + Â Â Â raw_spin_lock_init(&cpuc->per_core->lock);
> + Â Â Â cpuc->per_core->core_id = -1;
> + Â Â Â return NOTIFY_OK;
> +}
> +
> Âstatic void intel_pmu_cpu_starting(int cpu)
> Â{
> + Â Â Â struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> + Â Â Â int core_id = topology_core_id(cpu);
> + Â Â Â int i;
> +
> + Â Â Â for_each_online_cpu(i) {
> + Â Â Â Â Â Â Â struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
> +
> + Â Â Â Â Â Â Â if (pc && pc->core_id == core_id) {
> + Â Â Â Â Â Â Â Â Â Â Â kfree(cpuc->per_core);
> + Â Â Â Â Â Â Â Â Â Â Â cpuc->per_core = pc;
> + Â Â Â Â Â Â Â Â Â Â Â break;
> + Â Â Â Â Â Â Â }
> + Â Â Â }
> +
> + Â Â Â cpuc->per_core->core_id = core_id;
> + Â Â Â cpuc->per_core->refcnt++;
> +
> Â Â Â Âinit_debug_store_on_cpu(cpu);
> Â Â Â Â/*
> Â Â Â Â * Deal with CPUs that don't clear their LBRs on power-up.
> @@ -894,6 +1075,15 @@ static void intel_pmu_cpu_starting(int cpu)
>
> Âstatic void intel_pmu_cpu_dying(int cpu)
> Â{
> + Â Â Â struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> + Â Â Â struct intel_percore *pc = cpuc->per_core;
> +
> + Â Â Â if (pc) {
> + Â Â Â Â Â Â Â if (pc->core_id == -1 || --pc->refcnt == 0)
> + Â Â Â Â Â Â Â Â Â Â Â kfree(pc);
> + Â Â Â Â Â Â Â cpuc->per_core = NULL;
> + Â Â Â }
> +
> Â Â Â Âfini_debug_store_on_cpu(cpu);
> Â}
>
> @@ -918,7 +1108,9 @@ static __initconst const struct x86_pmu intel_pmu = {
> Â Â Â Â */
>    Â.max_period       = (1ULL << 31) - 1,
> Â Â Â Â.get_event_constraints Â= intel_get_event_constraints,
> + Â Â Â .put_event_constraints Â= intel_put_event_constraints,
>
> +    .cpu_prepare      Â= intel_pmu_cpu_prepare,
>    Â.cpu_starting      = intel_pmu_cpu_starting,
>    Â.cpu_dying       Â= intel_pmu_cpu_dying,
> Â};
> @@ -1036,7 +1228,10 @@ static __init int intel_pmu_init(void)
> Â Â Â Â Â Â Â Âintel_pmu_lbr_init_nhm();
>
> Â Â Â Â Â Â Â Âx86_pmu.event_constraints = intel_nehalem_event_constraints;
> + Â Â Â Â Â Â Â x86_pmu.percore_constraints =
> + Â Â Â Â Â Â Â Â Â Â Â intel_nehalem_percore_constraints;
> Â Â Â Â Â Â Â Âx86_pmu.enable_all = intel_pmu_nhm_enable_all;
> + Â Â Â Â Â Â Â x86_pmu.extra_regs = intel_nehalem_extra_regs;
> Â Â Â Â Â Â Â Âpr_cont("Nehalem events, ");
> Â Â Â Â Â Â Â Âbreak;
>
> @@ -1058,7 +1253,10 @@ static __init int intel_pmu_init(void)
> Â Â Â Â Â Â Â Âintel_pmu_lbr_init_nhm();
>
> Â Â Â Â Â Â Â Âx86_pmu.event_constraints = intel_westmere_event_constraints;
> + Â Â Â Â Â Â Â x86_pmu.percore_constraints =
> + Â Â Â Â Â Â Â Â Â Â Â intel_westmere_percore_constraints;
> Â Â Â Â Â Â Â Âx86_pmu.enable_all = intel_pmu_nhm_enable_all;
> + Â Â Â Â Â Â Â x86_pmu.extra_regs = intel_westmere_extra_regs;
> Â Â Â Â Â Â Â Âpr_cont("Westmere events, ");
> Â Â Â Â Â Â Â Âbreak;
>
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index dda5b0a..d24d9ab 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -539,6 +539,8 @@ struct hw_perf_event {
>            Âunsigned long  event_base;
>            Âint       idx;
>            Âint       last_cpu;
> +            unsigned int  Âextra_reg;
> + Â Â Â Â Â Â Â Â Â Â Â u64 Â Â Â Â Â Â extra_config;
> Â Â Â Â Â Â Â Â};
> Â Â Â Â Â Â Â Âstruct { /* software */
> Â Â Â Â Â Â Â Â Â Â Â Âstruct hrtimer Âhrtimer;
> --
> 1.7.3
>
>
>
>
>
>
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/