Re: [RFC 3/4] perf: Allow per PMU access control
From: Alexey Budankov
Date: Wed Jun 27 2018 - 05:47:24 EST
On 27.06.2018 12:15, Tvrtko Ursulin wrote:
>
> On 26/06/18 18:25, Alexey Budankov wrote:
>> Hi,
>>
>> On 26.06.2018 18:36, Tvrtko Ursulin wrote:
>>> From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
>>>
>>> For situations where sysadmins might want to allow different level of
>>> access control for different PMUs, we start creating per-PMU
>>> perf_event_paranoid controls in sysfs.
>>>
>>> These work in equivalent fashion as the existing perf_event_paranoid
>>> sysctl, which now becomes the parent control for each PMU.
>>>
>>> On PMU registration the global/parent value will be inherited by each PMU,
>>> as it will be propagated to all registered PMUs when the sysctl is
>>> updated.
>>>
>>> At any later point individual PMU access controls, located in
>>> <sysfs>/device/<pmu-name>/perf_event_paranoid, can be adjusted to achieve
>>> fine grained access control.
>>>
>>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
>>> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
>>> Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
>>> Cc: Ingo Molnar <mingo@xxxxxxxxxx>
>>> Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
>>> Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
>>> Cc: Alexander Shishkin <alexander.shishkin@xxxxxxxxxxxxxxx>
>>> Cc: Jiri Olsa <jolsa@xxxxxxxxxx>
>>> Cc: Namhyung Kim <namhyung@xxxxxxxxxx>
>>> Cc: Madhavan Srinivasan <maddy@xxxxxxxxxxxxxxxxxx>
>>> Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
>>> Cc: Alexey Budankov <alexey.budankov@xxxxxxxxxxxxxxx>
>>> Cc: linux-kernel@xxxxxxxxxxxxxxx
>>> Cc: x86@xxxxxxxxxx
>>> ---
>>>  include/linux/perf_event.h | 12 ++++++--
>>>  kernel/events/core.c       | 59 ++++++++++++++++++++++++++++++++++++++
>>>  kernel/sysctl.c            |  4 ++-
>>>  3 files changed, 71 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
>>> index d7938d88c028..22e91cc2d9e1 100644
>>> --- a/include/linux/perf_event.h
>>> +++ b/include/linux/perf_event.h
>>> @@ -271,6 +271,9 @@ struct pmu {
>>>  	/* number of address filters this PMU can do */
>>>  	unsigned int			nr_addr_filters;
>>>  
>>> +	/* per PMU access control */
>>> +	int				perf_event_paranoid;
>>
>> It looks like it needs to be declared as atomic, and atomic_read/atomic_write
>> operations need to be explicitly used below in the patch, since this
>> variable may be manipulated by different threads at the same time
>> without explicit locking.
>
> It is just a write of an integer from either sysfs access or sysctl. As such I don't think going atomic would change anything. There is no RMW or increment or anything on it.
>
> Unless there are architectures where int stores are not atomic? But then the existing sysctl would have the same issue. So I suspect we can indeed rely on int store being atomic.
Yep, aligned word read/write is atomic on Intel and there is no runtime issue
currently, but the implementation itself is multithreaded and implicitly relies
on that atomicity, so my suggestion is just to explicitly code that assumption :).
Also, as you mentioned, that makes the arch-independent part of the code more portable.
>
> Regards,
>
> Tvrtko
>
>>
>>> +
>>>  	/*
>>>  	 * Fully disable/enable this PMU, can be used to protect from the PMI
>>>  	 * as well as for lazy/batch writing of the MSRs.
>>> @@ -1168,6 +1171,9 @@ extern int sysctl_perf_cpu_time_max_percent;
>>>  
>>>  extern void perf_sample_event_took(u64 sample_len_ns);
>>>  
>>> +extern int perf_proc_paranoid_handler(struct ctl_table *table, int write,
>>> +		void __user *buffer, size_t *lenp,
>>> +		loff_t *ppos);
>>>  extern int perf_proc_update_handler(struct ctl_table *table, int write,
>>>  		void __user *buffer, size_t *lenp,
>>>  		loff_t *ppos);
>>> @@ -1180,17 +1186,17 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
>>>  
>>>  static inline bool perf_paranoid_tracepoint_raw(const struct pmu *pmu)
>>>  {
>>> -	return sysctl_perf_event_paranoid > -1;
>>> +	return pmu->perf_event_paranoid > -1;
>>>  }
>>>  
>>>  static inline bool perf_paranoid_cpu(const struct pmu *pmu)
>>>  {
>>> -	return sysctl_perf_event_paranoid > 0;
>>> +	return pmu->perf_event_paranoid > 0;
>>>  }
>>>  
>>>  static inline bool perf_paranoid_kernel(const struct pmu *pmu)
>>>  {
>>> -	return sysctl_perf_event_paranoid > 1;
>>> +	return pmu->perf_event_paranoid > 1;
>>>  }
>>>  
>>>  extern void perf_event_init(void);
>>> diff --git a/kernel/events/core.c b/kernel/events/core.c
>>> index 370c89e81722..da36317dc8dc 100644
>>> --- a/kernel/events/core.c
>>> +++ b/kernel/events/core.c
>>> @@ -432,6 +432,24 @@ static void update_perf_cpu_limits(void)
>>>  
>>>  static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
>>>  
>>> +int perf_proc_paranoid_handler(struct ctl_table *table, int write,
>>> +		void __user *buffer, size_t *lenp,
>>> +		loff_t *ppos)
>>> +{
>>> +	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
>>> +	struct pmu *pmu;
>>> +
>>> +	if (ret || !write)
>>> +		return ret;
>>> +
>>> +	mutex_lock(&pmus_lock);
>>> +	list_for_each_entry(pmu, &pmus, entry)
>>> +		pmu->perf_event_paranoid = sysctl_perf_event_paranoid;
>>> +	mutex_unlock(&pmus_lock);
>>> +
>>> +	return 0;
>>> +}
>>> +
>>>  int perf_proc_update_handler(struct ctl_table *table, int write,
>>>  		void __user *buffer, size_t *lenp,
>>>  		loff_t *ppos)
>>> @@ -9425,6 +9443,41 @@ static void free_pmu_context(struct pmu *pmu)
>>>  	mutex_unlock(&pmus_lock);
>>>  }
>>>  
>>> +/*
>>> + * Fine-grained access control:
>>> + */
>>> +static ssize_t
>>> +perf_event_paranoid_show(struct device *dev,
>>> +			 struct device_attribute *attr,
>>> +			 char *page)
>>> +{
>>> +	struct pmu *pmu = dev_get_drvdata(dev);
>>> +
>>> +	return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->perf_event_paranoid);
>>> +}
>>> +
>>> +static ssize_t
>>> +perf_event_paranoid_store(struct device *dev,
>>> +			  struct device_attribute *attr,
>>> +			  const char *buf, size_t count)
>>> +{
>>> +	struct pmu *pmu = dev_get_drvdata(dev);
>>> +	int ret, val;
>>> +
>>> +	ret = kstrtoint(buf, 0, &val);
>>> +	if (ret)
>>> +		return ret;
>>> +
>>> +	if (val < -1 || val > 2)
>>> +		return -EINVAL;
>>> +
>>> +	pmu->perf_event_paranoid = val;
>>> +
>>> +	return count;
>>> +}
>>> +
>>> +static DEVICE_ATTR_RW(perf_event_paranoid);
>>> +
>>>  
>>>  /*
>>>   * Let userspace know that this PMU supports address range filtering:
>>>   */
>>> @@ -9539,6 +9592,11 @@ static int pmu_dev_alloc(struct pmu *pmu)
>>>  	if (ret)
>>>  		goto free_dev;
>>>  
>>> +	/* Add fine-grained access control attribute. */
>>> +	ret = device_create_file(pmu->dev, &dev_attr_perf_event_paranoid);
>>> +	if (ret)
>>> +		goto del_dev;
>>> +
>>>  	/* For PMUs with address filters, throw in an extra attribute: */
>>>  	if (pmu->nr_addr_filters)
>>>  		ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
>>> @@ -9570,6 +9628,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
>>>  	if (!pmu->pmu_disable_count)
>>>  		goto unlock;
>>>  
>>> +	pmu->perf_event_paranoid = sysctl_perf_event_paranoid;
>>>  	pmu->type = -1;
>>>  	if (!name)
>>>  		goto skip_type;
>>> diff --git a/kernel/sysctl.c b/kernel/sysctl.c
>>> index 2d9837c0aff4..7f6fccb64a30 100644
>>> --- a/kernel/sysctl.c
>>> +++ b/kernel/sysctl.c
>>> @@ -1142,7 +1142,9 @@ static struct ctl_table kern_table[] = {
>>>  		.data		= &sysctl_perf_event_paranoid,
>>>  		.maxlen		= sizeof(sysctl_perf_event_paranoid),
>>>  		.mode		= 0644,
>>> -		.proc_handler	= proc_dointvec,
>>> +		.proc_handler	= perf_proc_paranoid_handler,
>>> +		.extra1		= &neg_one,
>>> +		.extra2		= &two,
>>>  	},
>>>  	{
>>>  		.procname	= "perf_event_mlock_kb",
>>>
>>
>