On Wed, Oct 24, 2018 at 08:11:15AM -0700, kan.liang@xxxxxxxxxxxxxxx wrote:
+void perf_event_munmap(void)
+{
+	struct perf_cpu_context *cpuctx;
+	unsigned long flags;
+	struct pmu *pmu;
+
+	local_irq_save(flags);
It is impossible to get here with IRQs already disabled; do_munmap() runs in process context with interrupts enabled, so the local_irq_save()/local_irq_restore() pair is overkill and plain local_irq_disable()/local_irq_enable() will do. A sketch follows the function below.
+	list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
+		pmu = cpuctx->ctx.pmu;
+
+		if (!pmu->munmap)
+			continue;
+
+		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+		perf_pmu_disable(pmu);
+
+		pmu->munmap();
+
+		perf_pmu_enable(pmu);
+
+		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+	}
+	local_irq_restore(flags);
+}
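
Something like the below is what I have in mind; an untested sketch that keeps your loop as-is and only drops the flags dance:

	void perf_event_munmap(void)
	{
		struct perf_cpu_context *cpuctx;
		struct pmu *pmu;

		/* do_munmap() only runs in process context with IRQs on */
		local_irq_disable();
		list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
			pmu = cpuctx->ctx.pmu;

			if (!pmu->munmap)
				continue;

			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(pmu);

			pmu->munmap();

			perf_pmu_enable(pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
		local_irq_enable();
	}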
+
 static void perf_event_switch(struct task_struct *task,
 			      struct task_struct *next_prev, bool sched_in);
diff --git a/mm/mmap.c b/mm/mmap.c
index 5f2b2b184c60..61978ad8c480 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2777,6 +2777,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/*
 	 * Remove the vma's, and unmap the actual pages
 	 */
+	perf_event_munmap();
I think that if you add the munmap hook, you should do it properly, and at least make it general enough that we can also solve the other munmap problem.
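
I won't spell that other problem out here, but at a minimum the hook should carry the range being unmapped, so a PMU can react to the affected addresses instead of getting a blind notification. Purely illustrative; the start/len arguments are my sketch, not something in the posted patch:

	/* illustrative only: hand the PMU the range that is going away */
	void perf_event_munmap(unsigned long start, unsigned long len);

	/* with a matching method in struct pmu: */
	void (*munmap)(unsigned long start, unsigned long len);

and the call site above becomes (both start and len are already in scope in do_munmap()):

	perf_event_munmap(start, len);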
 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
 	unmap_region(mm, vma, prev, start, end);