perf: Cancel the mux hrtimer during CPU hotplug to avoid migration

The hrtimer core migrates even pinned timers to a different CPU when
the CPU they are pinned to is hotplugged out. However, perf-core
maintains its mux hrtimers strictly per CPU: each hrtimer carries the
perf_cpu_context of that particular CPU and would lose this context if
it were migrated to a different CPU. Hence, cancel the mux hrtimer for
the CPU that is about to go down and restart it (if required) when
perf events are next created on that CPU.
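
For reference, the restart side relies on the existing
perf_mux_hrtimer_restart() path, which only re-arms the pinned timer
when hrtimer_active is clear; clearing the flag in the hotplug path is
what allows the timer to come back once events are created on the CPU
again. A rough sketch of that logic (for context only, not part of
this patch; the exact code may differ between kernel trees):

	/*
	 * Approximate shape of perf_mux_hrtimer_restart() in
	 * kernel/events/core.c: re-arm the pinned mux hrtimer only if it
	 * is not already active on this CPU.
	 */
	static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
	{
		struct hrtimer *timer = &cpuctx->hrtimer;
		unsigned long flags;

		raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
		if (!cpuctx->hrtimer_active) {
			cpuctx->hrtimer_active = 1;
			hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
			hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
		}
		raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

		return 0;
	}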

Change-Id: I7a1d0456208855e3a99a7d49e59c6dae811d146e
Signed-off-by: Raghavendra Rao Ananta <rananta@codeaurora.org>
[mojha@codeaurora.org: Resolved merge conflict and added missing
 `cpuctx` variable to avoid build failure]
Signed-off-by: Mukesh Ojha <mojha@codeaurora.org>
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 47c5b39..d32e7b8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -266,7 +266,7 @@
 	int				capabilities;
 
 	int * __percpu			pmu_disable_count;
-	struct perf_cpu_context * __percpu pmu_cpu_context;
+	struct perf_cpu_context __percpu *pmu_cpu_context;
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eb57fb3..6be8ea2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11212,13 +11212,25 @@
 
 static void perf_event_exit_cpu_context(int cpu)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	unsigned long flags;
 	struct pmu *pmu;
 	int idx;
 
 	idx = srcu_read_lock(&pmus_srcu);
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
+
+		/* Cancel the mux hrtimer to avoid CPU migration */
+		if (pmu->task_ctx_nr != perf_sw_context) {
+			raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+			hrtimer_cancel(&cpuctx->hrtimer);
+			cpuctx->hrtimer_active = 0;
+			raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock,
+							flags);
+		}
 
 		mutex_lock(&ctx->mutex);
 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);