Merge "Perf: arm: disable perf_event_read during hotplug"
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 852548c..bb24b4e 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -29,6 +29,9 @@
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
+/* set while this CPU is between its hotplug going-down and online callbacks */
+static DEFINE_PER_CPU(bool, is_hotplugging);
+
 /*
  * ARMv8 PMUv3 Performance Events handling code.
  * Common event types (some are defined in asm/perf_event.h).
@@ -982,6 +984,10 @@
 	if (!cpu_pmu)
 		return;
 
+	/* PMU state may be in flux while this CPU is going through hotplug */
+	if (__this_cpu_read(is_hotplugging))
+		return;
+
 	hw_events = this_cpu_ptr(cpu_pmu->hw_events);
 
 	if (!hw_events)
@@ -1031,14 +1036,13 @@
 
 	pmu_idle_nb->cpu_pmu = cpu_pmu;
 	pmu_idle_nb->perf_cpu_idle_nb.notifier_call = perf_cpu_idle_notifier;
-	idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
 
 	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
 				    __armv8pmu_probe_pmu,
 				    cpu_pmu, 1);
 
-	if (ret)
-		idle_notifier_unregister(&pmu_idle_nb->perf_cpu_idle_nb);
+	if (!ret)
+		idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
 
 	return ret;
 }
@@ -1140,6 +1144,37 @@
 	{},
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int perf_event_hotplug_coming_up(unsigned int cpu)
+{
+	per_cpu(is_hotplugging, cpu) = false;
+	return 0;
+}
+
+static int perf_event_hotplug_going_down(unsigned int cpu)
+{
+	per_cpu(is_hotplugging, cpu) = true;
+	return 0;
+}
+
+static int perf_event_cpu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_NOTIFY_ONLINE,
+				"PERF_EVENT/CPUHP_AP_NOTIFY_ONLINE",
+				perf_event_hotplug_coming_up,
+				perf_event_hotplug_going_down);
+	if (ret)
+		pr_err("CPU hotplug notifier for perf_event.c could not be registered: %d\n",
+		       ret);
+
+	return ret;
+}
+#else
+static int perf_event_cpu_hp_init(void) { return 0; }
+#endif
+
 /*
  * Non DT systems have their micro/arch events probed at run-time.
  * A fairly complete list of generic events are provided and ones that
@@ -1152,6 +1187,16 @@
 
 static int armv8_pmu_device_probe(struct platform_device *pdev)
 {
+	int ret, cpu;
+
+	/* no CPU is mid-hotplug at probe time */
+	for_each_possible_cpu(cpu)
+		per_cpu(is_hotplugging, cpu) = false;
+
+	ret = perf_event_cpu_hp_init();
+	if (ret)
+		return ret;
+
 	if (acpi_disabled)
 		return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
 					    NULL);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 41f376d..a2a0eb0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -373,6 +373,8 @@
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
 static DEFINE_PER_CPU(bool, is_idle);
+/* set in perf_event_exit_cpu(), cleared when the CPU comes back online */
+static DEFINE_PER_CPU(bool, is_hotplugging);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -3495,6 +3496,10 @@
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 	struct pmu *pmu = event->pmu;
 
+	/* event state on this CPU is not safe to read mid-hotplug */
+	if (__this_cpu_read(is_hotplugging))
+		return;
+
 	/*
 	 * If this is a task context, we need to check whether it is
 	 * the current task context of this cpu.  If not it has been
@@ -3619,7 +3623,8 @@
 			return 0;
 		if (cpu_isolated(event_cpu) ||
 			(event->attr.exclude_idle &&
-				per_cpu(is_idle, event_cpu)))
+				per_cpu(is_idle, event_cpu)) ||
+			per_cpu(is_hotplugging, event_cpu))
 			active_event_skip_read = true;
 	}
 
@@ -3649,7 +3654,8 @@
 		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE ||
-			active_event_skip_read) {
+			(active_event_skip_read &&
+			!per_cpu(is_hotplugging, event_cpu))) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
 
@@ -10711,6 +10717,8 @@
 		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
 
 		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
+		per_cpu(is_hotplugging, cpu) = false;
+		per_cpu(is_idle, cpu) = false;
 	}
 }
 
@@ -10734,19 +10742,10 @@
 static void
 check_hotplug_start_event(struct perf_event *event)
 {
-	if (event->attr.type == PERF_TYPE_SOFTWARE) {
-		switch (event->attr.config) {
-		case PERF_COUNT_SW_CPU_CLOCK:
-			cpu_clock_event_start(event, 0);
-			break;
-		case PERF_COUNT_SW_TASK_CLOCK:
-			break;
-		default:
-			if (event->pmu->start)
-				event->pmu->start(event, 0);
-			break;
-		}
-	}
+	if (event->pmu->events_across_hotplug &&
+	    event->attr.type == PERF_TYPE_SOFTWARE &&
+	    event->pmu->start)
+		event->pmu->start(event, 0);
 }
 
 static int perf_event_start_swevents(unsigned int cpu)
@@ -10767,6 +10766,7 @@
 		mutex_unlock(&ctx->mutex);
 	}
 	srcu_read_unlock(&pmus_srcu, idx);
+	per_cpu(is_hotplugging, cpu) = false;
 	return 0;
 }
 
@@ -10783,22 +10783,13 @@
 			   struct perf_cpu_context *cpuctx,
 			   struct perf_event_context *ctx)
 {
-	if (!event->pmu->events_across_hotplug) {
+	if (event->pmu->events_across_hotplug &&
+	    event->attr.type == PERF_TYPE_SOFTWARE &&
+	    event->pmu->stop)
+		event->pmu->stop(event, PERF_EF_UPDATE);
+	else if (!event->pmu->events_across_hotplug)
 		__perf_remove_from_context(event, cpuctx,
 			ctx, (void *)DETACH_GROUP);
-	} else if (event->attr.type == PERF_TYPE_SOFTWARE) {
-		switch (event->attr.config) {
-		case PERF_COUNT_SW_CPU_CLOCK:
-			cpu_clock_event_stop(event, 0);
-			break;
-		case PERF_COUNT_SW_TASK_CLOCK:
-			break;
-		default:
-			if (event->pmu->stop)
-				event->pmu->stop(event, 0);
-			break;
-		}
-	}
 }
 
 static void __perf_event_exit_context(void *__info)
@@ -10837,6 +10828,7 @@
 
 int perf_event_exit_cpu(unsigned int cpu)
 {
+	per_cpu(is_hotplugging, cpu) = true;
 	perf_event_exit_cpu_context(cpu);
 	return 0;
 }
@@ -10880,6 +10872,24 @@
 	.notifier_call = event_idle_notif,
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int perf_cpu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
+				"PERF/CORE/CPUHP_AP_PERF_ONLINE",
+				perf_event_start_swevents,
+				perf_event_exit_cpu);
+	if (ret)
+		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
+		       ret);
+
+	return ret;
+}
+#else
+static int perf_cpu_hp_init(void) { return 0; }
+#endif
 
 void __init perf_event_init(void)
 {
@@ -10896,6 +10906,8 @@
 	perf_event_init_cpu(smp_processor_id());
 	idle_notifier_register(&perf_event_idle_nb);
 	register_reboot_notifier(&perf_reboot_notifier);
+	ret = perf_cpu_hp_init();
+	WARN(ret, "core perf_cpu_hp_init() failed with: %d", ret);
 
 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
@@ -10949,22 +10961,6 @@
 }
 device_initcall(perf_event_sysfs_init);
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int perf_cpu_hp_init(void)
-{
-	int ret;
-
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
-				"PERF/CORE/AP_PERF_ONLINE",
-				perf_event_start_swevents,
-				perf_event_exit_cpu);
-	if (ret)
-		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
-		       ret);
-	return ret;
-}
-subsys_initcall(perf_cpu_hp_init);
-#endif
 
 #ifdef CONFIG_CGROUP_PERF
 static struct cgroup_subsys_state *