Merge "msm: perf: tracectr: Initialize cnts after hotplug"
diff --git a/arch/arm/mach-msm/perf_debug.c b/arch/arm/mach-msm/perf_debug.c
index 05d3cef..79b9aab 100644
--- a/arch/arm/mach-msm/perf_debug.c
+++ b/arch/arm/mach-msm/perf_debug.c
@@ -46,6 +46,7 @@
 	"21 Perf: preserve registers across hotplug\n"
 	"22 msm: perf: fix formatting of trace entry\n"
 	"23 msm: perf: Fix cpu id logic in tracectr notifier\n"
+	"24 msm: perf: tracectr: Initialize cnts after hotplug\n"
 ;
 
 static ssize_t desc_read(struct file *fp, char __user *buf,
diff --git a/arch/arm/mach-msm/perf_trace_counters.c b/arch/arm/mach-msm/perf_trace_counters.c
index 0a679b1..8eb1244 100644
--- a/arch/arm/mach-msm/perf_trace_counters.c
+++ b/arch/arm/mach-msm/perf_trace_counters.c
@@ -21,21 +21,17 @@
 DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
 DEFINE_PER_CPU(u32[NUM_L2_PERCPU], previous_l2_cnts);
 DEFINE_PER_CPU(u32, old_pid);
+DEFINE_PER_CPU(u32, hotplug_flag);
 /* Reset per_cpu variables that store counter values upon CPU hotplug */
 static int tracectr_cpu_hotplug_notifier(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	int ret = NOTIFY_OK;
 	int cpu = (int)hcpu;
-	int i;
 
-	if ((action & (~CPU_TASKS_FROZEN)) == CPU_UP_PREPARE) {
-		per_cpu(previous_ccnt, cpu) = 0;
-		for (i = 0; i < NUM_L1_CTRS; i++)
-			per_cpu(previous_l1_cnts[i], cpu) = 0;
-		for (i = 0; i < NUM_L2_PERCPU; i++)
-			per_cpu(previous_l2_cnts[i], cpu) = 0;
-	}
+	if ((action & (~CPU_TASKS_FROZEN)) == CPU_STARTING)
+		per_cpu(hotplug_flag, cpu) = 1;
+
 	return ret;
 }
 
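For reference, the notifier above only takes effect once it is hooked into the CPU hotplug notifier chain; that registration is not part of this patch and is presumably done elsewhere in perf_trace_counters.c. A minimal sketch of how such a block is typically registered on these pre-4.x kernels, assuming register_cpu_notifier() is the API used (the variable and init-function names below are hypothetical):

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Sketch only: the real notifier_block and registration are not shown in this diff. */
static struct notifier_block tracectr_cpu_hotplug_nb = {	/* hypothetical name */
	.notifier_call = tracectr_cpu_hotplug_notifier,
};

static int __init tracectr_hotplug_setup(void)	/* hypothetical name */
{
	/* CPU_STARTING callbacks then run on the CPU that is coming online. */
	register_cpu_notifier(&tracectr_cpu_hotplug_nb);
	return 0;
}
late_initcall(tracectr_hotplug_setup);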
@@ -43,6 +39,35 @@
 	.notifier_call = tracectr_cpu_hotplug_notifier,
 };
 
+static void setup_prev_cnts(u32 cpu)
+{
+	int i;
+	u32 cnten_val;
+
+	/* Read PMCNTENSET */
+	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(cnten_val));
+	/* Disable all the counters that were enabled */
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r"(cnten_val));
+	if (cnten_val & CC) {
+		/* Read value */
+		asm volatile("mrc p15, 0, %0, c9, c13, 0"
+			: "=r"(per_cpu(previous_ccnt, cpu)));
+	}
+
+	for (i = 0; i < NUM_L1_CTRS; i++) {
+		if (cnten_val & (1 << i)) {
+			/* Select */
+			asm volatile("mcr p15, 0, %0, c9, c12, 5"
+				: : "r"(i));
+			/* Read value */
+			asm volatile("mrc p15, 0, %0, c9, c13, 2"
+				: "=r"(per_cpu(previous_l1_cnts[i], cpu)));
+		}
+	}
+	/* Enable all the counters that were disabled */
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r"(cnten_val));
+}
+
 static int tracectr_notifier(struct notifier_block *self, unsigned long cmd,
 		void *v)
 {
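The cp15 accesses in setup_prev_cnts() map onto the ARMv7 PMU registers: c9,c12,1 is PMCNTENSET, c9,c12,2 is PMCNTENCLR, c9,c13,0 is PMCCNTR, c9,c12,5 is PMSELR and c9,c13,2 is PMXEVCNTR. CC and the counter-count macros come from perf_trace_counters.h, which this patch does not touch; the definitions below are illustrative assumptions that only show how the function uses them:

/* Assumed shape of the header definitions (illustrative values only):
 * CC is the PMCNTENSET/PMCNTENCLR bit for the cycle counter, and the
 * number of event counters is SoC dependent.
 */
#define CC		(1U << 31)	/* cycle counter enable bit */
#define NUM_L1_CTRS	4		/* per-CPU L1 event counters (example value) */
#define NUM_L2_PERCPU	2		/* per-CPU L2 PMU counters (example value) */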
@@ -54,9 +79,15 @@
 		return -EFAULT;
 
 	current_pid = thread->task->pid;
-	if (per_cpu(old_pid, cpu) != -1)
-		trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
-						current_pid);
+	if (per_cpu(old_pid, cpu) != -1) {
+		if (per_cpu(hotplug_flag, cpu) == 1) {
+			per_cpu(hotplug_flag, cpu) = 0;
+			setup_prev_cnts(cpu);
+		} else {
+			trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
+					current_pid);
+		}
+	}
 	per_cpu(old_pid, cpu) = current_pid;
 	return NOTIFY_OK;
 }
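The resync-then-skip flow above matches what the tracepoint is expected to do on every context switch: read the live counters, emit deltas against the stored previous_* values, and update previous_*. After a hotplug the hardware counter values no longer match the stored previous_* snapshots, so the first switch on that CPU would otherwise report a meaningless delta. A rough sketch of the per-counter arithmetic this guards, for the cycle counter only (the helper name is hypothetical; the real work happens inside trace_sched_switch_with_ctrs(), which this diff does not show):

/* Hypothetical illustration of the delta the trace entry is assumed to
 * compute for the cycle counter; not part of this patch.
 */
static u32 ccnt_delta_since_last_switch(u32 cpu)
{
	u32 ccnt;

	/* Read PMCCNTR on the local CPU */
	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(ccnt));

	/*
	 * After hotplug the hardware value no longer matches previous_ccnt,
	 * so this unsigned subtraction would yield a bogus delta;
	 * setup_prev_cnts() resynchronizes previous_ccnt instead, and the
	 * first post-hotplug sample is skipped.
	 */
	return ccnt - per_cpu(previous_ccnt, cpu);
}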