Merge branches 'perf/event-nos', 'perf/updates' and 'perf/omap4' into for-rmk
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
new file mode 100644
index 0000000..a0ada3e
--- /dev/null
+++ b/arch/arm/include/asm/cti.h
@@ -0,0 +1,179 @@
+#ifndef __ASMARM_CTI_H
+#define __ASMARM_CTI_H
+
+#include <asm/io.h>
+
+/* The register definitions are from section 3.2 of
+ * Embedded Cross Trigger Revision: r0p0
+ */
+#define		CTICONTROL		0x000
+#define		CTISTATUS		0x004
+#define		CTILOCK			0x008
+#define		CTIPROTECTION		0x00C
+#define		CTIINTACK		0x010
+#define		CTIAPPSET		0x014
+#define		CTIAPPCLEAR		0x018
+#define		CTIAPPPULSE		0x01c
+#define		CTIINEN			0x020
+#define		CTIOUTEN		0x0A0
+#define		CTITRIGINSTATUS		0x130
+#define		CTITRIGOUTSTATUS	0x134
+#define		CTICHINSTATUS		0x138
+#define		CTICHOUTSTATUS		0x13c
+#define		CTIPERIPHID0		0xFE0
+#define		CTIPERIPHID1		0xFE4
+#define		CTIPERIPHID2		0xFE8
+#define		CTIPERIPHID3		0xFEC
+#define		CTIPCELLID0		0xFF0
+#define		CTIPCELLID1		0xFF4
+#define		CTIPCELLID2		0xFF8
+#define		CTIPCELLID3		0xFFC
+
+/* The following registers are from section 3.6.4 of the
+ * CoreSight v1.0 Architecture Specification
+ */
+#define		LOCKACCESS		0xFB0
+#define		LOCKSTATUS		0xFB4
+
+/* Writing this value to LOCKACCESS unlocks the module; writing any
+ * other value locks the module.
+ */
+#define		LOCKCODE		0xC5ACCE55
+
+/**
+ * struct cti - cross trigger interface struct
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out_for_irq: trigger out number which will cause
+ *	the @irq to happen
+ *
+ * Structure used to operate on the cti registers.
+ */
+struct cti {
+	void __iomem *base;
+	int irq;
+	int trig_out_for_irq;
+};
+
+/**
+ * cti_init - initialize the cti instance
+ * @cti: cti instance
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out: trigger out number which will cause
+ *	the @irq to happen
+ *
+ * Called by machine code to pass the board-dependent
+ * @base, @irq and @trig_out to the cti instance.
+ */
+static inline void cti_init(struct cti *cti,
+	void __iomem *base, int irq, int trig_out)
+{
+	cti->base = base;
+	cti->irq  = irq;
+	cti->trig_out_for_irq = trig_out;
+}
+
+/**
+ * cti_map_trigger - use channel @chan to map @trig_in to @trig_out
+ * @cti: cti instance
+ * @trig_in: trigger in number
+ * @trig_out: trigger out number
+ * @chan: channel number
+ *
+ * This function maps trigger input @trig_in to trigger output
+ * @trig_out using channel @chan.
+ */
+static inline void cti_map_trigger(struct cti *cti,
+	int trig_in, int trig_out, int chan)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINEN + trig_in * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIINEN + trig_in * 4);
+
+	val = __raw_readl(base + CTIOUTEN + trig_out * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIOUTEN + trig_out * 4);
+}
+
+/**
+ * cti_enable - enable the cti module
+ * @cti: cti instance
+ *
+ * enable the cti module
+ */
+static inline void cti_enable(struct cti *cti)
+{
+	__raw_writel(0x1, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_disable - disable the cti module
+ * @cti: cti instance
+ *
+ * disable the cti module
+ */
+static inline void cti_disable(struct cti *cti)
+{
+	__raw_writel(0, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_irq_ack - clear the cti irq
+ * @cti: cti instance
+ *
+ * clear the cti irq
+ */
+static inline void cti_irq_ack(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINTACK);
+	val |= BIT(cti->trig_out_for_irq);
+	__raw_writel(val, base + CTIINTACK);
+}
+
+/**
+ * cti_unlock - unlock cti module
+ * @cti: cti instance
+ *
+ * Unlock the cti module; while it is locked, writes to the
+ * cti module are not allowed.
+ */
+static inline void cti_unlock(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + LOCKSTATUS);
+
+	if (val & 1) {
+		val = LOCKCODE;
+		__raw_writel(val, base + LOCKACCESS);
+	}
+}
+
+/**
+ * cti_lock - lock cti module
+ * @cti: cti instance
+ *
+ * Lock the cti module, so that writes to the cti
+ * module are no longer allowed.
+ */
+static inline void cti_lock(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + LOCKSTATUS);
+
+	if (!(val & 1)) {
+		val = ~LOCKCODE;
+		__raw_writel(val, base + LOCKACCESS);
+	}
+}
+#endif
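As a usage illustration (not part of the patch), the sequence below sketches how board code might drive the helpers above; the example_* names and the base address, irq and trigger/channel numbers are hypothetical placeholders rather than values taken from any real SoC:

	#include <linux/interrupt.h>
	#include <asm/cti.h>

	static struct cti example_cti;

	static void example_cti_setup(void __iomem *base, int irq)
	{
		/* Record the board-specific base, irq and the trigger
		 * out line (here 6) that raises @irq. */
		cti_init(&example_cti, base, irq, 6);

		/* The module ignores writes until it is unlocked. */
		cti_unlock(&example_cti);

		/* Route trigger in 1 to trigger out 6 over channel 2,
		 * then switch the CTI on. */
		cti_map_trigger(&example_cti, 1, 6, 2);
		cti_enable(&example_cti);
	}

	static irqreturn_t example_cti_isr(int irq, void *dev)
	{
		/* Acknowledge the cross trigger output, otherwise the
		 * interrupt stays asserted. */
		cti_irq_ack(&example_cti);
		return IRQ_HANDLED;
	}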
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 0f8e382..99cfe36 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -32,7 +32,4 @@
 extern enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void);
 
-extern int
-armpmu_get_max_events(void);
-
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0bda22c..b5a5be2 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -27,13 +27,22 @@
 /*
  * struct arm_pmu_platdata - ARM PMU platform data
  *
- * @handle_irq: an optional handler which will be called from the interrupt and
- * passed the address of the low level handler, and can be used to implement
- * any platform specific handling before or after calling it.
+ * @handle_irq: an optional handler which will be called from the
+ *	interrupt and passed the address of the low level handler,
+ *	and can be used to implement any platform specific handling
+ *	before or after calling it.
+ * @enable_irq: an optional handler which will be called after
+ *	request_irq() and can be used to handle any platform
+ *	specific irq enablement
+ * @disable_irq: an optional handler which will be called before
+ *	free_irq() and can be used to handle any platform
+ *	specific irq disablement
  */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev,
 				  irq_handler_t pmu_handler);
+	void (*enable_irq)(int irq);
+	void (*disable_irq)(int irq);
 };
 
 #ifdef CONFIG_CPU_HAS_PMU
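For illustration only, a board file might populate the new callbacks roughly as follows; the example_* names are hypothetical and the empty bodies stand in for whatever platform specific routing (for instance via a cross trigger interface) the SoC needs:

	#include <linux/interrupt.h>
	#include <asm/pmu.h>

	static void example_pmu_enable_irq(int irq)
	{
		/* Called after request_irq(): perform any platform
		 * specific routing needed for the PMU interrupt to
		 * reach the CPU. */
	}

	static void example_pmu_disable_irq(int irq)
	{
		/* Called before free_irq(): undo whatever
		 * example_pmu_enable_irq() set up. */
	}

	static struct arm_pmu_platdata example_pmu_platdata = {
		.enable_irq	= example_pmu_enable_irq,
		.disable_irq	= example_pmu_disable_irq,
	};

The perf_event.c hunks below show where these hooks are invoked: enable_irq right after a successful request_irq(), disable_irq just before free_irq().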
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c475379..172101a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -59,8 +59,7 @@
 }
 EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
 
-int
-armpmu_get_max_events(void)
+int perf_num_counters(void)
 {
 	int max_events = 0;
 
@@ -69,12 +68,6 @@
 
 	return max_events;
 }
-EXPORT_SYMBOL_GPL(armpmu_get_max_events);
-
-int perf_num_counters(void)
-{
-	return armpmu_get_max_events();
-}
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
 #define HW_OP_UNSUPPORTED		0xFFFF
@@ -380,6 +373,8 @@
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
+	struct arm_pmu_platdata *plat =
+		dev_get_platdata(&pmu_device->dev);
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
@@ -387,8 +382,11 @@
 		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
 			continue;
 		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
+		if (irq >= 0) {
+			if (plat && plat->disable_irq)
+				plat->disable_irq(irq);
 			free_irq(irq, armpmu);
+		}
 	}
 
 	release_pmu(armpmu->type);
@@ -448,7 +446,8 @@
 				irq);
 			armpmu_release_hardware(armpmu);
 			return err;
-		}
+		} else if (plat && plat->enable_irq)
+			plat->enable_irq(irq);
 
 		cpumask_set_cpu(i, &armpmu->active_irqs);
 	}