ARM: local timers: add timer support using IO mapped registers

The current arch_timer code only supports access through the CP15
interface. Add support for ARM processors that only provide an
IO-mapped register interface.

Change-Id: Ide8be070d21609a2b1f4d6f0e0df1a27e6d978ff
Signed-off-by: Abhimanyu Kapur <abhimany@codeaurora.org>
Signed-off-by: Rohit Vaswani <rvaswani@codeaurora.org>
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 2098288..99ee2de 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -4,7 +4,7 @@
 #include <linux/ioport.h>
 
 struct arch_timer {
-	struct resource	res[2];
+	struct resource	res[3];
 };
 
 #ifdef CONFIG_ARM_ARCH_TIMER
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 81a9a71..88cf368 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -19,6 +19,7 @@
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
+#include <linux/of_address.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 
@@ -32,8 +33,43 @@
 static unsigned long arch_timer_rate;
 static int arch_timer_ppi;
 static int arch_timer_ppi2;
+static int is_irq_percpu;
 
 static struct clock_event_device __percpu **arch_timer_evt;
+static void __iomem *timer_base;
+
+static u32 timer_reg_read_cp15(int reg);
+static void timer_reg_write_cp15(int reg, u32 val);
+static inline cycle_t counter_get_cntpct_cp15(void);
+static inline cycle_t counter_get_cntvct_cp15(void);
+
+static u32 timer_reg_read_mem(int reg);
+static void timer_reg_write_mem(int reg, u32 val);
+static inline cycle_t counter_get_cntpct_mem(void);
+static inline cycle_t counter_get_cntvct_mem(void);
+
+struct arch_timer_operations {
+	void (*reg_write)(int, u32);
+	u32 (*reg_read)(int);
+	cycle_t (*get_cntpct)(void);
+	cycle_t (*get_cntvct)(void);
+};
+
+static struct arch_timer_operations arch_timer_ops_cp15 = {
+	.reg_read = &timer_reg_read_cp15,
+	.reg_write = &timer_reg_write_cp15,
+	.get_cntpct = &counter_get_cntpct_cp15,
+	.get_cntvct = &counter_get_cntvct_cp15,
+};
+
+static struct arch_timer_operations arch_timer_ops_mem = {
+	.reg_read = &timer_reg_read_mem,
+	.reg_write = &timer_reg_write_mem,
+	.get_cntpct = &counter_get_cntpct_mem,
+	.get_cntvct = &counter_get_cntvct_mem,
+};
+
+static struct arch_timer_operations *arch_specific_timer = &arch_timer_ops_cp15;
 
 /*
  * Architected system timer support.
@@ -47,7 +83,29 @@
 #define ARCH_TIMER_REG_FREQ		1
 #define ARCH_TIMER_REG_TVAL		2
 
-static void arch_timer_reg_write(int reg, u32 val)
+/* Iomapped Register Offsets */
+#define QTIMER_CNTP_LOW_REG		0x000
+#define QTIMER_CNTP_HIGH_REG		0x004
+#define QTIMER_CNTV_LOW_REG		0x008
+#define QTIMER_CNTV_HIGH_REG		0x00C
+#define QTIMER_CTRL_REG			0x02C
+#define QTIMER_FREQ_REG			0x010
+#define QTIMER_CNTP_TVAL_REG		0x028
+#define QTIMER_CNTV_TVAL_REG		0x038
+
+static void timer_reg_write_mem(int reg, u32 val)
+{
+	switch (reg) {
+	case ARCH_TIMER_REG_CTRL:
+		__raw_writel(val, timer_base + QTIMER_CTRL_REG);
+		break;
+	case ARCH_TIMER_REG_TVAL:
+		__raw_writel(val, timer_base + QTIMER_CNTP_TVAL_REG);
+		break;
+	}
+}
+
+static void timer_reg_write_cp15(int reg, u32 val)
 {
 	switch (reg) {
 	case ARCH_TIMER_REG_CTRL:
@@ -61,7 +119,28 @@
 	isb();
 }
 
-static u32 arch_timer_reg_read(int reg)
+static u32 timer_reg_read_mem(int reg)
+{
+	u32 val;
+
+	switch (reg) {
+	case ARCH_TIMER_REG_CTRL:
+		val = __raw_readl(timer_base + QTIMER_CTRL_REG);
+		break;
+	case ARCH_TIMER_REG_FREQ:
+		val = __raw_readl(timer_base + QTIMER_FREQ_REG);
+		break;
+	case ARCH_TIMER_REG_TVAL:
+		val = __raw_readl(timer_base + QTIMER_CNTP_TVAL_REG);
+		break;
+	default:
+		BUG();
+	}
+
+	return val;
+}
+
+static u32 timer_reg_read_cp15(int reg)
 {
 	u32 val;
 
@@ -87,10 +166,11 @@
 	struct clock_event_device *evt;
 	unsigned long ctrl;
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+	ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
-		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+		arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL,
+							ctrl);
 		evt = *__this_cpu_ptr(arch_timer_evt);
 		evt->event_handler(evt);
 		return IRQ_HANDLED;
@@ -103,9 +183,9 @@
 {
 	unsigned long ctrl;
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+	ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
 	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
-	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+	arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
 }
 
 static void arch_timer_set_mode(enum clock_event_mode mode,
@@ -126,12 +206,12 @@
 {
 	unsigned long ctrl;
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+	ctrl = arch_specific_timer->reg_read(ARCH_TIMER_REG_CTRL);
 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
 
-	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
-	arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
+	arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+	arch_specific_timer->reg_write(ARCH_TIMER_REG_TVAL, evt);
 
 	return 0;
 }
@@ -168,19 +248,16 @@
 static int local_timer_is_architected(void)
 {
 	return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
-	       ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
+		((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
 }
 
 static int arch_timer_available(void)
 {
 	unsigned long freq;
 
-	if (!local_timer_is_architected())
-		return -ENXIO;
-
 	if (arch_timer_rate == 0) {
-		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
-		freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
+		arch_specific_timer->reg_write(ARCH_TIMER_REG_CTRL, 0);
+		freq = arch_specific_timer->reg_read(ARCH_TIMER_REG_FREQ);
 
 		/* Check the timer frequency. */
 		if (freq == 0) {
@@ -196,33 +273,57 @@
 	return 0;
 }
 
-static inline cycle_t arch_counter_get_cntpct(void)
+static inline cycle_t counter_get_cntpct_mem(void)
 {
-	u32 cvall, cvalh;
+	u32 cvall, cvalh, thigh;
 
-	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
+	do {
+		cvalh = __raw_readl(timer_base + QTIMER_CNTP_HIGH_REG);
+		cvall = __raw_readl(timer_base + QTIMER_CNTP_LOW_REG);
+		thigh = __raw_readl(timer_base + QTIMER_CNTP_HIGH_REG);
+	} while (cvalh != thigh);
 
 	return ((cycle_t) cvalh << 32) | cvall;
 }
 
-static inline cycle_t arch_counter_get_cntvct(void)
+static inline cycle_t counter_get_cntpct_cp15(void)
+{
+	u32 cvall, cvalh;
+
+	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
+	return ((cycle_t) cvalh << 32) | cvall;
+}
+
+static inline cycle_t counter_get_cntvct_mem(void)
+{
+	u32 cvall, cvalh, thigh;
+
+	do {
+		cvalh = __raw_readl(timer_base + QTIMER_CNTV_HIGH_REG);
+		cvall = __raw_readl(timer_base + QTIMER_CNTV_LOW_REG);
+		thigh = __raw_readl(timer_base + QTIMER_CNTV_HIGH_REG);
+	} while (cvalh != thigh);
+
+	return ((cycle_t) cvalh << 32) | cvall;
+}
+
+static inline cycle_t counter_get_cntvct_cp15(void)
 {
 	u32 cvall, cvalh;
 
 	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
-
 	return ((cycle_t) cvalh << 32) | cvall;
 }
 
 static cycle_t arch_counter_read(struct clocksource *cs)
 {
-	return arch_counter_get_cntpct();
+	return arch_specific_timer->get_cntpct();
 }
 
 #ifdef ARCH_HAS_READ_CURRENT_TIMER
 int read_current_timer(unsigned long *timer_val)
 {
-	*timer_val = (unsigned long)arch_counter_get_cntpct();
+	*timer_val = (unsigned long)arch_specific_timer->get_cntpct();
 	return 0;
 }
 #endif
@@ -239,7 +340,7 @@
 {
 	cycle_t cntvct;
 
-	cntvct = arch_counter_get_cntvct();
+	cntvct = arch_specific_timer->get_cntvct();
 
 	/*
 	 * The sched_clock infrastructure only knows about counters
@@ -273,6 +374,9 @@
 {
 	int err;
 
+	if (!local_timer_is_architected())
+		arch_specific_timer = &arch_timer_ops_mem;
+
 	err = arch_timer_available();
 	if (err)
 		return err;
@@ -289,8 +393,12 @@
 	set_delay_fn(read_current_timer_delay_loop);
 #endif
 
-	err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
+	if (is_irq_percpu)
+		err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
 				 "arch_timer", arch_timer_evt);
+	else
+		err = request_irq(arch_timer_ppi, arch_timer_handler, 0,
+			"arch_timer", arch_timer_evt);
 	if (err) {
 		pr_err("arch_timer: can't register interrupt %d (%d)\n",
 		       arch_timer_ppi, err);
@@ -298,8 +406,13 @@
 	}
 
 	if (arch_timer_ppi2) {
-		err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
-					 "arch_timer", arch_timer_evt);
+		if (is_irq_percpu)
+			err = request_percpu_irq(arch_timer_ppi2,
+					arch_timer_handler, "arch_timer",
+					arch_timer_evt);
+		else
+			err = request_irq(arch_timer_ppi2, arch_timer_handler,
+					0, "arch_timer", arch_timer_evt);
 		if (err) {
 			pr_err("arch_timer: can't register interrupt %d (%d)\n",
 			       arch_timer_ppi2, err);
@@ -336,6 +449,16 @@
 	if (at->res[1].start > 0 && (at->res[1].flags & IORESOURCE_IRQ))
 		arch_timer_ppi2 = at->res[1].start;
 
+	if (at->res[2].start > 0 && at->res[2].end > 0 &&
+					(at->res[2].flags & IORESOURCE_MEM))
+		timer_base = ioremap(at->res[2].start,
+				resource_size(&at->res[2]));
+
+	if (!timer_base) {
+		pr_err("arch_timer: cant map timer base\n");
+		return -ENOMEM;
+	}
+
 	return arch_timer_common_register();
 }
 
@@ -366,6 +489,18 @@
 		pr_err("arch_timer: interrupt not specified in timer node\n");
 		return -ENODEV;
 	}
+
+	timer_base = of_iomap(np, 0);
+	if (!timer_base) {
+		pr_err("arch_timer: cant map timer base\n");
+		return -ENOMEM;
+	}
+
+	if (of_get_property(np, "irq-is-not-percpu", NULL))
+		is_irq_percpu = 0;
+	else
+		is_irq_percpu = 1;
+
 	arch_timer_ppi = ret;
 	ret = irq_of_parse_and_map(np, 1);
 	if (ret > 0)