blob: d1e209548f3e6f663d94bdf094d9b7c5b73b52eb [file] [log] [blame]
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +05301/*
2 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "%s: " fmt, __func__
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/init.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/errno.h>
25#include <linux/debugfs.h>
26#include <linux/interrupt.h>
27#include <linux/platform_device.h>
28#include <linux/cpufreq.h>
29#include <linux/iopoll.h>
30#include <linux/delay.h>
31#include <linux/regulator/consumer.h>
32
33#include <mach/irqs.h>
34
35#include "msm_cpr.h"
36
37#define MODULE_NAME "msm-cpr"
38
/**
 * Convert a delay time to a Timer Count Register value.
 * e.g. if the reference clock is 19200 kHz and the required delay is
 * 20000us, the timer count will be 19200 * 20000 / 1000.
 *
 * Arguments are fully parenthesized so that expression arguments such
 * as TIMER_COUNT(clk, base + extra) expand with the expected precedence
 * (the original expansion applied '*' and '/' before '+'/'-').
 */
#define TIMER_COUNT(freq, delay) (((freq) * (delay)) / 1000)
#define ALL_CPR_IRQ 0x3F

/* Need platform device handle for suspend and resume APIs */
static struct platform_device *cpr_pdev;
49
/* Per-device CPR state; one instance is allocated in probe. */
struct msm_cpr {
	int curr_osc;		/* index of the ring oscillator in use */
	int cpr_mode;		/* active mode, indexes config->cpr_mode_data */
	int prev_mode;		/* mode before the last cpufreq transition */
	uint32_t floor;		/* cached config->floor (programmed to RBIF_LIMIT) */
	uint32_t ceiling;	/* cached config->ceiling (programmed to RBIF_LIMIT) */
	bool max_volt_set;	/* true when the rail was last set to Vmax */
	void __iomem *base;	/* ioremapped CPR register block */
	unsigned int irq;	/* CPR irq0 line obtained in probe */
	struct mutex cpr_mutex;	/* serializes RBCPR_CTL loop enable/disable */
	struct regulator *vreg_cx;	/* rail that CPR recommendations drive */
	const struct msm_cpr_config *config;	/* platform data from board file */
	struct notifier_block freq_transition;	/* cpufreq transition hook */
	struct msm_cpr_vp_data *vp;	/* PMIC step-size/voltage parameters */
};

/* Need to maintain state data for suspend and resume APIs */
static struct msm_cpr_reg cpr_save_state;
68
/* Write @value to the CPR register at @offset (relaxed I/O, no barrier). */
static inline
void cpr_write_reg(struct msm_cpr *cpr, u32 offset, u32 value)
{
	writel_relaxed(value, cpr->base + offset);
}
74
/* Read the CPR register at @offset (relaxed I/O, no barrier). */
static inline u32 cpr_read_reg(struct msm_cpr *cpr, u32 offset)
{
	return readl_relaxed(cpr->base + offset);
}
79
80static
81void cpr_modify_reg(struct msm_cpr *cpr, u32 offset, u32 mask, u32 value)
82{
83 u32 reg_val;
84
85 reg_val = readl_relaxed(cpr->base + offset);
86 reg_val &= ~mask;
87 reg_val |= value;
88 writel_relaxed(reg_val, cpr->base + offset);
89}
90
#ifdef DEBUG
/* Dump all CPR registers of interest at pr_debug level (debug builds only). */
static void cpr_regs_dump_all(struct msm_cpr *cpr)
{
	pr_debug("RBCPR_GCNT_TARGET(%d): 0x%x\n",
		cpr->curr_osc, readl_relaxed(cpr->base +
		RBCPR_GCNT_TARGET(cpr->curr_osc)));
	pr_debug("RBCPR_TIMER_INTERVAL: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_TIMER_INTERVAL));
	pr_debug("RBIF_TIMER_ADJUST: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_TIMER_ADJUST));
	pr_debug("RBIF_LIMIT: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_LIMIT));
	pr_debug("RBCPR_STEP_QUOT: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_STEP_QUOT));
	pr_debug("RBIF_SW_VLEVEL: 0x%x\n",
		readl_relaxed(cpr->base + RBIF_SW_VLEVEL));
	pr_debug("RBCPR_DEBUG1: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_DEBUG1));
	pr_debug("RBCPR_RESULT_0: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_RESULT_0));
	pr_debug("RBCPR_RESULT_1: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_RESULT_1));
	pr_debug("RBCPR_QUOT_AVG: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_QUOT_AVG));
	pr_debug("RBCPR_CTL: 0x%x\n",
		readl_relaxed(cpr->base + RBCPR_CTL));
	pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
		cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));
	pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
		cpr_read_reg(cpr, RBIF_IRQ_STATUS));
}
#endif
123
/* Enable the CPR H/W Block: set LOOP_EN under cpr_mutex so that
 * concurrent enable/disable calls cannot interleave the RMW. */
static void cpr_enable(struct msm_cpr *cpr)
{
	mutex_lock(&cpr->cpr_mutex);
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
	mutex_unlock(&cpr->cpr_mutex);
}
131
/* Disable the CPR H/W Block: clear LOOP_EN under cpr_mutex (see
 * cpr_enable() for the locking rationale). */
static void cpr_disable(struct msm_cpr *cpr)
{
	mutex_lock(&cpr->cpr_mutex);
	cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
	mutex_unlock(&cpr->cpr_mutex);
}
139
140static int32_t cpr_poll_result(struct msm_cpr *cpr)
141{
142 uint32_t val = 0;
143 int8_t rc = 0;
144
145 rc = readl_poll_timeout(cpr->base + RBCPR_RESULT_0, val, ~val & BUSY_M,
146 10, 1000);
147 if (rc)
148 pr_info("%s: RBCPR_RESULT_0 read error: %d\n",
149 __func__, rc);
150 return rc;
151}
152
153static int32_t cpr_poll_result_done(struct msm_cpr *cpr)
154{
155 uint32_t val = 0;
156 int8_t rc = 0;
157
158 rc = readl_poll_timeout(cpr->base + RBIF_IRQ_STATUS, val, val & 0x1,
159 10, 1000);
160 if (rc)
161 pr_info("%s: RBCPR_IRQ_STATUS read error: %d\n",
162 __func__, rc);
163 return rc;
164}
165
166static void
167cpr_2pt_kv_analysis(struct msm_cpr *cpr, struct msm_cpr_mode *chip_data)
168{
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530169 int32_t level_uV = 0, rc;
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530170 uint32_t quot1, quot2;
171
172 /**
173 * 2 Point KV Analysis to calculate Step Quot
174 * STEP_QUOT is number of QUOT units per PMIC step
175 * STEP_QUOT = (quot1 - quot2) / 4
176 *
177 * The step quot is calculated once for every mode and stored for
178 * later use.
179 */
180 if (chip_data->step_quot != ~0)
181 goto out_2pt_kv;
182
183 /**
184 * Using the value from chip_data->tgt_volt_offset
185 * calculate the new PMIC adjusted voltages and set
186 * the PMIC to provide this value.
187 *
188 * Assuming default voltage is the highest value of safe boot up
189 * voltage, offset is always subtracted from it.
190 *
191 */
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530192 level_uV = chip_data->Vmax -
193 (chip_data->tgt_volt_offset * cpr->vp->step_size);
194 pr_debug("tgt_volt_uV = %d\n", level_uV);
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530195
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530196 /* Call the PMIC specific routine to set the voltage */
197 rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
198 if (rc) {
199 pr_err("%s: Initial voltage set at %duV failed. %d\n",
200 __func__, level_uV, rc);
201 return;
202 }
203 rc = regulator_enable(cpr->vreg_cx);
204 if (rc) {
205 pr_err("failed to enable %s, rc=%d\n", "vdd_cx", rc);
206 return;
207 }
208
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530209 /* First CPR measurement at a higher voltage to get QUOT1 */
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530210
211 /* Enable the Software mode of operation */
212 cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);
213
214 /* Enable the cpr measurement */
215 cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
216
217 /* IRQ is already disabled */
218 rc = cpr_poll_result_done(cpr);
219 if (rc) {
220 pr_err("%s: Quot1: Exiting due to INT_DONE poll timeout\n",
221 __func__);
222 return;
223 }
224
225 rc = cpr_poll_result(cpr);
226 if (rc) {
227 pr_err("%s: Quot1: Exiting due to BUSY poll timeout\n",
228 __func__);
229 return;
230 }
231
232 quot1 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;
233
234 /* Take second CPR measurement at a lower voltage to get QUOT2 */
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530235 level_uV -= 4 * cpr->vp->step_size;
236 pr_debug("tgt_volt_uV = %d\n", level_uV);
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530237
238 cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
239 /* Call the PMIC specific routine to set the voltage */
240 rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
241 if (rc) {
242 pr_err("%s: Voltage set at %duV failed. %d\n",
243 __func__, level_uV, rc);
244 return;
245 }
246
247 cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);
248 cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
249
250 /* cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1); */
251 rc = cpr_poll_result_done(cpr);
252 if (rc) {
253 pr_err("%s: Quot2: Exiting due to INT_DONE poll timeout\n",
254 __func__);
255 goto err_poll_result_done;
256 }
257 /* IRQ is already disabled */
258 rc = cpr_poll_result(cpr);
259 if (rc) {
260 pr_err("%s: Quot2: Exiting due to BUSY poll timeout\n",
261 __func__);
262 goto err_poll_result;
263 }
264 quot2 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;
265 chip_data->step_quot = (quot1 - quot2) / 4;
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530266 pr_info("%s: Calculated Step Quot is %d\n",
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530267 __func__, chip_data->step_quot);
268 /* Disable the cpr */
269 cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
270
271out_2pt_kv:
272 /* Program the step quot */
273 cpr_write_reg(cpr, RBCPR_STEP_QUOT, (chip_data->step_quot & 0xFF));
274 return;
275err_poll_result:
276err_poll_result_done:
277 regulator_disable(cpr->vreg_cx);
278}
279
/*
 * Clear all pending CPR interrupts and ACK the recommendation.
 * NOTE(review): @mask is unused -- the write clears ALL_CPR_IRQ, not
 * just the caller's bits; confirm this blanket clear is intentional.
 */
static inline
void cpr_irq_clr_and_ack(struct msm_cpr *cpr, uint32_t mask)
{
	/* Clear the interrupt */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);
	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}
288
/*
 * Clear all pending CPR interrupts and NACK the recommendation.
 * NOTE(review): @mask is unused here as well (see cpr_irq_clr_and_ack).
 */
static inline
void cpr_irq_clr_and_nack(struct msm_cpr *cpr, uint32_t mask)
{
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);
	cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1);
}
295
296static void cpr_irq_set(struct msm_cpr *cpr, uint32_t irq, bool enable)
297{
298 uint32_t irq_enabled;
299
300 irq_enabled = cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
301 if (enable == 1)
302 irq_enabled |= irq;
303 else
304 irq_enabled &= ~irq;
305 cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
306 INT_MASK, irq_enabled);
307}
308
/*
 * Service an UP recommendation: raise the rail to @new_volt (clamped to
 * the mode's Vmax), then re-arm the hardware for further measurements.
 */
static void
cpr_up_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
{
	int rc, set_volt_uV;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/**
	 * FIXME: Need to handle a potential race condition between
	 * freq switch handler and CPR interrupt handler here
	 */
	/* Set New PMIC voltage, clamped to the mode's Vmax */
	set_volt_uV = (new_volt < chip_data->Vmax ? new_volt
				: chip_data->Vmax);
	rc = regulator_set_voltage(cpr->vreg_cx, set_volt_uV,
					set_volt_uV);
	if (rc) {
		pr_err("%s: Voltage set at %duV failed. %d\n",
			__func__, set_volt_uV, rc);
		/* NACK so the hardware does not wait on this event forever */
		cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
		return;
	}
	pr_info("(railway_voltage: %d uV)\n", set_volt_uV);

	/* Remember whether the rail is now pinned at Vmax */
	cpr->max_volt_set = (set_volt_uV == chip_data->Vmax) ? 1 : 0;

	/* Clear all the interrupts */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);

	/* Disable Auto ACK for Down interrupts */
	cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_NACK_DN_EN_M, 0);

	/* Enable down interrupts to App as it might have got disabled if CPR
	 * hit Vmin earlier. Voltage set is above Vmin now.
	 */
	cpr_irq_set(cpr, DOWN_INT, 1);

	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}
350
/*
 * Service a DOWN recommendation: lower the rail to @new_volt (clamped
 * to the mode's Vmin).  When Vmin is reached, the DOWN interrupt to the
 * App is masked (with hardware auto-NACK enabled) until the next UP
 * event re-enables it.
 */
static void
cpr_dn_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
{
	int rc, set_volt_uV;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/**
	 * FIXME: Need to handle a potential race condition between
	 * freq switch handler and CPR interrupt handler here
	 */
	/* Set New PMIC volt, clamped to the mode's Vmin */
	set_volt_uV = (new_volt > chip_data->Vmin ? new_volt
				: chip_data->Vmin);
	rc = regulator_set_voltage(cpr->vreg_cx, set_volt_uV,
					set_volt_uV);
	if (rc) {
		pr_err("%s: Voltage at %duV failed %d\n",
			__func__, set_volt_uV, rc);
		/* NACK so the hardware does not wait on this event forever */
		cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
		return;
	}
	pr_info("(railway_voltage: %d uV)\n", set_volt_uV);

	/* The rail moved down, so it can no longer be at Vmax */
	cpr->max_volt_set = 0;

	/* Clear all the interrupts */
	cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);

	if (new_volt <= chip_data->Vmin) {
		/*
		 * Disable down interrupt to App after we hit Vmin
		 * It shall be enabled after we service an up interrupt
		 *
		 * A race condition between freq switch handler and CPR
		 * interrupt handler is possible. So, do not disable
		 * interrupt if a freq switch already caused a mode
		 * change since we need this interrupt in the new mode.
		 */
		if (cpr->cpr_mode == cpr->prev_mode) {
			/* Enable Auto ACK for CPR Down Flags
			 * while DOWN_INT to App is disabled */
			cpr_modify_reg(cpr, RBCPR_CTL,
					SW_AUTO_CONT_NACK_DN_EN_M,
					SW_AUTO_CONT_NACK_DN_EN);
			cpr_irq_set(cpr, DOWN_INT, 0);
			pr_debug("%s: DOWN_INT disabled\n", __func__);
		}
	}
	/* Acknowledge the Recommendation */
	cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}
404
405static void cpr_set_vdd(struct msm_cpr *cpr, enum cpr_action action)
406{
407 uint32_t curr_volt, new_volt, error_step;
408 struct msm_cpr_mode *chip_data;
409
410 chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
411 error_step = cpr_read_reg(cpr, RBCPR_RESULT_0) >> 2;
412 error_step &= 0xF;
Trilok Soni18a226c2012-09-02 01:34:17 +0530413
414 curr_volt = regulator_get_voltage(cpr->vreg_cx);
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530415
416 if (action == UP) {
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530417 /* Clear IRQ, ACK and return if Vdd already at Vmax */
418 if (cpr->max_volt_set == 1) {
419 cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);
420 cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1);
421 return;
422 }
423
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530424 /**
425 * Using up margin in the comparison helps avoid having to
426 * change up threshold values in chip register.
427 */
428 if (error_step < (cpr->config->up_threshold +
429 cpr->config->up_margin)) {
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530430 pr_debug("UP_INT error step too small to set\n");
431 cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
432 return;
433 }
434
435 /* Calculte new PMIC voltage */
436 new_volt = curr_volt + (error_step * cpr->vp->step_size);
437 pr_debug("UP_INT: new_volt: %d\n", new_volt);
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530438 pr_info("(UP Voltage recommended by CPR: %d uV)\n", new_volt);
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530439 cpr_up_event_handler(cpr, new_volt);
440
441 } else if (action == DOWN) {
442 /**
443 * Using down margin in the comparison helps avoid having to
444 * change down threshold values in chip register.
445 */
446 if (error_step < (cpr->config->dn_threshold +
447 cpr->config->dn_margin)) {
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530448 pr_debug("DOWN_INT error_step too small to set\n");
449 cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
450 return;
451 }
452
453 /* Calculte new PMIC voltage */
454 new_volt = curr_volt - (error_step * cpr->vp->step_size);
455 pr_debug("DOWN_INT: new_volt: %d\n", new_volt);
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530456 pr_info("(DN Voltage recommended by CPR: %d uV)\n", new_volt);
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530457 cpr_dn_event_handler(cpr, new_volt);
458 }
459}
460
/*
 * Threaded handler for CPR irq0.  Reads the IRQ status and control
 * registers once, then services exactly one flag per invocation in
 * priority order: UP, DOWN (only while down auto-NACK is off), MIN,
 * MAX, MID.
 */
static irqreturn_t cpr_irq0_handler(int irq, void *dev_id)
{
	struct msm_cpr *cpr = dev_id;
	uint32_t reg_val, ctl_reg;

	reg_val = cpr_read_reg(cpr, RBIF_IRQ_STATUS);
	ctl_reg = cpr_read_reg(cpr, RBCPR_CTL);

	/* Following sequence of handling is as per each IRQ's priority */
	if (reg_val & BIT(4)) {
		pr_debug(" CPR:IRQ %d occured for UP Flag\n", irq);
		cpr_set_vdd(cpr, UP);

	} else if ((reg_val & BIT(2)) && !(ctl_reg & SW_AUTO_CONT_NACK_DN_EN)) {
		pr_debug(" CPR:IRQ %d occured for Down Flag\n", irq);
		cpr_set_vdd(cpr, DOWN);

	} else if (reg_val & BIT(1)) {
		/* Vmin flag: no voltage change, just NACK */
		pr_debug(" CPR:IRQ %d occured for Min Flag\n", irq);
		cpr_irq_clr_and_nack(cpr, BIT(1) | BIT(0));

	} else if (reg_val & BIT(5)) {
		/* Vmax flag: no voltage change, just NACK */
		pr_debug(" CPR:IRQ %d occured for MAX Flag\n", irq);
		cpr_irq_clr_and_nack(cpr, BIT(5) | BIT(0));

	} else if (reg_val & BIT(3)) {
		/* SW_AUTO_CONT_ACK_EN is enabled, hardware acks MID itself */
		pr_debug(" CPR:IRQ %d occured for Mid Flag\n", irq);
	}
	return IRQ_HANDLED;
}
492
/*
 * One-time hardware setup for the current CPR mode: program the SW
 * vlevel, voltage limits, up/down thresholds and per-oscillator
 * gate-count/target values; run the 2-point KV analysis; set the
 * starting rail voltage and the measurement-interval timer; enable
 * auto-ACK for MID interrupts.  The measurement loop itself is enabled
 * later (probe / cpufreq notifier).
 */
static void cpr_config(struct msm_cpr *cpr)
{
	/* NOTE(review): rc is uint32_t but receives regulator_set_voltage()'s
	 * int return; 'if (rc)' still detects errors, but int would be safer */
	uint32_t delay_count, cnt = 0, rc, tmp_uV;
	struct msm_cpr_mode *chip_data;

	chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

	/* Program the SW vlevel */
	cpr_modify_reg(cpr, RBIF_SW_VLEVEL, SW_VLEVEL_M,
			cpr->config->sw_vlevel);

	/* Set the floor and ceiling values */
	cpr->floor = cpr->config->floor;
	cpr->ceiling = cpr->config->ceiling;

	/* Program the Ceiling & Floor values */
	cpr_modify_reg(cpr, RBIF_LIMIT, (CEILING_M | FLOOR_M),
			((cpr->ceiling << 6) | cpr->floor));

	/* Program the Up and Down Threshold values */
	cpr_modify_reg(cpr, RBCPR_CTL, UP_THRESHOLD_M | DN_THRESHOLD_M,
			cpr->config->up_threshold << 24 |
			cpr->config->dn_threshold << 28);

	cpr->curr_osc = chip_data->ring_osc;
	chip_data->ring_osc_data[cpr->curr_osc].quot =
		cpr->config->max_quot;

	/*
	 * Program the gate count and target values
	 * for all the ring oscillators
	 */
	while (cnt < NUM_OSC) {
		cpr_modify_reg(cpr, RBCPR_GCNT_TARGET(cnt),
				(GCNT_M | TARGET_M),
				(chip_data->ring_osc_data[cnt].gcnt << 12 |
				chip_data->ring_osc_data[cnt].quot));
		pr_debug("RBCPR_GCNT_TARGET(%d): = 0x%x\n", cnt,
			readl_relaxed(cpr->base + RBCPR_GCNT_TARGET(cnt)));
		cnt++;
	}

	/* Configure the step quot */
	cpr_2pt_kv_analysis(cpr, chip_data);

	/*
	 * Call the PMIC specific routine to set the voltage.
	 * Set with an extra step since it helps as per
	 * characterization data.
	 */
	chip_data->calibrated_uV += cpr->vp->step_size;
	tmp_uV = chip_data->calibrated_uV;
	rc = regulator_set_voltage(cpr->vreg_cx, tmp_uV, tmp_uV);
	if (rc)
		pr_err("%s: Voltage set failed %d\n", __func__, rc);

	/*
	 * Program the Timer Register for delay between CPR measurements.
	 * This is required to allow the device sufficient time for idle
	 * power collapse.
	 */
	delay_count = TIMER_COUNT(cpr->config->ref_clk_khz,
				cpr->config->delay_us);
	cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL, delay_count);

	/* Enable the Timer */
	cpr_modify_reg(cpr, RBCPR_CTL, TIMER_M, ENABLE_TIMER);

	/* Enable Auto ACK for Mid interrupts */
	cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_ACK_EN_M,
			SW_AUTO_CONT_ACK_EN);
}
565
/*
 * cpufreq transition notifier.
 *
 * PRECHANGE: stop CPR measurements and mask/disable the CPR IRQ so no
 * stale recommendation is serviced while the frequency changes.
 * POSTCHANGE: reprogram the target quot for the new frequency, then
 * re-enable the IRQ lines and the measurement loop.
 */
static int
cpr_freq_transition(struct notifier_block *nb, unsigned long val,
			void *data)
{
	struct msm_cpr *cpr = container_of(nb, struct msm_cpr, freq_transition);
	struct cpufreq_freqs *freqs = data;
	uint32_t quot, new_freq;

	switch (val) {
	case CPUFREQ_PRECHANGE:
		pr_debug("pre freq change notification to cpr\n");

		/* Disable Measurement to stop generation of CPR IRQs */
		cpr_disable(cpr);
		/* Disable routing of IRQ to App */
		cpr_irq_set(cpr, INT_MASK & ~MID_INT, 0);
		disable_irq(cpr->irq);
		cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);
		pr_debug("RBCPR_CTL: 0x%x\n",
			readl_relaxed(cpr->base + RBCPR_CTL));
		pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
			cpr_read_reg(cpr, RBIF_IRQ_STATUS));
		pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
			cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));

		/* Remembered so the IRQ path can detect a mode change */
		cpr->prev_mode = cpr->cpr_mode;
		break;

	case CPUFREQ_POSTCHANGE:
		pr_debug("post freq change notification to cpr\n");
		/*
		 * As per chip characterization data, use max nominal freq
		 * to calculate quot for all lower frequencies too
		 */
		new_freq = (freqs->new > cpr->config->max_nom_freq)
				? freqs->new
				: cpr->config->max_nom_freq;

		/* Configure CPR for the new frequency */
		quot = cpr->config->get_quot(cpr->config->max_quot,
						cpr->config->max_freq / 1000,
						new_freq / 1000);
		cpr_modify_reg(cpr, RBCPR_GCNT_TARGET(cpr->curr_osc), TARGET_M,
				quot);
		pr_debug("RBCPR_GCNT_TARGET(%d): = 0x%x\n", cpr->curr_osc,
			readl_relaxed(cpr->base +
					RBCPR_GCNT_TARGET(cpr->curr_osc)));
		pr_debug("%s: new_freq: %d, set_freq: %d, quot: %d\n", __func__,
			freqs->new, new_freq, quot);

		enable_irq(cpr->irq);
		/*
		 * Enable all interrupts. One of them could be in a disabled
		 * state if vdd had hit Vmax / Vmin earlier
		 */
		cpr_irq_set(cpr, INT_MASK & ~MID_INT, 1);
		pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
			cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));
		pr_debug("RBCPR_CTL: 0x%x\n",
			readl_relaxed(cpr->base + RBCPR_CTL));
		pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
			cpr_read_reg(cpr, RBIF_IRQ_STATUS));
		cpr_enable(cpr);
		break;
	default:
		break;
	}
	return 0;
}
635
#ifdef CONFIG_PM
/*
 * Resume: re-enable the CPR clock, restore every register saved by
 * msm_cpr_suspend(), then turn the IRQ and the measurement loop back on.
 */
static int msm_cpr_resume(struct device *dev)
{
	struct msm_cpr *cpr = dev_get_drvdata(dev);
	int osc_num = cpr->config->cpr_mode_data->ring_osc;

	cpr->config->clk_enable();

	cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL,
		cpr_save_state.rbif_timer_interval);
	cpr_write_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
		cpr_save_state.rbif_int_en);
	cpr_write_reg(cpr, RBIF_LIMIT,
		cpr_save_state.rbif_limit);
	cpr_write_reg(cpr, RBIF_TIMER_ADJUST,
		cpr_save_state.rbif_timer_adjust);
	cpr_write_reg(cpr, RBCPR_GCNT_TARGET(osc_num),
		cpr_save_state.rbcpr_gcnt_target);
	cpr_write_reg(cpr, RBCPR_STEP_QUOT,
		cpr_save_state.rbcpr_step_quot);
	cpr_write_reg(cpr, RBIF_SW_VLEVEL,
		cpr_save_state.rbif_sw_level);
	cpr_write_reg(cpr, RBCPR_CTL,
		cpr_save_state.rbcpr_ctl);

	enable_irq(cpr->irq);
	cpr_enable(cpr);

	return 0;
}
666
/*
 * Suspend: stop measurements and the IRQ, then snapshot the CPR
 * registers into cpr_save_state for msm_cpr_resume() to restore.
 */
static int msm_cpr_suspend(struct device *dev)
{
	struct msm_cpr *cpr = dev_get_drvdata(dev);
	int osc_num = cpr->config->cpr_mode_data->ring_osc;

	/* Disable CPR measurement before IRQ to avoid pending interrupts */
	cpr_disable(cpr);
	disable_irq(cpr->irq);

	cpr_save_state.rbif_timer_interval =
		cpr_read_reg(cpr, RBCPR_TIMER_INTERVAL);
	cpr_save_state.rbif_int_en =
		cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
	cpr_save_state.rbif_limit =
		cpr_read_reg(cpr, RBIF_LIMIT);
	cpr_save_state.rbif_timer_adjust =
		cpr_read_reg(cpr, RBIF_TIMER_ADJUST);
	cpr_save_state.rbcpr_gcnt_target =
		cpr_read_reg(cpr, RBCPR_GCNT_TARGET(osc_num));
	cpr_save_state.rbcpr_step_quot =
		cpr_read_reg(cpr, RBCPR_STEP_QUOT);
	cpr_save_state.rbif_sw_level =
		cpr_read_reg(cpr, RBIF_SW_VLEVEL);
	cpr_save_state.rbcpr_ctl =
		cpr_read_reg(cpr, RBCPR_CTL);

	return 0;
}
696
/*
 * Exported PM entry points for platform code that suspends/resumes CPR
 * outside the dev_pm_ops path.
 * NOTE(review): these dereference cpr_pdev, which is only assigned in
 * probe -- confirm callers never run before this driver has probed.
 */
void msm_cpr_pm_resume(void)
{
	msm_cpr_resume(&cpr_pdev->dev);
}
EXPORT_SYMBOL(msm_cpr_pm_resume);

void msm_cpr_pm_suspend(void)
{
	msm_cpr_suspend(&cpr_pdev->dev);
}
EXPORT_SYMBOL(msm_cpr_pm_suspend);
#endif
709
710void msm_cpr_disable(void)
711{
712 struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);
713 cpr_disable(cpr);
714}
715EXPORT_SYMBOL(msm_cpr_disable);
716
717void msm_cpr_enable(void)
718{
719 struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);
720 cpr_enable(cpr);
721}
722EXPORT_SYMBOL(msm_cpr_enable);
723
724static int __devinit msm_cpr_probe(struct platform_device *pdev)
725{
726 int res, irqn, irq_enabled;
727 struct msm_cpr *cpr;
728 const struct msm_cpr_config *pdata = pdev->dev.platform_data;
729 void __iomem *base;
730 struct resource *mem;
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530731 struct msm_cpr_mode *chip_data;
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530732
733 if (!pdata) {
734 pr_err("CPR: Platform data is not available\n");
735 return -EIO;
736 }
737
738 cpr = devm_kzalloc(&pdev->dev, sizeof(struct msm_cpr), GFP_KERNEL);
739 if (!cpr)
740 return -ENOMEM;
741
742 /* Initialize platform_data */
743 cpr->config = pdata;
744
745 cpr_pdev = pdev;
746
747 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
748 if (!mem || !mem->start) {
749 pr_err("CPR: get resource failed\n");
750 res = -ENXIO;
751 goto out;
752 }
753
754 base = ioremap_nocache(mem->start, resource_size(mem));
755 if (!base) {
756 pr_err("CPR: ioremap failed\n");
757 res = -ENOMEM;
758 goto out;
759 }
760
761 if (cpr->config->irq_line < 0) {
762 pr_err("CPR: Invalid IRQ line specified\n");
763 res = -ENXIO;
764 goto err_ioremap;
765 }
766 irqn = platform_get_irq(pdev, cpr->config->irq_line);
767 if (irqn < 0) {
768 pr_err("CPR: Unable to get irq\n");
769 res = -ENXIO;
770 goto err_ioremap;
771 }
772
773 cpr->irq = irqn;
774
775 cpr->base = base;
776
777 cpr->vp = pdata->vp_data;
778
779 mutex_init(&cpr->cpr_mutex);
780
781 /* Initialize the Voltage domain for CPR */
782 cpr->vreg_cx = regulator_get(&pdev->dev, "vddx_cx");
783 if (IS_ERR(cpr->vreg_cx)) {
784 res = PTR_ERR(cpr->vreg_cx);
785 pr_err("could not get regulator: %d\n", res);
786 goto err_reg_get;
787 }
788
789 /* Assume current mode is TURBO Mode */
790 cpr->cpr_mode = TURBO_MODE;
791 cpr->prev_mode = TURBO_MODE;
792
793 /* Initial configuration of CPR */
794 cpr_config(cpr);
795
796 platform_set_drvdata(pdev, cpr);
797
Kaushal Kumard0e4c812012-08-22 16:30:09 +0530798 chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
799 pr_info("CPR Platform Data (upside_steps: %d) (downside_steps: %d) ",
800 cpr->config->up_threshold, cpr->config->dn_threshold);
801 pr_info("(nominal_voltage: %duV) (turbo_voltage: %duV)\n",
802 cpr->config->cpr_mode_data[NORMAL_MODE].calibrated_uV,
803 cpr->config->cpr_mode_data[TURBO_MODE].calibrated_uV);
804 pr_info("(Current corner: TURBO) (gcnt_target: %d) (quot: %d)\n",
805 chip_data->ring_osc_data[chip_data->ring_osc].gcnt,
806 chip_data->ring_osc_data[chip_data->ring_osc].quot);
807
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530808 /* Initialze the Debugfs Entry for cpr */
809 res = msm_cpr_debug_init(cpr->base);
810 if (res) {
811 pr_err("CPR: Debugfs Creation Failed\n");
812 goto err_ioremap;
813 }
814
815 /* Register the interrupt handler for IRQ 0 */
816 res = request_threaded_irq(irqn, NULL, cpr_irq0_handler,
817 IRQF_TRIGGER_RISING, "msm-cpr-irq0", cpr);
818 if (res) {
819 pr_err("CPR: request irq failed for IRQ %d\n", irqn);
820 goto err_ioremap;
821 }
822
823 /**
824 * Enable the requested interrupt lines.
825 * Do not enable MID_INT since we shall use
826 * SW_AUTO_CONT_ACK_EN bit.
827 */
828 irq_enabled = INT_MASK & ~MID_INT;
829 cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
830 INT_MASK, irq_enabled);
831
832 /* Enable the cpr */
833 cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
834
Pankaj Kumar32ce1ea2012-04-04 20:29:29 +0530835 cpr->freq_transition.notifier_call = cpr_freq_transition;
836 cpufreq_register_notifier(&cpr->freq_transition,
837 CPUFREQ_TRANSITION_NOTIFIER);
838
839 return res;
840
841err_reg_get:
842 free_irq(irqn, cpr);
843err_ioremap:
844 iounmap(base);
845out:
846 return res;
847}
848
/*
 * Remove: unhook the cpufreq notifier, drop the regulator, and release
 * the IRQ, mapping and mutex.
 * NOTE(review): regulator_disable() is unconditional here, but the
 * matching regulator_enable() only happens inside cpr_2pt_kv_analysis()
 * on the uncalibrated path -- verify the enable/disable counts balance.
 */
static int __devexit msm_cpr_remove(struct platform_device *pdev)
{
	struct msm_cpr *cpr = platform_get_drvdata(pdev);

	cpufreq_unregister_notifier(&cpr->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);

	regulator_disable(cpr->vreg_cx);
	regulator_put(cpr->vreg_cx);
	free_irq(cpr->irq, cpr);
	iounmap(cpr->base);
	mutex_destroy(&cpr->cpr_mutex);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
865
866static const struct dev_pm_ops msm_cpr_dev_pm_ops = {
867 .suspend = msm_cpr_suspend,
868 .resume = msm_cpr_resume,
869};
870
871static struct platform_driver msm_cpr_driver = {
872 .probe = msm_cpr_probe,
873 .remove = __devexit_p(msm_cpr_remove),
874 .driver = {
875 .name = MODULE_NAME,
876 .owner = THIS_MODULE,
877#ifdef CONFIG_PM
878 .pm = &msm_cpr_dev_pm_ops,
879#endif
880 },
881};
882
/* Register the msm-cpr platform driver at module load. */
static int __init msm_init_cpr(void)
{
	return platform_driver_register(&msm_cpr_driver);
}

module_init(msm_init_cpr);

/* Unregister the driver at module unload. */
static void __exit msm_exit_cpr(void)
{
	platform_driver_unregister(&msm_cpr_driver);
}

module_exit(msm_exit_cpr);

MODULE_DESCRIPTION("MSM CPR Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");