/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>

#include <mach/irqs.h>

#include "msm_cpr.h"

#define MODULE_NAME "msm-cpr"

/*
 * Convert a delay time into a Timer Count Register value,
 * e.g. if the reference clock is 19200 kHz and the required delay is
 * 20000 us, the timer count is 19200 * 20000 / 1000 = 384000.
 */
#define TIMER_COUNT(freq, delay) (((freq) * (delay)) / 1000)
#define ALL_CPR_IRQ 0x3F

/* Need platform device handle for suspend and resume APIs */
static struct platform_device *cpr_pdev;

static bool enable = true;
module_param(enable, bool, 0644);
MODULE_PARM_DESC(enable, "CPR Enable");

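/*
 * Per-controller runtime state. Brief field notes, derived from how the
 * driver uses them (the platform data types live in msm_cpr.h):
 * curr_osc is the index of the ring oscillator currently in use,
 * cpr_mode/prev_mode track the operating mode across frequency switches,
 * floor/ceiling are the limits programmed into RBIF_LIMIT, max_volt_set
 * flags that the rail is already at cur_Vmax, and cur_Vmin/cur_Vmax are
 * the regulator bounds (in uV) for the active frequency.
 */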
struct msm_cpr {
        int curr_osc;
        int cpr_mode;
        int prev_mode;
        uint32_t floor;
        uint32_t ceiling;
        bool max_volt_set;
        void __iomem *base;
        unsigned int irq;
        uint32_t cur_Vmin;
        uint32_t cur_Vmax;
        struct mutex cpr_mutex;
        struct regulator *vreg_cx;
        const struct msm_cpr_config *config;
        struct notifier_block freq_transition;
        struct msm_cpr_vp_data *vp;
};

/* Need to maintain state data for suspend and resume APIs */
static struct msm_cpr_reg cpr_save_state;

static inline
void cpr_write_reg(struct msm_cpr *cpr, u32 offset, u32 value)
{
        writel_relaxed(value, cpr->base + offset);
}

static inline u32 cpr_read_reg(struct msm_cpr *cpr, u32 offset)
{
        return readl_relaxed(cpr->base + offset);
}

static
void cpr_modify_reg(struct msm_cpr *cpr, u32 offset, u32 mask, u32 value)
{
        u32 reg_val;

        reg_val = readl_relaxed(cpr->base + offset);
        reg_val &= ~mask;
        reg_val |= value;
        writel_relaxed(reg_val, cpr->base + offset);
}

#ifdef DEBUG
static void cpr_regs_dump_all(struct msm_cpr *cpr)
{
        pr_debug("RBCPR_GCNT_TARGET(%d): 0x%x\n",
                cpr->curr_osc, readl_relaxed(cpr->base +
                RBCPR_GCNT_TARGET(cpr->curr_osc)));
        pr_debug("RBCPR_TIMER_INTERVAL: 0x%x\n",
                readl_relaxed(cpr->base + RBCPR_TIMER_INTERVAL));
        pr_debug("RBIF_TIMER_ADJUST: 0x%x\n",
                readl_relaxed(cpr->base + RBIF_TIMER_ADJUST));
        pr_debug("RBIF_LIMIT: 0x%x\n",
                readl_relaxed(cpr->base + RBIF_LIMIT));
        pr_debug("RBCPR_STEP_QUOT: 0x%x\n",
                readl_relaxed(cpr->base + RBCPR_STEP_QUOT));
        pr_debug("RBIF_SW_VLEVEL: 0x%x\n",
                readl_relaxed(cpr->base + RBIF_SW_VLEVEL));
        pr_debug("RBCPR_DEBUG1: 0x%x\n",
                readl_relaxed(cpr->base + RBCPR_DEBUG1));
        pr_debug("RBCPR_RESULT_0: 0x%x\n",
                readl_relaxed(cpr->base + RBCPR_RESULT_0));
        pr_debug("RBCPR_RESULT_1: 0x%x\n",
                readl_relaxed(cpr->base + RBCPR_RESULT_1));
        pr_debug("RBCPR_QUOT_AVG: 0x%x\n",
                readl_relaxed(cpr->base + RBCPR_QUOT_AVG));
        pr_debug("RBCPR_CTL: 0x%x\n",
                readl_relaxed(cpr->base + RBCPR_CTL));
        pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
                cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));
        pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
                cpr_read_reg(cpr, RBIF_IRQ_STATUS));
}
#endif

/* Enable the CPR H/W Block */
static void cpr_enable(struct msm_cpr *cpr)
{
        mutex_lock(&cpr->cpr_mutex);
        cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
        mutex_unlock(&cpr->cpr_mutex);
}

/* Disable the CPR H/W Block */
static void cpr_disable(struct msm_cpr *cpr)
{
        mutex_lock(&cpr->cpr_mutex);
        cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
        mutex_unlock(&cpr->cpr_mutex);
}
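/* Poll RBCPR_RESULT_0 until the BUSY bit clears, i.e. the measurement is done */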
static int32_t cpr_poll_result(struct msm_cpr *cpr)
{
        uint32_t val = 0;
        int8_t rc = 0;

        rc = readl_poll_timeout(cpr->base + RBCPR_RESULT_0, val, ~val & BUSY_M,
                                10, 1000);
        if (rc)
                pr_info("%s: RBCPR_RESULT_0 read error: %d\n",
                        __func__, rc);
        return rc;
}
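/* Poll RBIF_IRQ_STATUS until the DONE flag (bit 0) is set */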
static int32_t cpr_poll_result_done(struct msm_cpr *cpr)
{
        uint32_t val = 0;
        int8_t rc = 0;

        rc = readl_poll_timeout(cpr->base + RBIF_IRQ_STATUS, val, val & 0x1,
                                10, 1000);
        if (rc)
                pr_info("%s: RBIF_IRQ_STATUS read error: %d\n",
                        __func__, rc);
        return rc;
}
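/*
 * Two-point KV analysis: measure QUOT at an upper voltage and again four
 * PMIC steps lower, then derive STEP_QUOT = (quot1 - quot2) / 4. The
 * result is cached in chip_data->step_quot and programmed into
 * RBCPR_STEP_QUOT at the end.
 */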
static void
cpr_2pt_kv_analysis(struct msm_cpr *cpr, struct msm_cpr_mode *chip_data)
{
        int32_t level_uV = 0, rc;
        uint32_t quot1, quot2;

        /**
         * 2 Point KV Analysis to calculate Step Quot
         * STEP_QUOT is number of QUOT units per PMIC step
         * STEP_QUOT = (quot1 - quot2) / 4
         *
         * The step quot is calculated once for every mode and stored for
         * later use.
         */
        if (chip_data->step_quot != ~0)
                goto out_2pt_kv;

        /**
         * Using the value from chip_data->tgt_volt_offset
         * calculate the new PMIC adjusted voltages and set
         * the PMIC to provide this value.
         *
         * Assuming default voltage is the highest value of safe boot up
         * voltage, offset is always subtracted from it.
         *
         */
        level_uV = chip_data->turbo_Vmax -
                (chip_data->tgt_volt_offset * cpr->vp->step_size);
        pr_debug("tgt_volt_uV = %d\n", level_uV);

        /* Call the PMIC specific routine to set the voltage */
        rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
        if (rc) {
                pr_err("%s: Initial voltage set at %duV failed. %d\n",
                        __func__, level_uV, rc);
                return;
        }
        rc = regulator_enable(cpr->vreg_cx);
        if (rc) {
                pr_err("failed to enable %s, rc=%d\n", "vdd_cx", rc);
                return;
        }

        /* First CPR measurement at a higher voltage to get QUOT1 */

        /* Enable the Software mode of operation */
        cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);

        /* Enable the cpr measurement */
        cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

        /* IRQ is already disabled */
        rc = cpr_poll_result_done(cpr);
        if (rc) {
                pr_err("%s: Quot1: Exiting due to INT_DONE poll timeout\n",
                        __func__);
                return;
        }

        rc = cpr_poll_result(cpr);
        if (rc) {
                pr_err("%s: Quot1: Exiting due to BUSY poll timeout\n",
                        __func__);
                return;
        }

        quot1 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;

        /* Take second CPR measurement at a lower voltage to get QUOT2 */
        level_uV -= 4 * cpr->vp->step_size;
        pr_debug("tgt_volt_uV = %d\n", level_uV);

        cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
        /* Call the PMIC specific routine to set the voltage */
        rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
        if (rc) {
                pr_err("%s: Voltage set at %duV failed. %d\n",
                        __func__, level_uV, rc);
                return;
        }

        cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);
        cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

        /* cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1); */
        rc = cpr_poll_result_done(cpr);
        if (rc) {
                pr_err("%s: Quot2: Exiting due to INT_DONE poll timeout\n",
                        __func__);
                goto err_poll_result_done;
        }
        /* IRQ is already disabled */
        rc = cpr_poll_result(cpr);
        if (rc) {
                pr_err("%s: Quot2: Exiting due to BUSY poll timeout\n",
                        __func__);
                goto err_poll_result;
        }
        quot2 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;
        chip_data->step_quot = (quot1 - quot2) / 4;
        pr_info("%s: Calculated Step Quot is %d\n",
                __func__, chip_data->step_quot);
        /* Disable the cpr */
        cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);

out_2pt_kv:
        /* Program the step quot */
        cpr_write_reg(cpr, RBCPR_STEP_QUOT, (chip_data->step_quot & 0xFF));
        return;
err_poll_result:
err_poll_result_done:
        regulator_disable(cpr->vreg_cx);
}

static inline
void cpr_irq_clr_and_ack(struct msm_cpr *cpr, uint32_t mask)
{
        /* Clear the interrupt */
        cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);
        /* Acknowledge the Recommendation */
        cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}

static inline
void cpr_irq_clr_and_nack(struct msm_cpr *cpr, uint32_t mask)
{
        cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);
        cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1);
}

static void cpr_irq_set(struct msm_cpr *cpr, uint32_t irq, bool enable)
{
        uint32_t irq_enabled;

        irq_enabled = cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
        if (enable == 1)
                irq_enabled |= irq;
        else
                irq_enabled &= ~irq;
        cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
                        INT_MASK, irq_enabled);
}
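/*
 * Service an UP recommendation: raise the rail to new_volt (clamped at
 * cur_Vmax), re-enable DOWN interrupts and ACK the recommendation.
 */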
static void
cpr_up_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
{
        int rc, set_volt_uV;
        struct msm_cpr_mode *chip_data;

        chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

        /**
         * FIXME: Need to handle a potential race condition between
         * freq switch handler and CPR interrupt handler here
         */
        /* Set New PMIC voltage */
        set_volt_uV = (new_volt < cpr->cur_Vmax ? new_volt
                                : cpr->cur_Vmax);
        rc = regulator_set_voltage(cpr->vreg_cx, set_volt_uV,
                                        set_volt_uV);
        if (rc) {
                pr_err("%s: Voltage set at %duV failed. %d\n",
                        __func__, set_volt_uV, rc);
                cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
                return;
        }
        pr_info("(railway_voltage: %d uV)\n", set_volt_uV);

        cpr->max_volt_set = (set_volt_uV == cpr->cur_Vmax) ? 1 : 0;

        /* Clear all the interrupts */
        cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);

        /* Disable Auto ACK for Down interrupts */
        cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_NACK_DN_EN_M, 0);

        /* Enable down interrupts to App as it might have got disabled if CPR
         * hit Vmin earlier. Voltage set is above Vmin now.
         */
        cpr_irq_set(cpr, DOWN_INT, 1);

        /* Acknowledge the Recommendation */
        cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}
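/*
 * Service a DOWN recommendation: lower the rail to new_volt (clamped at
 * cur_Vmin). Once Vmin is reached, DOWN interrupts to the App are masked
 * and the hardware auto-NACKs further down requests.
 */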
static void
cpr_dn_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
{
        int rc, set_volt_uV;
        struct msm_cpr_mode *chip_data;

        chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

        /**
         * FIXME: Need to handle a potential race condition between
         * freq switch handler and CPR interrupt handler here
         */
        /* Set New PMIC volt */
        set_volt_uV = (new_volt > cpr->cur_Vmin ? new_volt
                                : cpr->cur_Vmin);
        rc = regulator_set_voltage(cpr->vreg_cx, set_volt_uV,
                                        set_volt_uV);
        if (rc) {
                pr_err("%s: Voltage at %duV failed %d\n",
                        __func__, set_volt_uV, rc);
                cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
                return;
        }
        pr_info("(railway_voltage: %d uV)\n", set_volt_uV);

        cpr->max_volt_set = 0;

        /* Clear all the interrupts */
        cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);

        if (new_volt <= cpr->cur_Vmin) {
                /*
                 * Disable down interrupt to App after we hit Vmin
                 * It shall be enabled after we service an up interrupt
                 *
                 * A race condition between freq switch handler and CPR
                 * interrupt handler is possible. So, do not disable
                 * interrupt if a freq switch already caused a mode
                 * change since we need this interrupt in the new mode.
                 */
                if (cpr->cpr_mode == cpr->prev_mode) {
                        /* Enable Auto ACK for CPR Down Flags
                         * while DOWN_INT to App is disabled */
                        cpr_modify_reg(cpr, RBCPR_CTL,
                                        SW_AUTO_CONT_NACK_DN_EN_M,
                                        SW_AUTO_CONT_NACK_DN_EN);
                        cpr_irq_set(cpr, DOWN_INT, 0);
                        pr_debug("%s: DOWN_INT disabled\n", __func__);
                }
        }
        /* Acknowledge the Recommendation */
        cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
}
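/*
 * Act on a CPR recommendation: the error step count is taken from bits
 * [5:2] of RBCPR_RESULT_0, scaled by the PMIC step size and applied to
 * the current rail voltage before calling the up/down event handler.
 */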
static void cpr_set_vdd(struct msm_cpr *cpr, enum cpr_action action)
{
        uint32_t curr_volt, new_volt, error_step;
        struct msm_cpr_mode *chip_data;

        chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
        error_step = cpr_read_reg(cpr, RBCPR_RESULT_0) >> 2;
        error_step &= 0xF;

        curr_volt = regulator_get_voltage(cpr->vreg_cx);

        if (action == UP) {
                /* Clear IRQ, ACK and return if Vdd already at Vmax */
                if (cpr->max_volt_set == 1) {
                        cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);
                        cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1);
                        return;
                }

                /**
                 * Using up margin in the comparison helps avoid having to
                 * change up threshold values in chip register.
                 */
                if (error_step < (cpr->config->up_threshold +
                                        cpr->config->up_margin)) {
                        pr_debug("UP_INT error step too small to set\n");
                        cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
                        return;
                }

                /* Calculate new PMIC voltage */
                new_volt = curr_volt + (error_step * cpr->vp->step_size);
                pr_debug("UP_INT: new_volt: %d\n", new_volt);
                pr_info("(UP Voltage recommended by CPR: %d uV)\n", new_volt);
                cpr_up_event_handler(cpr, new_volt);

        } else if (action == DOWN) {
                /**
                 * Using down margin in the comparison helps avoid having to
                 * change down threshold values in chip register.
                 */
                if (error_step < (cpr->config->dn_threshold +
                                        cpr->config->dn_margin)) {
                        pr_debug("DOWN_INT error_step too small to set\n");
                        cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
                        return;
                }

                /* Calculate new PMIC voltage */
                new_volt = curr_volt - (error_step * cpr->vp->step_size);
                pr_debug("DOWN_INT: new_volt: %d\n", new_volt);
                pr_info("(DN Voltage recommended by CPR: %d uV)\n", new_volt);
                cpr_dn_event_handler(cpr, new_volt);
        }
}
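/*
 * CPR interrupt handler. RBIF_IRQ_STATUS flags are serviced in priority
 * order: UP (bit 4), DOWN (bit 2, skipped while auto-NACK of down
 * requests is active), MIN (bit 1), MAX (bit 5) and MID (bit 3, which
 * is auto-acked via SW_AUTO_CONT_ACK_EN).
 */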
static irqreturn_t cpr_irq0_handler(int irq, void *dev_id)
{
        struct msm_cpr *cpr = dev_id;
        uint32_t reg_val, ctl_reg;

        reg_val = cpr_read_reg(cpr, RBIF_IRQ_STATUS);
        ctl_reg = cpr_read_reg(cpr, RBCPR_CTL);

        /* Following sequence of handling is as per each IRQ's priority */
        if (reg_val & BIT(4)) {
                pr_debug(" CPR:IRQ %d occurred for UP Flag\n", irq);
                cpr_set_vdd(cpr, UP);

        } else if ((reg_val & BIT(2)) && !(ctl_reg & SW_AUTO_CONT_NACK_DN_EN)) {
                pr_debug(" CPR:IRQ %d occurred for Down Flag\n", irq);
                cpr_set_vdd(cpr, DOWN);

        } else if (reg_val & BIT(1)) {
                pr_debug(" CPR:IRQ %d occurred for Min Flag\n", irq);
                cpr_irq_clr_and_nack(cpr, BIT(1) | BIT(0));

        } else if (reg_val & BIT(5)) {
                pr_debug(" CPR:IRQ %d occurred for MAX Flag\n", irq);
                cpr_irq_clr_and_nack(cpr, BIT(5) | BIT(0));

        } else if (reg_val & BIT(3)) {
                /* SW_AUTO_CONT_ACK_EN is enabled */
                pr_debug(" CPR:IRQ %d occurred for Mid Flag\n", irq);
        }
        return IRQ_HANDLED;
}
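/*
 * One-time hardware setup: program the SW vlevel, voltage limits and
 * up/down thresholds, load gate count/target values for every ring
 * oscillator, run the 2-point KV analysis, set the initial calibrated
 * voltage and configure the inter-measurement timer.
 */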
static void cpr_config(struct msm_cpr *cpr)
{
        uint32_t delay_count, cnt = 0, rc, tmp_uV;
        struct msm_cpr_mode *chip_data;

        chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];

        /* Program the SW vlevel */
        cpr_modify_reg(cpr, RBIF_SW_VLEVEL, SW_VLEVEL_M,
                        cpr->config->sw_vlevel);

        /* Set the floor and ceiling values */
        cpr->floor = cpr->config->floor;
        cpr->ceiling = cpr->config->ceiling;

        /* Program the Ceiling & Floor values */
        cpr_modify_reg(cpr, RBIF_LIMIT, (CEILING_M | FLOOR_M),
                        ((cpr->ceiling << 6) | cpr->floor));

        /* Program the Up and Down Threshold values */
        cpr_modify_reg(cpr, RBCPR_CTL, UP_THRESHOLD_M | DN_THRESHOLD_M,
                        cpr->config->up_threshold << 24 |
                        cpr->config->dn_threshold << 28);

        cpr->curr_osc = chip_data->ring_osc;
        chip_data->ring_osc_data[cpr->curr_osc].quot =
                cpr->config->max_quot;

        /**
         * Program the gate count and target values
         * for all the ring oscillators
         */
        while (cnt < NUM_OSC) {
                cpr_modify_reg(cpr, RBCPR_GCNT_TARGET(cnt),
                                (GCNT_M | TARGET_M),
                                (chip_data->ring_osc_data[cnt].gcnt << 12 |
                                chip_data->ring_osc_data[cnt].quot));
                pr_debug("RBCPR_GCNT_TARGET(%d): = 0x%x\n", cnt,
                        readl_relaxed(cpr->base + RBCPR_GCNT_TARGET(cnt)));
                cnt++;
        }

        /* Configure the step quot */
        cpr_2pt_kv_analysis(cpr, chip_data);

        /**
         * Call the PMIC specific routine to set the voltage
         * Set with an extra step since it helps as per
         * characterization data.
         */
        chip_data->calibrated_uV += cpr->vp->step_size;
        tmp_uV = chip_data->calibrated_uV;
        rc = regulator_set_voltage(cpr->vreg_cx, tmp_uV, tmp_uV);
        if (rc)
                pr_err("%s: Voltage set failed %d\n", __func__, rc);

        /*
         * Program the Timer Register for delay between CPR measurements
         * This is required to allow the device sufficient time for idle
         * power collapse.
         */
        delay_count = TIMER_COUNT(cpr->config->ref_clk_khz,
                                cpr->config->delay_us);
        cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL, delay_count);

        /* Enable the Timer */
        cpr_modify_reg(cpr, RBCPR_CTL, TIMER_M, ENABLE_TIMER);

        /* Enable Auto ACK for Mid interrupts */
        cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_ACK_EN_M,
                        SW_AUTO_CONT_ACK_EN);
}
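/*
 * cpufreq transition notifier: measurements are stopped and the CPR IRQ
 * masked before a frequency switch, then the target quotient and the
 * Vmin/Vmax bounds are reprogrammed for the new frequency and CPR is
 * re-enabled afterwards.
 */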
static int
cpr_freq_transition(struct notifier_block *nb, unsigned long val,
                        void *data)
{
        struct msm_cpr *cpr = container_of(nb, struct msm_cpr, freq_transition);
        struct cpufreq_freqs *freqs = data;
        uint32_t quot, new_freq, ctl_reg;

        switch (val) {
        case CPUFREQ_PRECHANGE:
                pr_debug("pre freq change notification to cpr\n");

                /* Disable Measurement to stop generation of CPR IRQs */
                cpr_disable(cpr);
                /* Disable routing of IRQ to App */
                cpr_irq_set(cpr, INT_MASK & ~MID_INT, 0);
                disable_irq(cpr->irq);
                cpr_write_reg(cpr, RBIF_IRQ_CLEAR, ALL_CPR_IRQ);
                pr_debug("RBCPR_CTL: 0x%x\n",
                        readl_relaxed(cpr->base + RBCPR_CTL));
                pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
                        cpr_read_reg(cpr, RBIF_IRQ_STATUS));
                pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
                        cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));

                cpr->prev_mode = cpr->cpr_mode;
                break;

        case CPUFREQ_POSTCHANGE:
                pr_debug("post freq change notification to cpr\n");
                ctl_reg = cpr_read_reg(cpr, RBCPR_CTL);
                /**
                 * As per chip characterization data, use max nominal freq
                 * to calculate quot for all lower frequencies too
                 */
                if (freqs->new > cpr->config->max_nom_freq) {
                        new_freq = freqs->new;
                        cpr->cur_Vmin = cpr->config->cpr_mode_data[1].turbo_Vmin;
                        cpr->cur_Vmax = cpr->config->cpr_mode_data[1].turbo_Vmax;
                } else {
                        new_freq = cpr->config->max_nom_freq;
                        cpr->cur_Vmin = cpr->config->cpr_mode_data[1].nom_Vmin;
                        cpr->cur_Vmax = cpr->config->cpr_mode_data[1].nom_Vmax;
                }

                /* Configure CPR for the new frequency */
                quot = cpr->config->get_quot(cpr->config->max_quot,
                                                cpr->config->max_freq / 1000,
                                                new_freq / 1000);
                cpr_modify_reg(cpr, RBCPR_GCNT_TARGET(cpr->curr_osc), TARGET_M,
                                quot);
                pr_debug("RBCPR_GCNT_TARGET(%d): = 0x%x\n", cpr->curr_osc,
                        readl_relaxed(cpr->base +
                                        RBCPR_GCNT_TARGET(cpr->curr_osc)));
                pr_debug("%s: new_freq: %d, set_freq: %d, quot: %d\n", __func__,
                        freqs->new, new_freq, quot);

                enable_irq(cpr->irq);
                /**
                 * Enable all interrupts. One of them could be in a disabled
                 * state if vdd had hit Vmax / Vmin earlier
                 */
                cpr_irq_set(cpr, INT_MASK & ~MID_INT, 1);

                /**
                 * Clear the auto NACK down bit if enabled in the freq.
                 * transition phase.
                 */
                if (ctl_reg & SW_AUTO_CONT_NACK_DN_EN)
                        cpr_modify_reg(cpr, RBCPR_CTL,
                                        SW_AUTO_CONT_NACK_DN_EN_M, 0);
                pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
                        cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));
                pr_debug("RBCPR_CTL: 0x%x\n",
                        readl_relaxed(cpr->base + RBCPR_CTL));
                pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
                        cpr_read_reg(cpr, RBIF_IRQ_STATUS));
                cpr_enable(cpr);
                break;
        default:
                break;
        }
        return 0;
}
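/*
 * Suspend/resume support: on suspend the loop is stopped and the CPR
 * register state saved; on resume the registers are restored and
 * measurements re-enabled. msm_cpr_pm_resume()/msm_cpr_pm_suspend() let
 * platform PM code drive the same paths directly.
 */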
#ifdef CONFIG_PM
static int msm_cpr_resume(struct device *dev)
{
        struct msm_cpr *cpr = dev_get_drvdata(dev);
        int osc_num = cpr->config->cpr_mode_data->ring_osc;

        cpr->config->clk_enable();

        cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL,
                cpr_save_state.rbif_timer_interval);
        cpr_write_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
                cpr_save_state.rbif_int_en);
        cpr_write_reg(cpr, RBIF_LIMIT,
                cpr_save_state.rbif_limit);
        cpr_write_reg(cpr, RBIF_TIMER_ADJUST,
                cpr_save_state.rbif_timer_adjust);
        cpr_write_reg(cpr, RBCPR_GCNT_TARGET(osc_num),
                cpr_save_state.rbcpr_gcnt_target);
        cpr_write_reg(cpr, RBCPR_STEP_QUOT,
                cpr_save_state.rbcpr_step_quot);
        cpr_write_reg(cpr, RBIF_SW_VLEVEL,
                cpr_save_state.rbif_sw_level);
        cpr_write_reg(cpr, RBCPR_CTL,
                cpr_save_state.rbcpr_ctl);

        enable_irq(cpr->irq);
        cpr_enable(cpr);

        return 0;
}

static int msm_cpr_suspend(struct device *dev)
{
        struct msm_cpr *cpr = dev_get_drvdata(dev);
        int osc_num = cpr->config->cpr_mode_data->ring_osc;

        /* Disable CPR measurement before IRQ to avoid pending interrupts */
        cpr_disable(cpr);
        disable_irq(cpr->irq);

        cpr_save_state.rbif_timer_interval =
                cpr_read_reg(cpr, RBCPR_TIMER_INTERVAL);
        cpr_save_state.rbif_int_en =
                cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
        cpr_save_state.rbif_limit =
                cpr_read_reg(cpr, RBIF_LIMIT);
        cpr_save_state.rbif_timer_adjust =
                cpr_read_reg(cpr, RBIF_TIMER_ADJUST);
        cpr_save_state.rbcpr_gcnt_target =
                cpr_read_reg(cpr, RBCPR_GCNT_TARGET(osc_num));
        cpr_save_state.rbcpr_step_quot =
                cpr_read_reg(cpr, RBCPR_STEP_QUOT);
        cpr_save_state.rbif_sw_level =
                cpr_read_reg(cpr, RBIF_SW_VLEVEL);
        cpr_save_state.rbcpr_ctl =
                cpr_read_reg(cpr, RBCPR_CTL);

        return 0;
}

void msm_cpr_pm_resume(void)
{
        msm_cpr_resume(&cpr_pdev->dev);
}
EXPORT_SYMBOL(msm_cpr_pm_resume);

void msm_cpr_pm_suspend(void)
{
        msm_cpr_suspend(&cpr_pdev->dev);
}
EXPORT_SYMBOL(msm_cpr_pm_suspend);
#endif

void msm_cpr_disable(void)
{
        struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);
        cpr_disable(cpr);
}
EXPORT_SYMBOL(msm_cpr_disable);

void msm_cpr_enable(void)
{
        struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);
        cpr_enable(cpr);
}
EXPORT_SYMBOL(msm_cpr_enable);

static int __devinit msm_cpr_probe(struct platform_device *pdev)
{
        int res, irqn, irq_enabled;
        struct msm_cpr *cpr;
        const struct msm_cpr_config *pdata = pdev->dev.platform_data;
        void __iomem *base;
        struct resource *mem;
        struct msm_cpr_mode *chip_data;

        if (!enable)
                return -EPERM;

        if (!pdata) {
                pr_err("CPR: Platform data is not available\n");
                return -EIO;
        }

        cpr = devm_kzalloc(&pdev->dev, sizeof(struct msm_cpr), GFP_KERNEL);
        if (!cpr)
                return -ENOMEM;

        /* Initialize platform_data */
        cpr->config = pdata;

        /* Set initial Vmin,Vmax equal to turbo */
        cpr->cur_Vmin = cpr->config->cpr_mode_data[1].turbo_Vmin;
        cpr->cur_Vmax = cpr->config->cpr_mode_data[1].turbo_Vmax;

        cpr_pdev = pdev;

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem || !mem->start) {
                pr_err("CPR: get resource failed\n");
                res = -ENXIO;
                goto out;
        }

        base = ioremap_nocache(mem->start, resource_size(mem));
        if (!base) {
                pr_err("CPR: ioremap failed\n");
                res = -ENOMEM;
                goto out;
        }

        if (cpr->config->irq_line < 0) {
                pr_err("CPR: Invalid IRQ line specified\n");
                res = -ENXIO;
                goto err_ioremap;
        }
        irqn = platform_get_irq(pdev, cpr->config->irq_line);
        if (irqn < 0) {
                pr_err("CPR: Unable to get irq\n");
                res = -ENXIO;
                goto err_ioremap;
        }

        cpr->irq = irqn;

        cpr->base = base;

        cpr->vp = pdata->vp_data;

        mutex_init(&cpr->cpr_mutex);

        /* Initialize the Voltage domain for CPR */
        cpr->vreg_cx = regulator_get(&pdev->dev, "vddx_cx");
        if (IS_ERR(cpr->vreg_cx)) {
                res = PTR_ERR(cpr->vreg_cx);
                pr_err("could not get regulator: %d\n", res);
                goto err_reg_get;
        }

        /* Assume current mode is TURBO Mode */
        cpr->cpr_mode = TURBO_MODE;
        cpr->prev_mode = TURBO_MODE;

        /* Initial configuration of CPR */
        cpr_config(cpr);

        platform_set_drvdata(pdev, cpr);

        chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
        pr_info("CPR Platform Data (upside_steps: %d) (downside_steps: %d) ",
                cpr->config->up_threshold, cpr->config->dn_threshold);
        pr_info("(nominal_voltage: %duV) (turbo_voltage: %duV)\n",
                cpr->config->cpr_mode_data[NORMAL_MODE].calibrated_uV,
                cpr->config->cpr_mode_data[TURBO_MODE].calibrated_uV);
        pr_info("(Current corner: TURBO) (gcnt_target: %d) (quot: %d)\n",
                chip_data->ring_osc_data[chip_data->ring_osc].gcnt,
                chip_data->ring_osc_data[chip_data->ring_osc].quot);

        /* Initialize the Debugfs Entry for cpr */
        res = msm_cpr_debug_init(cpr->base);
        if (res) {
                pr_err("CPR: Debugfs Creation Failed\n");
                goto err_ioremap;
        }

        /* Register the interrupt handler for IRQ 0 */
        res = request_threaded_irq(irqn, NULL, cpr_irq0_handler,
                        IRQF_TRIGGER_RISING, "msm-cpr-irq0", cpr);
        if (res) {
                pr_err("CPR: request irq failed for IRQ %d\n", irqn);
                goto err_ioremap;
        }

        /**
         * Enable the requested interrupt lines.
         * Do not enable MID_INT since we shall use
         * SW_AUTO_CONT_ACK_EN bit.
         */
        irq_enabled = INT_MASK & ~MID_INT;
        cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
                        INT_MASK, irq_enabled);

        /* Enable the cpr */
        cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);

        cpr->freq_transition.notifier_call = cpr_freq_transition;
        cpufreq_register_notifier(&cpr->freq_transition,
                                        CPUFREQ_TRANSITION_NOTIFIER);

        return res;

err_reg_get:
err_ioremap:
        /* No IRQ has been requested on any path that reaches these labels */
        iounmap(base);
out:
        return res;
}

static int __devexit msm_cpr_remove(struct platform_device *pdev)
{
        struct msm_cpr *cpr = platform_get_drvdata(pdev);

        cpufreq_unregister_notifier(&cpr->freq_transition,
                                        CPUFREQ_TRANSITION_NOTIFIER);

        regulator_disable(cpr->vreg_cx);
        regulator_put(cpr->vreg_cx);
        free_irq(cpr->irq, cpr);
        iounmap(cpr->base);
        mutex_destroy(&cpr->cpr_mutex);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

#ifdef CONFIG_PM
static const struct dev_pm_ops msm_cpr_dev_pm_ops = {
        .suspend = msm_cpr_suspend,
        .resume = msm_cpr_resume,
};
#endif

static struct platform_driver msm_cpr_driver = {
        .probe = msm_cpr_probe,
        .remove = __devexit_p(msm_cpr_remove),
        .driver = {
                .name = MODULE_NAME,
                .owner = THIS_MODULE,
#ifdef CONFIG_PM
                .pm = &msm_cpr_dev_pm_ops,
#endif
        },
};

static int __init msm_init_cpr(void)
{
        return platform_driver_register(&msm_cpr_driver);
}

module_init(msm_init_cpr);

static void __exit msm_exit_cpr(void)
{
        platform_driver_unregister(&msm_cpr_driver);
}

module_exit(msm_exit_cpr);

MODULE_DESCRIPTION("MSM CPR Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");