/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

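/*
 * Krait ACPU clock driver: scales the per-CPU Krait clocks and the
 * shared L2 clock across HFPLL and secondary MUX sources, adjusting
 * the mem/dig/core voltage rails and bus bandwidth votes to match.
 */
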
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>
#include <mach/msm_dcvs.h>

#include "acpuclock.h"
#include "acpuclock-krait.h"
#include "avs.h"

/* MUX source selects. */
#define PRI_SRC_SEL_SEC_SRC	0
#define PRI_SRC_SEL_HFPLL	1
#define PRI_SRC_SEL_HFPLL_DIV2	2

#define SECCLKAGD		BIT(4)

static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);

static struct drv_data drv;

static unsigned long acpuclk_krait_get_rate(int cpu)
{
	return drv.scalable[cpu].cur_speed->khz;
}

/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

/* Select a source on the secondary MUX. */
static void __cpuinit set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
	u32 regval;

	/* 8064 Errata: disable sec_src clock gating during switch. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval |= SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Program the MUX. */
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* 8064 Errata: re-enable sec_src clock gating. */
	regval &= ~SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

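/*
 * RPM-managed regulator helpers. Rails without an rpm_reg handle
 * (i.e. not routed through the RPM on this target) are skipped.
 */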
static int enable_rpm_vreg(struct vreg *vreg)
{
	int ret = 0;

	if (vreg->rpm_reg) {
		ret = rpm_regulator_enable(vreg->rpm_reg);
		if (ret)
			dev_err(drv.dev, "%s regulator enable failed (%d)\n",
				vreg->name, ret);
	}

	return ret;
}

static void disable_rpm_vreg(struct vreg *vreg)
{
	int rc;

	if (vreg->rpm_reg) {
		rc = rpm_regulator_disable(vreg->rpm_reg);
		if (rc)
			dev_err(drv.dev, "%s regulator disable failed (%d)\n",
				vreg->name, rc);
	}
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
	if (!skip_regulators) {
		/* Enable regulators required by the HFPLL. */
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
	}

	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/* Wait for PLL to lock. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
}

/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
	}
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	void __iomem *base = sc->hfpll_base;
	u32 regval;

	writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);

	if (drv.hfpll_data->has_user_reg) {
		regval = readl_relaxed(base + drv.hfpll_data->user_offset);
		if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
			regval &= ~drv.hfpll_data->user_vco_mask;
		else
			regval |= drv.hfpll_data->user_vco_mask;
		writel_relaxed(regval, base + drv.hfpll_data->user_offset);
	}
}

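/*
 * Each CPU votes for an L2 level; the L2 runs at the maximum vote.
 * Callers hold l2_lock so the vote update and the resulting L2 rate
 * switch happen atomically.
 */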
/* Return the L2 speed that should be applied. */
static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l)
{
	unsigned int new_l = 0;
	int cpu;

	/* Find max L2 speed vote. */
	sc->l2_vote = vote_l;
	for_each_present_cpu(cpu)
		new_l = max(new_l, drv.scalable[cpu].l2_vote);

	return new_l;
}

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	/* Update bandwidth if request has changed. This may sleep. */
	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
	if (ret)
		dev_err(drv.dev, "bandwidth request failed (%d)\n", ret);
}

/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s,
		      bool skip_regulators)
{
	const struct core_speed *strt_s = sc->cur_speed;

	if (strt_s == tgt_s)
		return;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Re-program HFPLL. */
		hfpll_disable(sc, true);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, true);

		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, skip_regulators);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, skip_regulators);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	}

	sc->cur_speed = tgt_s;
}

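/* Voltage and current requirements computed for a target ACPU level. */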
struct vdd_data {
	int vdd_mem;
	int vdd_dig;
	int vdd_core;
	int ua_core;
};

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, struct vdd_data *data,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc;

	/*
	 * Increase vdd_mem active-set before vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}

	/* Increase vdd_dig active-set vote. */
	if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/* Increase current request. */
	if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						data->ua_core);
		if (rc < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/*
	 * Update the per-CPU core voltage. Skip this on the hotplug path,
	 * where it is already correct: we may not be running on the
	 * affected CPU here, and the CPU regulator API requires the call
	 * to be made from that CPU.
	 */
	if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	return 0;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, struct vdd_data *data,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	/* Decrease current request. */
	if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 data->ua_core);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}
}

static int calculate_vdd_mem(const struct acpu_level *tgt)
{
	return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
}

static int get_src_dig(const struct core_speed *s)
{
	const int *hfpll_vdd = drv.hfpll_data->vdd;
	const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
	const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;

	if (s->src != HFPLL)
		return hfpll_vdd[HFPLL_VDD_NONE];
	else if (s->pll_l_val > nom_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_HIGH];
	else if (s->pll_l_val > low_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_NOM];
	else
		return hfpll_vdd[HFPLL_VDD_LOW];
}

static int calculate_vdd_dig(const struct acpu_level *tgt)
{
	int l2_pll_vdd_dig, cpu_pll_vdd_dig;

	l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
	cpu_pll_vdd_dig = get_src_dig(&tgt->speed);

	return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
		   max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
}

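/*
 * Optional "boost" margin added on top of each level's vdd_core value;
 * it can be toggled at runtime through the "boost" module parameter.
 */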
static bool enable_boost = true;
module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);

static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}

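/*
 * The L2 HFPLL regulators are shared by all CPUs, so votes are
 * reference-counted: the rails are enabled on the first vote and
 * released when the last vote is dropped.
 */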
static DEFINE_MUTEX(l2_regulator_lock);
static int l2_vreg_count;

static int enable_l2_regulators(void)
{
	int ret = 0;

	mutex_lock(&l2_regulator_lock);
	if (l2_vreg_count == 0) {
		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
		if (ret)
			goto out;
		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
		if (ret) {
			disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
			goto out;
		}
	}
	l2_vreg_count++;
out:
	mutex_unlock(&l2_regulator_lock);

	return ret;
}

static void disable_l2_regulators(void)
{
	mutex_lock(&l2_regulator_lock);

	if (WARN(!l2_vreg_count, "L2 regulator votes are unbalanced!"))
		goto out;

	if (l2_vreg_count == 1) {
		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
	}
	l2_vreg_count--;
out:
	mutex_unlock(&l2_regulator_lock);
}

/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
				  enum setrate_reason reason)
{
	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
	const struct acpu_level *tgt;
	int tgt_l2_l;
	enum src_id prev_l2_src = NUM_SRC_ID;
	struct vdd_data vdd_data;
	bool skip_regulators;
	int rc = 0;

	/* Valid CPU numbers are 0..num_possible_cpus()-1. */
	if (cpu >= num_possible_cpus())
		return -EINVAL;

	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_lock(&driver_lock);

	strt_acpu_s = drv.scalable[cpu].cur_speed;

	/* Return early if rate didn't change. */
	if (rate == strt_acpu_s->khz)
		goto out;

	/* Find target frequency. */
	for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
		if (tgt->speed.khz == rate) {
			tgt_acpu_s = &tgt->speed;
			break;
		}
	}
	if (tgt->speed.khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Calculate voltage requirements for the current CPU. */
	vdd_data.vdd_mem = calculate_vdd_mem(tgt);
	vdd_data.vdd_dig = calculate_vdd_dig(tgt);
	vdd_data.vdd_core = calculate_vdd_core(tgt);
	vdd_data.ua_core = tgt->ua_core;

	/* Disable AVS before the voltage switch. */
	if (reason == SETRATE_CPUFREQ && drv.scalable[cpu].avs_enabled) {
		AVS_DISABLE(cpu);
		drv.scalable[cpu].avs_enabled = false;
	}

	/* Increase VDD levels if needed. */
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
		rc = increase_vdd(cpu, &vdd_data, reason);
		if (rc)
			goto out;

		prev_l2_src =
			drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src;
		/* Vote for the L2 regulators here if necessary. */
		if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL) {
			rc = enable_l2_regulators();
			if (rc)
				goto out;
		}
	}

	dev_dbg(drv.dev, "Switching ACPU%d rate %lu KHz -> %lu KHz\n",
		cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

	/*
	 * If we are setting the rate as part of power collapse, or in the
	 * resume path after power collapse, skip the vote for the HFPLL
	 * regulators. These are active-set-only votes that will be removed
	 * when the Apps processor enters its sleep set, and skipping them
	 * avoids calling sleeping regulator APIs from an atomic context.
	 */
	skip_regulators = (reason == SETRATE_PC);

	/* Set the new CPU speed. */
	set_speed(&drv.scalable[cpu], tgt_acpu_s, skip_regulators);

	/*
	 * Update the L2 vote and apply the rate change. A spinlock is
	 * necessary to ensure the L2 rate is calculated and set atomically
	 * with the CPU frequency, even if acpuclk_krait_set_rate() is
	 * called from an atomic context and the driver_lock mutex is not
	 * acquired.
	 */
	spin_lock(&l2_lock);
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	set_speed(&drv.scalable[L2],
		  &drv.l2_freq_tbl[tgt_l2_l].speed, true);
	spin_unlock(&l2_lock);

	/* Nothing else to do for power collapse or SWFI. */
	if (reason == SETRATE_PC || reason == SETRATE_SWFI)
		goto out;

	/*
	 * Remove the vote for the L2 HFPLL regulators only if the L2
	 * was already on an HFPLL source.
	 */
	if (prev_l2_src == HFPLL)
		disable_l2_regulators();

	/* Update bus bandwidth request. */
	set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);

	/* Drop VDD levels if we can. */
	decrease_vdd(cpu, &vdd_data, reason);

	/* Re-enable AVS. */
	if (reason == SETRATE_CPUFREQ && tgt->avsdscr_setting) {
		AVS_ENABLE(cpu, tgt->avsdscr_setting);
		drv.scalable[cpu].avs_enabled = true;
	}

	dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);

out:
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_unlock(&driver_lock);
	return rc;
}

static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
};

/* Initialize a HFPLL at a given rate and enable it. */
static void __cpuinit hfpll_init(struct scalable *sc,
				 const struct core_speed *tgt_s)
{
	dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, true);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(drv.hfpll_data->config_val,
		       sc->hfpll_base + drv.hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
	if (drv.hfpll_data->has_user_reg)
		writel_relaxed(drv.hfpll_data->user_val,
			       sc->hfpll_base + drv.hfpll_data->user_offset);

	/* Program droop controller, if supported. */
	if (drv.hfpll_data->has_droop_ctl)
		writel_relaxed(drv.hfpll_data->droop_val,
			       sc->hfpll_base + drv.hfpll_data->droop_offset);

	/* Set an initial rate and enable the PLL. */
	hfpll_set_rate(sc, tgt_s);
	hfpll_enable(sc, false);
}

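/* Look up, configure, and optionally enable one RPM-managed regulator. */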
static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
					int vdd, bool enable)
{
	int ret;

	if (!sc->vreg[vreg].name)
		return 0;

	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
						   sc->vreg[vreg].name);
	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
		ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_get;
	}

	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
					sc->vreg[vreg].max_vdd);
	if (ret) {
		dev_err(drv.dev, "%s initialization failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_conf;
	}
	sc->vreg[vreg].cur_vdd = vdd;

	if (enable) {
		ret = enable_rpm_vreg(&sc->vreg[vreg]);
		if (ret)
			goto err_conf;
	}

	return 0;

err_conf:
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
err_get:
	return ret;
}

static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
					    enum vregs vreg)
{
	if (!sc->vreg[vreg].rpm_reg)
		return;

	disable_rpm_vreg(&sc->vreg[vreg]);
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
}

/* Voltage regulator initialization. */
static int __cpuinit regulator_init(struct scalable *sc,
				    const struct acpu_level *acpu_level)
{
	int ret, vdd_mem, vdd_dig, vdd_core;

	vdd_mem = calculate_vdd_mem(acpu_level);
	ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
	if (ret)
		goto err_mem;

	vdd_dig = calculate_vdd_dig(acpu_level);
	ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
	if (ret)
		goto err_dig;

	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
				 sc->vreg[VREG_HFPLL_A].max_vdd, false);
	if (ret)
		goto err_hfpll_a;
	ret = rpm_regulator_init(sc, VREG_HFPLL_B,
				 sc->vreg[VREG_HFPLL_B].max_vdd, false);
	if (ret)
		goto err_hfpll_b;

	/* Setup Krait CPU regulators and initial core voltage. */
	sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
						sc->vreg[VREG_CORE].name);
	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
		ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
		dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_get;
	}
	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
					 acpu_level->ua_core);
	if (ret < 0) {
		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
	vdd_core = calculate_vdd_core(acpu_level);
	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
				    sc->vreg[VREG_CORE].max_vdd);
	if (ret) {
		dev_err(drv.dev, "regulator_set_voltage(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
	if (ret) {
		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}

	/*
	 * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
	 * requires a corresponding target L2 frequency that needs the L2 to
	 * run off of an HFPLL.
	 */
	if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
		l2_vreg_count++;

	return 0;

err_core_conf:
	regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
err_hfpll_b:
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
err_hfpll_a:
	rpm_regulator_cleanup(sc, VREG_DIG);
err_dig:
	rpm_regulator_cleanup(sc, VREG_MEM);
err_mem:
	return ret;
}

static void __cpuinit regulator_cleanup(struct scalable *sc)
{
	regulator_disable(sc->vreg[VREG_CORE].reg);
	regulator_put(sc->vreg[VREG_CORE].reg);
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
	rpm_regulator_cleanup(sc, VREG_DIG);
	rpm_regulator_cleanup(sc, VREG_MEM);
}

/* Set initial rate for a given core. */
static int __cpuinit init_clock_sources(struct scalable *sc,
					const struct core_speed *tgt_s)
{
	u32 regval;
	void __iomem *aux_reg;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_phys) {
		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
		if (!aux_reg)
			return -ENOMEM;
		writel_relaxed(sc->aux_clk_sel, aux_reg);
		iounmap(aux_reg);
	}

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, sc->sec_clk_sel);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Switch to the target clock source. */
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;

	return 0;
}

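/*
 * Boot-time rate detection: read back the MUX selection and PLL L-value
 * left by the bootloader and match them against the frequency tables.
 */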
static void __cpuinit fill_cur_core_speed(struct core_speed *s,
					  struct scalable *sc)
{
	s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
	s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
}

static bool __cpuinit speed_equal(const struct core_speed *s1,
				  const struct core_speed *s2)
{
	return (s1->pri_src_sel == s2->pri_src_sel &&
		s1->pll_l_val == s2->pll_l_val);
}

static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct l2_level __init *find_cur_l2_level(void)
{
	struct scalable *sc = &drv.scalable[L2];
	const struct l2_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct acpu_level __cpuinit *find_min_acpu_level(void)
{
	struct acpu_level *l;

	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (l->use_for_scaling)
			return l;

	return NULL;
}

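/* Map a CPU's HFPLL and bring up its regulators and clock sources. */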
static int __cpuinit per_cpu_init(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *acpu_level;
	int ret;

	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
	if (!sc->hfpll_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	acpu_level = find_cur_acpu_level(cpu);
	if (!acpu_level) {
		acpu_level = find_min_acpu_level();
		if (!acpu_level) {
			ret = -ENODEV;
			goto err_table;
		}
		dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
			cpu, acpu_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
			acpu_level->speed.khz);
	}

	ret = regulator_init(sc, acpu_level);
	if (ret)
		goto err_regulators;

	ret = init_clock_sources(sc, &acpu_level->speed);
	if (ret)
		goto err_clocks;

	sc->l2_vote = acpu_level->l2_level;
	sc->initialized = true;

	return 0;

err_clocks:
	regulator_cleanup(sc);
err_regulators:
err_table:
	iounmap(sc->hfpll_base);
err_ioremap:
	return ret;
}

/* Register with bus driver. */
static void __init bus_init(const struct l2_level *l2_level)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
						  l2_level->bw_level);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}

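/* Build per-CPU tables of usable frequencies and register them with cpufreq. */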
#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

static void __init cpufreq_table_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;
		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table); i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
			 cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
#else
static void __init cpufreq_table_init(void) {}
#endif

static void __init dcvs_freq_init(void)
{
	int i;

	for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0; i++)
		if (drv.acpu_freq_tbl[i].use_for_scaling)
			msm_dcvs_register_cpu_freq(
				drv.acpu_freq_tbl[i].speed.khz,
				drv.acpu_freq_tbl[i].vdd_core / 1000);
}

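/*
 * Hotplug notifier: park a dying CPU at the power-collapse rate and cut
 * its core rail; restore voltage, current, and rate when it comes back.
 */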
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	int rc, cpu = (int)hcpu;
	struct scalable *sc = &drv.scalable[cpu];
	unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);

		regulator_disable(sc->vreg[VREG_CORE].reg);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		regulator_set_voltage(sc->vreg[VREG_CORE].reg, 0,
				      sc->vreg[VREG_CORE].max_vdd);
		break;
	case CPU_UP_PREPARE:
		if (!sc->initialized) {
			rc = per_cpu_init(cpu);
			if (rc)
				return NOTIFY_BAD;
			break;
		}
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;

		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
					   sc->vreg[VREG_CORE].cur_vdd,
					   sc->vreg[VREG_CORE].max_vdd);
		if (rc < 0)
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						sc->vreg[VREG_CORE].cur_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		rc = regulator_enable(sc->vreg[VREG_CORE].reg);
		if (rc < 0)
			return NOTIFY_BAD;

		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};

static int __init krait_needs_vmin(void)
{
	switch (read_cpuid_id()) {
	case 0x511F04D0: /* KR28M2A20 */
	case 0x511F04D1: /* KR28M2A21 */
	case 0x510F06F0: /* KR28M4A10 */
		return 1;
	default:
		return 0;
	}
}

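/*
 * Enforce a 1.15 V minimum on vdd_core and clear the AVSDSCR setting
 * (leaving AVS disabled) across the whole frequency table.
 */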
static void __init krait_apply_vmin(struct acpu_level *tbl)
{
	for (; tbl->speed.khz != 0; tbl++) {
		if (tbl->vdd_core < 1150000)
			tbl->vdd_core = 1150000;
		tbl->avsdscr_setting = 0;
	}
}

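/*
 * Two PTE eFuse layouts are supported; each decoder extracts the speed
 * and PVS bins, plus validity flags, from the raw eFuse words.
 */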
void __init get_krait_bin_format_a(void __iomem *base, struct bin_info *bin)
{
	u32 pte_efuse = readl_relaxed(base);

	bin->speed = pte_efuse & 0xF;
	if (bin->speed == 0xF)
		bin->speed = (pte_efuse >> 4) & 0xF;
	bin->speed_valid = bin->speed != 0xF;

	bin->pvs = (pte_efuse >> 10) & 0x7;
	if (bin->pvs == 0x7)
		bin->pvs = (pte_efuse >> 13) & 0x7;
	bin->pvs_valid = bin->pvs != 0x7;
}

void __init get_krait_bin_format_b(void __iomem *base, struct bin_info *bin)
{
	u32 pte_efuse, redundant_sel;

	pte_efuse = readl_relaxed(base);
	redundant_sel = (pte_efuse >> 24) & 0x7;
	bin->speed = pte_efuse & 0x7;
	bin->pvs = (pte_efuse >> 6) & 0x7;

	switch (redundant_sel) {
	case 1:
		bin->speed = (pte_efuse >> 27) & 0x7;
		break;
	case 2:
		bin->pvs = (pte_efuse >> 27) & 0x7;
		break;
	}
	bin->speed_valid = true;

	/* Check PVS_BLOW_STATUS. */
	pte_efuse = readl_relaxed(base + 0x4);
	bin->pvs_valid = !!(pte_efuse & BIT(21));
}

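/*
 * Read the speed and PVS (Process Voltage Scaling) bins from the PTE
 * eFuses and pick the matching frequency/voltage table, defaulting to
 * bin 0 when a fuse value is invalid.
 */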
static struct pvs_table * __init select_freq_plan(
		const struct acpuclk_krait_params *params)
{
	void __iomem *pte_efuse_base;
	struct bin_info bin;

	pte_efuse_base = ioremap(params->pte_efuse_phys, 8);
	if (!pte_efuse_base) {
		dev_err(drv.dev, "Unable to map PTE eFuse base\n");
		return NULL;
	}
	params->get_bin_info(pte_efuse_base, &bin);
	iounmap(pte_efuse_base);

	if (bin.speed_valid) {
		drv.speed_bin = bin.speed;
		dev_info(drv.dev, "SPEED BIN: %d\n", drv.speed_bin);
	} else {
		drv.speed_bin = 0;
		dev_warn(drv.dev, "SPEED BIN: Defaulting to %d\n",
			 drv.speed_bin);
	}

	if (bin.pvs_valid) {
		drv.pvs_bin = bin.pvs;
		dev_info(drv.dev, "ACPU PVS: %d\n", drv.pvs_bin);
	} else {
		drv.pvs_bin = 0;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %d\n",
			 drv.pvs_bin);
	}

	return &params->pvs_tables[drv.speed_bin][drv.pvs_bin];
}

static void __init drv_data_init(struct device *dev,
				 const struct acpuclk_krait_params *params)
{
	struct pvs_table *pvs;

	drv.dev = dev;
	drv.scalable = kmemdup(params->scalable, params->scalable_size,
			       GFP_KERNEL);
	BUG_ON(!drv.scalable);

	drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
				 GFP_KERNEL);
	BUG_ON(!drv.hfpll_data);

	drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
				  GFP_KERNEL);
	BUG_ON(!drv.l2_freq_tbl);

	drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
				GFP_KERNEL);
	BUG_ON(!drv.bus_scale);
	drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
		drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
		GFP_KERNEL);
	BUG_ON(!drv.bus_scale->usecase);

	pvs = select_freq_plan(params);
	BUG_ON(!pvs || !pvs->table);

	drv.acpu_freq_tbl = kmemdup(pvs->table, pvs->size, GFP_KERNEL);
	BUG_ON(!drv.acpu_freq_tbl);
	drv.boost_uv = pvs->boost_uv;

	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}

static void __init hw_init(void)
{
	struct scalable *l2 = &drv.scalable[L2];
	const struct l2_level *l2_level;
	int cpu, rc;

	if (krait_needs_vmin())
		krait_apply_vmin(drv.acpu_freq_tbl);

	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
	BUG_ON(!l2->hfpll_base);

	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
				l2->vreg[VREG_HFPLL_A].max_vdd, false);
	BUG_ON(rc);
	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
				l2->vreg[VREG_HFPLL_B].max_vdd, false);
	BUG_ON(rc);

	l2_level = find_cur_l2_level();
	if (!l2_level) {
		l2_level = drv.l2_freq_tbl;
		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
			l2_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
			l2_level->speed.khz);
	}

	rc = init_clock_sources(l2, &l2_level->speed);
	BUG_ON(rc);

	for_each_online_cpu(cpu) {
		rc = per_cpu_init(cpu);
		BUG_ON(rc);
	}

	bus_init(l2_level);
}

int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	drv_data_init(dev, params);
	hw_init();

	cpufreq_table_init();
	dcvs_freq_init();
	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	acpuclk_krait_debug_init(&drv);

	return 0;
}