/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>
#include <mach/msm_dcvs.h>

#include "acpuclock.h"
#include "acpuclock-krait.h"
#include "avs.h"

/* MUX source selects. */
#define PRI_SRC_SEL_SEC_SRC	0
#define PRI_SRC_SEL_HFPLL	1
#define PRI_SRC_SEL_HFPLL_DIV2	2
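
/*
 * L2CPMR mux register layout, as used below (inferred from the accessors in
 * this file rather than from a published register map):
 *   bits [1:0] - primary MUX source select (one of the PRI_SRC_SEL_* values)
 *   bits [3:2] - secondary MUX source select
 *   bits [7:6] - divider for the HFPLL_DIV2 input (cleared to div-2 at init)
 */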

static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);

static struct drv_data drv;

static unsigned long acpuclk_krait_get_rate(int cpu)
{
	return drv.scalable[cpu].cur_speed->khz;
}

/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

/* Select a source on the secondary MUX. */
static void __cpuinit set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

static int enable_rpm_vreg(struct vreg *vreg)
{
	int ret = 0;

	if (vreg->rpm_reg) {
		ret = rpm_regulator_enable(vreg->rpm_reg);
		if (ret)
			dev_err(drv.dev, "%s regulator enable failed (%d)\n",
				vreg->name, ret);
	}

	return ret;
}

static void disable_rpm_vreg(struct vreg *vreg)
{
	int rc;

	if (vreg->rpm_reg) {
		rc = rpm_regulator_disable(vreg->rpm_reg);
		if (rc)
			dev_err(drv.dev, "%s regulator disable failed (%d)\n",
				vreg->name, rc);
	}
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
	if (!skip_regulators) {
		/* Enable regulators required by the HFPLL. */
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
	}

	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/* Wait for PLL to lock. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
}

/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
	}
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	void __iomem *base = sc->hfpll_base;
	u32 regval;

	writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);

	if (drv.hfpll_data->has_user_reg) {
		regval = readl_relaxed(base + drv.hfpll_data->user_offset);
		if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
			regval &= ~drv.hfpll_data->user_vco_mask;
		else
			regval |= drv.hfpll_data->user_vco_mask;
		writel_relaxed(regval, base + drv.hfpll_data->user_offset);
	}
}

/* Return the L2 speed that should be applied. */
static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l)
{
	unsigned int new_l = 0;
	int cpu;

	/* Find max L2 speed vote. */
	sc->l2_vote = vote_l;
	for_each_present_cpu(cpu)
		new_l = max(new_l, drv.scalable[cpu].l2_vote);

	return new_l;
}
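
/*
 * Example of the voting scheme above: if CPU0's standing vote is L2 level 2
 * and CPU1 now votes for level 5, compute_l2_level() records CPU1's vote and
 * returns 5; when CPU1 later drops its vote to 0, the maximum falls back to
 * CPU0's level 2.
 */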

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	/* Update bandwidth if request has changed. This may sleep. */
	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
	if (ret)
		dev_err(drv.dev, "bandwidth request failed (%d)\n", ret);
}

/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s,
		      bool skip_regulators)
{
	const struct core_speed *strt_s = sc->cur_speed;

	if (strt_s == tgt_s)
		return;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Re-program HFPLL. */
		hfpll_disable(sc, true);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, true);

		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, skip_regulators);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, skip_regulators);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	}

	sc->cur_speed = tgt_s;
}

struct vdd_data {
	int vdd_mem;
	int vdd_dig;
	int vdd_core;
	int ua_core;
};

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, struct vdd_data *data,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc;

	/*
	 * Increase vdd_mem active-set before vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}

	/* Increase vdd_dig active-set vote. */
	if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/* Increase current request. */
	if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						data->ua_core);
		if (rc < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/*
	 * Update per-CPU core voltage. Don't do this for the hotplug path for
	 * which it should already be correct. Attempting to set it is bad
	 * because we don't know what CPU we are running on at this point, but
	 * the CPU regulator API requires we call it from the affected CPU.
	 */
	if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	return 0;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, struct vdd_data *data,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
			&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	/* Decrease current request. */
	if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 data->ua_core);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}
}

static int calculate_vdd_mem(const struct acpu_level *tgt)
{
	return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
}

static int get_src_dig(const struct core_speed *s)
{
	const int *hfpll_vdd = drv.hfpll_data->vdd;
	const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
	const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;

	if (s->src != HFPLL)
		return hfpll_vdd[HFPLL_VDD_NONE];
	else if (s->pll_l_val > nom_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_HIGH];
	else if (s->pll_l_val > low_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_NOM];
	else
		return hfpll_vdd[HFPLL_VDD_LOW];
}
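
/*
 * In other words, the HFPLL's digital-rail requirement is banded by its
 * L-value: non-HFPLL sources need no vote, and progressively larger L-values
 * (higher PLL output rates) step the vote up from LOW through NOM to HIGH.
 */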

static int calculate_vdd_dig(const struct acpu_level *tgt)
{
	int l2_pll_vdd_dig, cpu_pll_vdd_dig;

	l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
	cpu_pll_vdd_dig = get_src_dig(&tgt->speed);

	return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
		   max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
}

static bool enable_boost = true;
module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);

static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
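
/*
 * Worked example (illustrative numbers, not from a real frequency plan):
 * with a table entry of vdd_core = 1050000 uV and drv.boost_uv = 25000 uV,
 * the core rail is requested at 1075000 uV while the "boost" module
 * parameter is set, and at 1050000 uV once it is cleared.
 */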

static DEFINE_MUTEX(l2_regulator_lock);
static int l2_vreg_count;

static int enable_l2_regulators(void)
{
	int ret = 0;

	mutex_lock(&l2_regulator_lock);
	if (l2_vreg_count == 0) {
		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
		if (ret)
			goto out;
		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
		if (ret) {
			disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
			goto out;
		}
	}
	l2_vreg_count++;
out:
	mutex_unlock(&l2_regulator_lock);

	return ret;
}

static void disable_l2_regulators(void)
{
	mutex_lock(&l2_regulator_lock);

	if (WARN(!l2_vreg_count, "L2 regulator votes are unbalanced!"))
		goto out;

	if (l2_vreg_count == 1) {
		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
	}
	l2_vreg_count--;
out:
	mutex_unlock(&l2_regulator_lock);
}

/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
				  enum setrate_reason reason)
{
	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
	const struct acpu_level *tgt;
	int tgt_l2_l;
	enum src_id prev_l2_src = NUM_SRC_ID;
	struct vdd_data vdd_data;
	bool skip_regulators;
	int rc = 0;
	if (cpu >= num_possible_cpus())
		return -EINVAL;

	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_lock(&driver_lock);

	strt_acpu_s = drv.scalable[cpu].cur_speed;

	/* Return early if rate didn't change. */
	if (rate == strt_acpu_s->khz)
		goto out;

	/* Find target frequency. */
	for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
		if (tgt->speed.khz == rate) {
			tgt_acpu_s = &tgt->speed;
			break;
		}
	}
	if (tgt->speed.khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Calculate voltage requirements for the current CPU. */
	vdd_data.vdd_mem = calculate_vdd_mem(tgt);
	vdd_data.vdd_dig = calculate_vdd_dig(tgt);
	vdd_data.vdd_core = calculate_vdd_core(tgt);
	vdd_data.ua_core = tgt->ua_core;

	/* Disable AVS before voltage switch */
	if (reason == SETRATE_CPUFREQ && drv.scalable[cpu].avs_enabled) {
		AVS_DISABLE(cpu);
		drv.scalable[cpu].avs_enabled = false;
	}

	/* Increase VDD levels if needed. */
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
		rc = increase_vdd(cpu, &vdd_data, reason);
		if (rc)
			goto out;

		prev_l2_src =
			drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src;
		/* Vote for the L2 regulators here if necessary. */
		if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL) {
			rc = enable_l2_regulators();
			if (rc)
				goto out;
		}
	}

	dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
		cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

	/*
	 * If we are setting the rate as part of power collapse, or in the
	 * resume path after power collapse, skip the vote for the HFPLL
	 * regulators. Those votes are active-set-only and will be dropped
	 * automatically when the Apps processor enters its sleep set, and
	 * skipping them here avoids calling sleeping regulator APIs from an
	 * atomic context.
	 */
	skip_regulators = (reason == SETRATE_PC);

	/* Set the new CPU speed. */
	set_speed(&drv.scalable[cpu], tgt_acpu_s, skip_regulators);

	/*
	 * Update the L2 vote and apply the rate change. A spinlock is
	 * necessary to ensure L2 rate is calculated and set atomically
	 * with the CPU frequency, even if acpuclk_krait_set_rate() is
	 * called from an atomic context and the driver_lock mutex is not
	 * acquired.
	 */
	spin_lock(&l2_lock);
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	set_speed(&drv.scalable[L2],
		  &drv.l2_freq_tbl[tgt_l2_l].speed, true);
	spin_unlock(&l2_lock);

	/* Nothing else to do for power collapse or SWFI. */
	if (reason == SETRATE_PC || reason == SETRATE_SWFI)
		goto out;

	/*
	 * Remove the vote for the L2 HFPLL regulators only if the L2
	 * was already on an HFPLL source.
	 */
	if (prev_l2_src == HFPLL)
		disable_l2_regulators();

	/* Update bus bandwidth request. */
	set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);

	/* Drop VDD levels if we can. */
	decrease_vdd(cpu, &vdd_data, reason);

	/* Re-enable AVS */
	if (reason == SETRATE_CPUFREQ && tgt->avsdscr_setting) {
		AVS_ENABLE(cpu, tgt->avsdscr_setting);
		drv.scalable[cpu].avs_enabled = true;
	}

	dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);

out:
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_unlock(&driver_lock);
	return rc;
}

static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
};
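
/*
 * Sketch of how this table is reached, assuming the usual mach-msm acpuclock
 * wiring (acpuclk_set_rate() is declared in acpuclock.h, not in this file):
 * once acpuclk_krait_init() has called acpuclk_register(&acpuclk_krait_data),
 * a consumer such as the cpufreq driver requests, e.g.:
 *
 *	acpuclk_set_rate(cpu, rate_khz, SETRATE_CPUFREQ);
 *
 * which dispatches to acpuclk_krait_set_rate() above.
 */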

/* Initialize a HFPLL at a given rate and enable it. */
static void __cpuinit hfpll_init(struct scalable *sc,
				 const struct core_speed *tgt_s)
{
	dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, true);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(drv.hfpll_data->config_val,
		       sc->hfpll_base + drv.hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
	if (drv.hfpll_data->has_user_reg)
		writel_relaxed(drv.hfpll_data->user_val,
			       sc->hfpll_base + drv.hfpll_data->user_offset);

	/* Program droop controller, if supported */
	if (drv.hfpll_data->has_droop_ctl)
		writel_relaxed(drv.hfpll_data->droop_val,
			       sc->hfpll_base + drv.hfpll_data->droop_offset);

	/* Set an initial PLL rate. */
	hfpll_set_rate(sc, tgt_s);
}

static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
					int vdd, bool enable)
{
	int ret;

	if (!sc->vreg[vreg].name)
		return 0;

	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
						   sc->vreg[vreg].name);
	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
		ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_get;
	}

	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
					sc->vreg[vreg].max_vdd);
	if (ret) {
		dev_err(drv.dev, "%s initialization failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_conf;
	}
	sc->vreg[vreg].cur_vdd = vdd;

	if (enable) {
		ret = enable_rpm_vreg(&sc->vreg[vreg]);
		if (ret)
			goto err_conf;
	}

	return 0;

err_conf:
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
err_get:
	return ret;
}

static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
					    enum vregs vreg)
{
	if (!sc->vreg[vreg].rpm_reg)
		return;

	disable_rpm_vreg(&sc->vreg[vreg]);
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
}

/* Voltage regulator initialization. */
static int __cpuinit regulator_init(struct scalable *sc,
				    const struct acpu_level *acpu_level)
{
	int ret, vdd_mem, vdd_dig, vdd_core;

	vdd_mem = calculate_vdd_mem(acpu_level);
	ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
	if (ret)
		goto err_mem;

	vdd_dig = calculate_vdd_dig(acpu_level);
	ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
	if (ret)
		goto err_dig;

	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
				 sc->vreg[VREG_HFPLL_A].max_vdd, false);
	if (ret)
		goto err_hfpll_a;
	ret = rpm_regulator_init(sc, VREG_HFPLL_B,
				 sc->vreg[VREG_HFPLL_B].max_vdd, false);
	if (ret)
		goto err_hfpll_b;

	/* Setup Krait CPU regulators and initial core voltage. */
	sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
						sc->vreg[VREG_CORE].name);
	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
		ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
		dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_get;
	}
	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
					 acpu_level->ua_core);
	if (ret < 0) {
		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
	vdd_core = calculate_vdd_core(acpu_level);
	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
				    sc->vreg[VREG_CORE].max_vdd);
	if (ret) {
		dev_err(drv.dev, "regulator_set_voltage(%s) (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
	if (ret) {
		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}

	/*
	 * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
	 * requires a corresponding target L2 frequency that needs the L2 to
	 * run off of an HFPLL.
	 */
	if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
		l2_vreg_count++;

	return 0;

err_core_conf:
	regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
err_hfpll_b:
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
err_hfpll_a:
	rpm_regulator_cleanup(sc, VREG_DIG);
err_dig:
	rpm_regulator_cleanup(sc, VREG_MEM);
err_mem:
	return ret;
}

static void __cpuinit regulator_cleanup(struct scalable *sc)
{
	regulator_disable(sc->vreg[VREG_CORE].reg);
	regulator_put(sc->vreg[VREG_CORE].reg);
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
	rpm_regulator_cleanup(sc, VREG_DIG);
	rpm_regulator_cleanup(sc, VREG_MEM);
}

/* Set initial rate for a given core. */
static int __cpuinit init_clock_sources(struct scalable *sc,
					const struct core_speed *tgt_s)
{
	u32 regval;
	void __iomem *aux_reg;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_phys) {
		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
		if (!aux_reg)
			return -ENOMEM;
		writel_relaxed(sc->aux_clk_sel, aux_reg);
		iounmap(aux_reg);
	}

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, sc->sec_clk_sel);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Enable and switch to the target clock source. */
	if (tgt_s->src == HFPLL)
		hfpll_enable(sc, false);
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;

	return 0;
}

static void __cpuinit fill_cur_core_speed(struct core_speed *s,
					  struct scalable *sc)
{
	s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
	s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
}

static bool __cpuinit speed_equal(const struct core_speed *s1,
				  const struct core_speed *s2)
{
	return (s1->pri_src_sel == s2->pri_src_sel &&
		s1->pll_l_val == s2->pll_l_val);
}

static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct l2_level __init *find_cur_l2_level(void)
{
	struct scalable *sc = &drv.scalable[L2];
	const struct l2_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct acpu_level __cpuinit *find_min_acpu_level(void)
{
	struct acpu_level *l;

	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (l->use_for_scaling)
			return l;

	return NULL;
}
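
/*
 * The finders above let the driver adopt whatever rate the bootloader left a
 * core (or the L2) running at: a speed is fingerprinted by its
 * (pri_src_sel, pll_l_val) pair, and when no table entry matches, callers
 * fall back to a known-safe default instead of guessing.
 */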

static int __cpuinit per_cpu_init(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *acpu_level;
	int ret;

	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
	if (!sc->hfpll_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	acpu_level = find_cur_acpu_level(cpu);
	if (!acpu_level) {
		acpu_level = find_min_acpu_level();
		if (!acpu_level) {
			ret = -ENODEV;
			goto err_table;
		}
		dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
			cpu, acpu_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
			acpu_level->speed.khz);
	}

	ret = regulator_init(sc, acpu_level);
	if (ret)
		goto err_regulators;

	ret = init_clock_sources(sc, &acpu_level->speed);
	if (ret)
		goto err_clocks;

	sc->l2_vote = acpu_level->l2_level;
	sc->initialized = true;

	return 0;

err_clocks:
	regulator_cleanup(sc);
err_regulators:
err_table:
	iounmap(sc->hfpll_base);
err_ioremap:
	return ret;
}

/* Register with bus driver. */
static void __init bus_init(const struct l2_level *l2_level)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
						  l2_level->bw_level);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

static void __init cpufreq_table_init(void)
{
	int cpu;
	int freq_cnt = 0;

	for_each_possible_cpu(cpu) {
		int i;
		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0, freq_cnt = 0; drv.acpu_freq_tbl[i].speed.khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table) - 1; i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}

	dev_info(drv.dev, "CPU Frequencies Supported: %d\n", freq_cnt);
}
#else
static void __init cpufreq_table_init(void) {}
#endif
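
/*
 * Sizing note: freq_table reserves 35 slots per CPU, which must cover every
 * scaling entry plus the CPUFREQ_TABLE_END terminator; the BUG_ON() in
 * cpufreq_table_init() trips at boot if a frequency plan ever outgrows the
 * table, rather than silently truncating it.
 */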

static void __init dcvs_freq_init(void)
{
	int i;

	for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0; i++)
		if (drv.acpu_freq_tbl[i].use_for_scaling)
			msm_dcvs_register_cpu_freq(
				drv.acpu_freq_tbl[i].speed.khz,
				drv.acpu_freq_tbl[i].vdd_core / 1000);
}
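
/*
 * vdd_core is carried in microvolts throughout this driver (note the
 * 1150000 uV floor in krait_apply_vmin() below), so the divide by 1000
 * above presumably hands msm_dcvs_register_cpu_freq() millivolts.
 */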

static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	int rc, cpu = (int)hcpu;
	struct scalable *sc = &drv.scalable[cpu];
	unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);

		regulator_disable(sc->vreg[VREG_CORE].reg);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		regulator_set_voltage(sc->vreg[VREG_CORE].reg, 0,
				      sc->vreg[VREG_CORE].max_vdd);
		break;
	case CPU_UP_PREPARE:
		if (!sc->initialized) {
			rc = per_cpu_init(cpu);
			if (rc)
				return NOTIFY_BAD;
			break;
		}
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;

		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
					   sc->vreg[VREG_CORE].cur_vdd,
					   sc->vreg[VREG_CORE].max_vdd);
		if (rc < 0)
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						sc->vreg[VREG_CORE].cur_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		rc = regulator_enable(sc->vreg[VREG_CORE].reg);
		if (rc < 0)
			return NOTIFY_BAD;

		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};
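
/*
 * Hotplug flow in brief: CPU_DEAD records the outgoing rate and parks the
 * core at the power-collapse rate with its rail votes dropped;
 * CPU_UP_PREPARE restores the voltage and current votes before the core
 * runs again, doing a full per_cpu_init() the first time a core comes up.
 */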

static int __init krait_needs_vmin(void)
{
	switch (read_cpuid_id()) {
	case 0x511F04D0: /* KR28M2A20 */
	case 0x511F04D1: /* KR28M2A21 */
	case 0x510F06F0: /* KR28M4A10 */
		return 1;
	default:
		return 0;
	}
}

static void __init krait_apply_vmin(struct acpu_level *tbl)
{
	for (; tbl->speed.khz != 0; tbl++) {
		if (tbl->vdd_core < 1150000)
			tbl->vdd_core = 1150000;
		tbl->avsdscr_setting = 0;
	}
}

void __init get_krait_bin_format_a(void __iomem *base, struct bin_info *bin)
{
	u32 pte_efuse = readl_relaxed(base);

	bin->speed = pte_efuse & 0xF;
	if (bin->speed == 0xF)
		bin->speed = (pte_efuse >> 4) & 0xF;
	bin->speed_valid = bin->speed != 0xF;

	bin->pvs = (pte_efuse >> 10) & 0x7;
	if (bin->pvs == 0x7)
		bin->pvs = (pte_efuse >> 13) & 0x7;
	bin->pvs_valid = bin->pvs != 0x7;
}
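
/*
 * Worked example for format A (hypothetical fuse value): 0x1C4F has its
 * primary speed field blown to 0xF, so the redundant field at bits [7:4]
 * supplies speed bin 4; the primary PVS field at bits [12:10] reads 0x7, so
 * the redundant field at bits [15:13] supplies PVS bin 0. Both are valid.
 */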

void __init get_krait_bin_format_b(void __iomem *base, struct bin_info *bin)
{
	u32 pte_efuse, redundant_sel;

	pte_efuse = readl_relaxed(base);
	redundant_sel = (pte_efuse >> 24) & 0x7;
	bin->speed = pte_efuse & 0x7;
	bin->pvs = (pte_efuse >> 6) & 0x7;

	switch (redundant_sel) {
	case 1:
		bin->speed = (pte_efuse >> 27) & 0x7;
		break;
	case 2:
		bin->pvs = (pte_efuse >> 27) & 0x7;
		break;
	}
	bin->speed_valid = true;

	/* Check PVS_BLOW_STATUS */
	pte_efuse = readl_relaxed(base + 0x4);
	bin->pvs_valid = !!(pte_efuse & BIT(21));
}
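
/*
 * Format B instead carries an explicit redundancy selector at bits [26:24]:
 * a value of 1 redirects the speed bin to bits [29:27], a value of 2
 * redirects the PVS bin there, and the PVS result is only trusted when the
 * blow-status bit (bit 21) in the following fuse word is set.
 */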

static struct pvs_table * __init select_freq_plan(
		const struct acpuclk_krait_params *params)
{
	void __iomem *pte_efuse_base;
	struct bin_info bin;

	pte_efuse_base = ioremap(params->pte_efuse_phys, 8);
	if (!pte_efuse_base) {
		dev_err(drv.dev, "Unable to map PTE eFuse base\n");
		return NULL;
	}
	params->get_bin_info(pte_efuse_base, &bin);
	iounmap(pte_efuse_base);

	if (bin.speed_valid) {
		drv.speed_bin = bin.speed;
		dev_info(drv.dev, "SPEED BIN: %d\n", drv.speed_bin);
	} else {
		drv.speed_bin = 0;
		dev_warn(drv.dev, "SPEED BIN: Defaulting to %d\n",
			 drv.speed_bin);
	}

	if (bin.pvs_valid) {
		drv.pvs_bin = bin.pvs;
		dev_info(drv.dev, "ACPU PVS: %d\n", drv.pvs_bin);
	} else {
		drv.pvs_bin = 0;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %d\n",
			 drv.pvs_bin);
	}

	return &params->pvs_tables[drv.speed_bin][drv.pvs_bin];
}
Matt Wagantall06e4a1f2012-06-07 18:38:13 -07001105
Matt Wagantall1f3762d2012-06-08 19:08:48 -07001106static void __init drv_data_init(struct device *dev,
1107 const struct acpuclk_krait_params *params)
Matt Wagantalle9b715a2012-01-04 18:16:14 -08001108{
Patrick Daly18d2d482012-08-24 14:22:06 -07001109 struct pvs_table *pvs;
Matt Wagantall1f3762d2012-06-08 19:08:48 -07001110
1111 drv.dev = dev;
1112 drv.scalable = kmemdup(params->scalable, params->scalable_size,
1113 GFP_KERNEL);
1114 BUG_ON(!drv.scalable);
1115
1116 drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
1117 GFP_KERNEL);
1118 BUG_ON(!drv.hfpll_data);
1119
1120 drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
1121 GFP_KERNEL);
1122 BUG_ON(!drv.l2_freq_tbl);
1123
1124 drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
1125 GFP_KERNEL);
1126 BUG_ON(!drv.bus_scale);
1127 drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
1128 drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
1129 GFP_KERNEL);
1130 BUG_ON(!drv.bus_scale->usecase);
1131
Matt Wagantallf9a4d322013-01-14 18:01:24 -08001132 pvs = select_freq_plan(params);
	/* select_freq_plan() returns NULL if the eFuse region can't be mapped. */
	BUG_ON(!pvs || !pvs->table);

	drv.acpu_freq_tbl = kmemdup(pvs->table, pvs->size, GFP_KERNEL);
	BUG_ON(!drv.acpu_freq_tbl);
	drv.boost_uv = pvs->boost_uv;

	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}

static void __init hw_init(void)
{
	struct scalable *l2 = &drv.scalable[L2];
	const struct l2_level *l2_level;
	int cpu, rc;

	if (krait_needs_vmin())
		krait_apply_vmin(drv.acpu_freq_tbl);

	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
	BUG_ON(!l2->hfpll_base);

	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
				l2->vreg[VREG_HFPLL_A].max_vdd, false);
	BUG_ON(rc);
	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
				l2->vreg[VREG_HFPLL_B].max_vdd, false);
	BUG_ON(rc);

	l2_level = find_cur_l2_level();
	if (!l2_level) {
		l2_level = drv.l2_freq_tbl;
		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
			l2_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
			l2_level->speed.khz);
	}

	rc = init_clock_sources(l2, &l2_level->speed);
	BUG_ON(rc);

	for_each_online_cpu(cpu) {
		rc = per_cpu_init(cpu);
		BUG_ON(rc);
	}

	bus_init(l2_level);
}

int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	drv_data_init(dev, params);
	hw_init();

	cpufreq_table_init();
	dcvs_freq_init();
	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	acpuclk_krait_debug_init(&drv);

	return 0;
}
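
/*
 * Minimal sketch of how an SoC file is expected to hook this driver up.
 * The names below are illustrative (assuming the usual mach-msm pattern,
 * e.g. acpuclock-8960.c), not definitions from this file:
 *
 *	static struct acpuclk_krait_params acpuclk_8960_params = {
 *		.scalable	= scalable_8960,
 *		.scalable_size	= sizeof(scalable_8960),
 *		...
 *		.stby_khz	= 384000,
 *	};
 *
 *	static int __init acpuclk_8960_probe(struct platform_device *pdev)
 *	{
 *		return acpuclk_krait_init(&pdev->dev, &acpuclk_8960_params);
 *	}
 */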