/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>
#include <mach/msm_dcvs.h>

#include "acpuclock.h"
#include "acpuclock-krait.h"
#include "avs.h"

/* MUX source selects. */
#define PRI_SRC_SEL_SEC_SRC     0
#define PRI_SRC_SEL_HFPLL       1
#define PRI_SRC_SEL_HFPLL_DIV2  2
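
/*
 * Each Krait core (and the L2) feeds from a small clock MUX tree: the
 * primary MUX chooses between the secondary MUX output, the core's HFPLL,
 * and the HFPLL divided by two, per the selects above. The selects land in
 * the low bits of a per-core clock/power mode register reached indirectly
 * through the L2 accessors; see set_pri_clk_src() and set_sec_clk_src()
 * below.
 */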

static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);

static struct drv_data drv;

static unsigned long acpuclk_krait_get_rate(int cpu)
{
        return drv.scalable[cpu].cur_speed->khz;
}

/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
        u32 regval;

        regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
        regval &= ~0x3;
        regval |= (pri_src_sel & 0x3);
        set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
        /* Wait for switch to complete. */
        mb();
        udelay(1);
}

/* Select a source on the secondary MUX. */
static void __cpuinit set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
        u32 regval;

        regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
        regval &= ~(0x3 << 2);
        regval |= ((sec_src_sel & 0x3) << 2);
        set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
        /* Wait for switch to complete. */
        mb();
        udelay(1);
}
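
/*
 * Layout of the indirectly-addressed CPMR register used above, as implied
 * by the accesses in this file: bits [1:0] select the primary MUX source,
 * bits [3:2] select the secondary MUX source, and bits [7:6] hold the
 * divider for the PRI_SRC_SEL_HFPLL_DIV2 input (cleared to div-2 in
 * init_clock_sources()).
 */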

static int enable_rpm_vreg(struct vreg *vreg)
{
        int ret = 0;

        if (vreg->rpm_reg) {
                ret = rpm_regulator_enable(vreg->rpm_reg);
                if (ret)
                        dev_err(drv.dev, "%s regulator enable failed (%d)\n",
                                vreg->name, ret);
        }

        return ret;
}

static void disable_rpm_vreg(struct vreg *vreg)
{
        int rc;

        if (vreg->rpm_reg) {
                rc = rpm_regulator_disable(vreg->rpm_reg);
                if (rc)
                        dev_err(drv.dev, "%s regulator disable failed (%d)\n",
                                vreg->name, rc);
        }
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
        if (!skip_regulators) {
                /* Enable regulators required by the HFPLL. */
                enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
                enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
        }

        /* Disable PLL bypass mode. */
        writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset);

        /*
         * H/W requires a 5us delay between disabling the bypass and
         * de-asserting the reset. Delay 10us just to be safe.
         */
        mb();
        udelay(10);

        /* De-assert active-low PLL reset. */
        writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);

        /* Wait for PLL to lock. */
        mb();
        udelay(60);

        /* Enable PLL output. */
        writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
}
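
/*
 * The mode-register values written above follow the usual Qualcomm PLL bit
 * assignment (an assumption here, not spelled out in this file): bit 0 =
 * output enable, bit 1 = bypass disable (active-low bypass), bit 2 = reset
 * de-assert (active-low reset). Hence the 0x2 -> 0x6 -> 0x7 sequence in
 * hfpll_enable() and the single write of 0 in hfpll_disable() below.
 */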

/* Disable a HFPLL for power-savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
        /*
         * Disable the PLL output, disable test mode, enable the bypass mode,
         * and assert the reset.
         */
        writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);

        if (!skip_regulators) {
                /* Remove voltage votes required by the HFPLL. */
                disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
                disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
        }
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
        void __iomem *base = sc->hfpll_base;
        u32 regval;

        writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);

        if (drv.hfpll_data->has_user_reg) {
                regval = readl_relaxed(base + drv.hfpll_data->user_offset);
                if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
                        regval &= ~drv.hfpll_data->user_vco_mask;
                else
                        regval |= drv.hfpll_data->user_vco_mask;
                writel_relaxed(regval, base + drv.hfpll_data->user_offset);
        }
}
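
/*
 * The PLL runs in integer mode, so the output is pll_l_val times the
 * reference clock. As a worked example (assuming the 27 MHz PXO reference
 * used on MSM8960-class parts, which this file does not state explicitly),
 * an L value of 0x22 (34) yields 34 * 27 MHz = 918 MHz.
 */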

/* Return the L2 speed that should be applied. */
static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l)
{
        unsigned int new_l = 0;
        int cpu;

        /* Find max L2 speed vote. */
        sc->l2_vote = vote_l;
        for_each_present_cpu(cpu)
                new_l = max(new_l, drv.scalable[cpu].l2_vote);

        return new_l;
}

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
        int ret;

        /* Update bandwidth if request has changed. This may sleep. */
        ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
        if (ret)
                dev_err(drv.dev, "bandwidth request failed (%d)\n", ret);
}

/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s,
                      bool skip_regulators)
{
        const struct core_speed *strt_s = sc->cur_speed;

        if (strt_s == tgt_s)
                return;

        if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
                /*
                 * Move to an always-on source running at a frequency
                 * that does not require an elevated CPU voltage.
                 */
                set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

                /* Re-program HFPLL. */
                hfpll_disable(sc, true);
                hfpll_set_rate(sc, tgt_s);
                hfpll_enable(sc, true);

                /* Move to HFPLL. */
                set_pri_clk_src(sc, tgt_s->pri_src_sel);
        } else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
                set_pri_clk_src(sc, tgt_s->pri_src_sel);
                hfpll_disable(sc, skip_regulators);
        } else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
                hfpll_set_rate(sc, tgt_s);
                hfpll_enable(sc, skip_regulators);
                set_pri_clk_src(sc, tgt_s->pri_src_sel);
        }

        sc->cur_speed = tgt_s;
}
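
/*
 * In short, set_speed() handles three transitions: HFPLL -> HFPLL (park on
 * the secondary source while the PLL is reprogrammed), HFPLL -> fixed
 * source (switch the MUX first, then shut the PLL down), and fixed source
 * -> HFPLL (bring the PLL up first, then switch the MUX). A switch between
 * two non-HFPLL speeds requires no register writes at all.
 */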

struct vdd_data {
        int vdd_mem;
        int vdd_dig;
        int vdd_core;
        int ua_core;
};

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, struct vdd_data *data,
                        enum setrate_reason reason)
{
        struct scalable *sc = &drv.scalable[cpu];
        int rc;

        /*
         * Increase vdd_mem active-set before vdd_dig.
         * vdd_mem should be >= vdd_dig.
         */
        if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
                rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
                                data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
                if (rc) {
                        dev_err(drv.dev,
                                "vdd_mem (cpu%d) increase failed (%d)\n",
                                cpu, rc);
                        return rc;
                }
                sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
        }

        /* Increase vdd_dig active-set vote. */
        if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
                rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
                                data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
                if (rc) {
                        dev_err(drv.dev,
                                "vdd_dig (cpu%d) increase failed (%d)\n",
                                cpu, rc);
                        return rc;
                }
                sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
        }

        /* Increase current request. */
        if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
                rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
                                                data->ua_core);
                if (rc < 0) {
                        dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
                                sc->vreg[VREG_CORE].name, rc);
                        return rc;
                }
                sc->vreg[VREG_CORE].cur_ua = data->ua_core;
        }

        /*
         * Update per-CPU core voltage. Don't do this for the hotplug path for
         * which it should already be correct. Attempting to set it is bad
         * because we don't know what CPU we are running on at this point, but
         * the CPU regulator API requires we call it from the affected CPU.
         */
        if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
                        && reason != SETRATE_HOTPLUG) {
                rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
                                data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
                if (rc) {
                        dev_err(drv.dev,
                                "vdd_core (cpu%d) increase failed (%d)\n",
                                cpu, rc);
                        return rc;
                }
                sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
        }

        return 0;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, struct vdd_data *data,
                         enum setrate_reason reason)
{
        struct scalable *sc = &drv.scalable[cpu];
        int ret;

        /*
         * Update per-CPU core voltage. This must be called on the CPU
         * that's being affected. Don't do this in the hotplug remove path,
         * where the rail is off and we're executing on the other CPU.
         */
        if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
                        && reason != SETRATE_HOTPLUG) {
                ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
                                data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
                if (ret) {
                        dev_err(drv.dev,
                                "vdd_core (cpu%d) decrease failed (%d)\n",
                                cpu, ret);
                        return;
                }
                sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
        }

        /* Decrease current request. */
        if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
                ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
                                                 data->ua_core);
                if (ret < 0) {
                        dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
                                sc->vreg[VREG_CORE].name, ret);
                        return;
                }
                sc->vreg[VREG_CORE].cur_ua = data->ua_core;
        }

        /* Decrease vdd_dig active-set vote. */
        if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
                ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
                                data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
                if (ret) {
                        dev_err(drv.dev,
                                "vdd_dig (cpu%d) decrease failed (%d)\n",
                                cpu, ret);
                        return;
                }
                sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
        }

        /*
         * Decrease vdd_mem active-set after vdd_dig.
         * vdd_mem should be >= vdd_dig.
         */
        if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
                ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
                                data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
                if (ret) {
                        dev_err(drv.dev,
                                "vdd_mem (cpu%d) decrease failed (%d)\n",
                                cpu, ret);
                        return;
                }
                sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
        }
}

static int calculate_vdd_mem(const struct acpu_level *tgt)
{
        return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
}

static int get_src_dig(const struct core_speed *s)
{
        const int *hfpll_vdd = drv.hfpll_data->vdd;
        const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
        const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;

        if (s->src != HFPLL)
                return hfpll_vdd[HFPLL_VDD_NONE];
        else if (s->pll_l_val > nom_vdd_l_max)
                return hfpll_vdd[HFPLL_VDD_HIGH];
        else if (s->pll_l_val > low_vdd_l_max)
                return hfpll_vdd[HFPLL_VDD_NOM];
        else
                return hfpll_vdd[HFPLL_VDD_LOW];
}

static int calculate_vdd_dig(const struct acpu_level *tgt)
{
        int l2_pll_vdd_dig, cpu_pll_vdd_dig;

        l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
        cpu_pll_vdd_dig = get_src_dig(&tgt->speed);

        return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
                   max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
}

static bool enable_boost = true;
module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);

static int calculate_vdd_core(const struct acpu_level *tgt)
{
        return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
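
/*
 * Since the boost parameter is S_IWUSR, root can toggle it at runtime; on
 * a typical build that would be via
 * /sys/module/acpuclock_krait/parameters/boost (path assumed from the
 * module name, not stated in this file). The new value only takes effect
 * at the next frequency switch, when calculate_vdd_core() is re-evaluated.
 */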

static DEFINE_MUTEX(l2_regulator_lock);
static int l2_vreg_count;

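/*
 * The L2 HFPLL regulator votes are refcounted: the first voter enables
 * both HFPLL supplies and the last voter removes them, so CPUs can vote
 * independently. regulator_init() pre-increments the count for CPUs that
 * boot with the L2 already running off an HFPLL.
 */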
static int enable_l2_regulators(void)
{
        int ret = 0;

        mutex_lock(&l2_regulator_lock);
        if (l2_vreg_count == 0) {
                ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
                if (ret)
                        goto out;
                ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
                if (ret) {
                        disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
                        goto out;
                }
        }
        l2_vreg_count++;
out:
        mutex_unlock(&l2_regulator_lock);

        return ret;
}

static void disable_l2_regulators(void)
{
        mutex_lock(&l2_regulator_lock);

        if (WARN(!l2_vreg_count, "L2 regulator votes are unbalanced!"))
                goto out;

        if (l2_vreg_count == 1) {
                disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
                disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
        }
        l2_vreg_count--;
out:
        mutex_unlock(&l2_regulator_lock);
}

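/*
 * Locking overview for the set-rate path: driver_lock serializes CPUFREQ
 * and HOTPLUG requests (which may sleep), while the l2_lock spinlock
 * protects the L2 vote/speed update so it stays atomic even for
 * SETRATE_PC/SETRATE_SWFI callers arriving in atomic context without the
 * mutex.
 */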
/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
                                  enum setrate_reason reason)
{
        const struct core_speed *strt_acpu_s, *tgt_acpu_s;
        const struct acpu_level *tgt;
        int tgt_l2_l;
        enum src_id prev_l2_src = NUM_SRC_ID;
        struct vdd_data vdd_data;
        bool skip_regulators;
        int rc = 0;

        if (cpu >= num_possible_cpus())
                return -EINVAL;

        if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
                mutex_lock(&driver_lock);

        strt_acpu_s = drv.scalable[cpu].cur_speed;

        /* Return early if rate didn't change. */
        if (rate == strt_acpu_s->khz)
                goto out;

        /* Find target frequency. */
        for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
                if (tgt->speed.khz == rate) {
                        tgt_acpu_s = &tgt->speed;
                        break;
                }
        }
        if (tgt->speed.khz == 0) {
                rc = -EINVAL;
                goto out;
        }

        /* Calculate voltage requirements for the current CPU. */
        vdd_data.vdd_mem = calculate_vdd_mem(tgt);
        vdd_data.vdd_dig = calculate_vdd_dig(tgt);
        vdd_data.vdd_core = calculate_vdd_core(tgt);
        vdd_data.ua_core = tgt->ua_core;

        /* Disable AVS before voltage switch */
        if (reason == SETRATE_CPUFREQ && drv.scalable[cpu].avs_enabled) {
                AVS_DISABLE(cpu);
                drv.scalable[cpu].avs_enabled = false;
        }

        /* Increase VDD levels if needed. */
        if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
                rc = increase_vdd(cpu, &vdd_data, reason);
                if (rc)
                        goto out;

                prev_l2_src =
                        drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src;
                /* Vote for the L2 regulators here if necessary. */
                if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL) {
                        rc = enable_l2_regulators();
                        if (rc)
                                goto out;
                }
        }

        dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
                cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

        /*
         * If we are setting the rate as part of power collapse or in the
         * resume path after power collapse, skip the vote for the HFPLL
         * regulators, which are active-set-only votes that will be removed
         * when the apps processor enters its sleep set. This is needed to
         * avoid voting for regulators with sleeping APIs from an atomic
         * context.
         */
        skip_regulators = (reason == SETRATE_PC);

        /* Set the new CPU speed. */
        set_speed(&drv.scalable[cpu], tgt_acpu_s, skip_regulators);

        /*
         * Update the L2 vote and apply the rate change. A spinlock is
         * necessary to ensure L2 rate is calculated and set atomically
         * with the CPU frequency, even if acpuclk_krait_set_rate() is
         * called from an atomic context and the driver_lock mutex is not
         * acquired.
         */
        spin_lock(&l2_lock);
        tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
        set_speed(&drv.scalable[L2],
                  &drv.l2_freq_tbl[tgt_l2_l].speed, true);
        spin_unlock(&l2_lock);

        /* Nothing else to do for power collapse or SWFI. */
        if (reason == SETRATE_PC || reason == SETRATE_SWFI)
                goto out;

        /*
         * Remove the vote for the L2 HFPLL regulators only if the L2
         * was already on an HFPLL source.
         */
        if (prev_l2_src == HFPLL)
                disable_l2_regulators();

        /* Update bus bandwidth request. */
        set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);

        /* Drop VDD levels if we can. */
        decrease_vdd(cpu, &vdd_data, reason);

        /* Re-enable AVS */
        if (reason == SETRATE_CPUFREQ && tgt->avsdscr_setting) {
                AVS_ENABLE(cpu, tgt->avsdscr_setting);
                drv.scalable[cpu].avs_enabled = true;
        }

        dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);

out:
        if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
                mutex_unlock(&driver_lock);
        return rc;
}

static struct acpuclk_data acpuclk_krait_data = {
        .set_rate = acpuclk_krait_set_rate,
        .get_rate = acpuclk_krait_get_rate,
};

/* Initialize a HFPLL at a given rate and enable it. */
static void __cpuinit hfpll_init(struct scalable *sc,
                                 const struct core_speed *tgt_s)
{
        dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);

        /* Disable the PLL for re-programming. */
        hfpll_disable(sc, true);

        /* Configure PLL parameters for integer mode. */
        writel_relaxed(drv.hfpll_data->config_val,
                       sc->hfpll_base + drv.hfpll_data->config_offset);
        writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
        writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
        if (drv.hfpll_data->has_user_reg)
                writel_relaxed(drv.hfpll_data->user_val,
                               sc->hfpll_base + drv.hfpll_data->user_offset);

        /* Program droop controller, if supported */
        if (drv.hfpll_data->has_droop_ctl)
                writel_relaxed(drv.hfpll_data->droop_val,
                               sc->hfpll_base + drv.hfpll_data->droop_offset);

        /* Set an initial PLL rate. */
        hfpll_set_rate(sc, tgt_s);
}

static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
                                        int vdd, bool enable)
{
        int ret;

        if (!sc->vreg[vreg].name)
                return 0;

        sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
                                                   sc->vreg[vreg].name);
        if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
                ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
                dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
                        sc->vreg[vreg].name, ret);
                goto err_get;
        }

        ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
                                        sc->vreg[vreg].max_vdd);
        if (ret) {
                dev_err(drv.dev, "%s initialization failed (%d)\n",
                        sc->vreg[vreg].name, ret);
                goto err_conf;
        }
        sc->vreg[vreg].cur_vdd = vdd;

        if (enable) {
                ret = enable_rpm_vreg(&sc->vreg[vreg]);
                if (ret)
                        goto err_conf;
        }

        return 0;

err_conf:
        rpm_regulator_put(sc->vreg[vreg].rpm_reg);
err_get:
        return ret;
}

static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
                                            enum vregs vreg)
{
        if (!sc->vreg[vreg].rpm_reg)
                return;

        disable_rpm_vreg(&sc->vreg[vreg]);
        rpm_regulator_put(sc->vreg[vreg].rpm_reg);
}

/* Voltage regulator initialization. */
static int __cpuinit regulator_init(struct scalable *sc,
                                    const struct acpu_level *acpu_level)
{
        int ret, vdd_mem, vdd_dig, vdd_core;

        vdd_mem = calculate_vdd_mem(acpu_level);
        ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
        if (ret)
                goto err_mem;

        vdd_dig = calculate_vdd_dig(acpu_level);
        ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
        if (ret)
                goto err_dig;

        ret = rpm_regulator_init(sc, VREG_HFPLL_A,
                                 sc->vreg[VREG_HFPLL_A].max_vdd, false);
        if (ret)
                goto err_hfpll_a;
        ret = rpm_regulator_init(sc, VREG_HFPLL_B,
                                 sc->vreg[VREG_HFPLL_B].max_vdd, false);
        if (ret)
                goto err_hfpll_b;

        /* Setup Krait CPU regulators and initial core voltage. */
        sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
                                                sc->vreg[VREG_CORE].name);
        if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
                ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
                dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
                        sc->vreg[VREG_CORE].name, ret);
                goto err_core_get;
        }
        ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
                                         acpu_level->ua_core);
        if (ret < 0) {
                dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
                        sc->vreg[VREG_CORE].name, ret);
                goto err_core_conf;
        }
        sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
        vdd_core = calculate_vdd_core(acpu_level);
        ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
                                    sc->vreg[VREG_CORE].max_vdd);
        if (ret) {
                dev_err(drv.dev, "regulator_set_voltage(%s) failed (%d)\n",
                        sc->vreg[VREG_CORE].name, ret);
                goto err_core_conf;
        }
        sc->vreg[VREG_CORE].cur_vdd = vdd_core;
        ret = regulator_enable(sc->vreg[VREG_CORE].reg);
        if (ret) {
                dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
                        sc->vreg[VREG_CORE].name, ret);
                goto err_core_conf;
        }

        /*
         * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
         * requires a corresponding target L2 frequency that needs the L2 to
         * run off of an HFPLL.
         */
        if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
                l2_vreg_count++;

        return 0;

err_core_conf:
        regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
        rpm_regulator_cleanup(sc, VREG_HFPLL_B);
err_hfpll_b:
        rpm_regulator_cleanup(sc, VREG_HFPLL_A);
err_hfpll_a:
        rpm_regulator_cleanup(sc, VREG_DIG);
err_dig:
        rpm_regulator_cleanup(sc, VREG_MEM);
err_mem:
        return ret;
}

static void __cpuinit regulator_cleanup(struct scalable *sc)
{
        regulator_disable(sc->vreg[VREG_CORE].reg);
        regulator_put(sc->vreg[VREG_CORE].reg);
        rpm_regulator_cleanup(sc, VREG_HFPLL_B);
        rpm_regulator_cleanup(sc, VREG_HFPLL_A);
        rpm_regulator_cleanup(sc, VREG_DIG);
        rpm_regulator_cleanup(sc, VREG_MEM);
}

/* Set initial rate for a given core. */
static int __cpuinit init_clock_sources(struct scalable *sc,
                                        const struct core_speed *tgt_s)
{
        u32 regval;
        void __iomem *aux_reg;

        /* Program AUX source input to the secondary MUX. */
        if (sc->aux_clk_sel_phys) {
                aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
                if (!aux_reg)
                        return -ENOMEM;
                writel_relaxed(sc->aux_clk_sel, aux_reg);
                iounmap(aux_reg);
        }

        /* Switch away from the HFPLL while it's re-initialized. */
        set_sec_clk_src(sc, sc->sec_clk_sel);
        set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
        hfpll_init(sc, tgt_s);

        /* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
        regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
        regval &= ~(0x3 << 6);
        set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

        /* Enable and switch to the target clock source. */
        if (tgt_s->src == HFPLL)
                hfpll_enable(sc, false);
        set_pri_clk_src(sc, tgt_s->pri_src_sel);
        sc->cur_speed = tgt_s;

        return 0;
}

static void __cpuinit fill_cur_core_speed(struct core_speed *s,
                                          struct scalable *sc)
{
        s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
        s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
}

static bool __cpuinit speed_equal(const struct core_speed *s1,
                                  const struct core_speed *s2)
{
        return (s1->pri_src_sel == s2->pri_src_sel &&
                s1->pll_l_val == s2->pll_l_val);
}

static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
{
        struct scalable *sc = &drv.scalable[cpu];
        const struct acpu_level *l;
        struct core_speed cur_speed;

        fill_cur_core_speed(&cur_speed, sc);
        for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
                if (speed_equal(&l->speed, &cur_speed))
                        return l;
        return NULL;
}

static const struct l2_level __init *find_cur_l2_level(void)
{
        struct scalable *sc = &drv.scalable[L2];
        const struct l2_level *l;
        struct core_speed cur_speed;

        fill_cur_core_speed(&cur_speed, sc);
        for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
                if (speed_equal(&l->speed, &cur_speed))
                        return l;
        return NULL;
}

static const struct acpu_level __cpuinit *find_min_acpu_level(void)
{
        struct acpu_level *l;

        for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
                if (l->use_for_scaling)
                        return l;

        return NULL;
}

static int __cpuinit per_cpu_init(int cpu)
{
        struct scalable *sc = &drv.scalable[cpu];
        const struct acpu_level *acpu_level;
        int ret;

        sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
        if (!sc->hfpll_base) {
                ret = -ENOMEM;
                goto err_ioremap;
        }

        acpu_level = find_cur_acpu_level(cpu);
        if (!acpu_level) {
                acpu_level = find_min_acpu_level();
                if (!acpu_level) {
                        ret = -ENODEV;
                        goto err_table;
                }
                dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
                        cpu, acpu_level->speed.khz);
        } else {
                dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
                        acpu_level->speed.khz);
        }

        ret = regulator_init(sc, acpu_level);
        if (ret)
                goto err_regulators;

        ret = init_clock_sources(sc, &acpu_level->speed);
        if (ret)
                goto err_clocks;

        sc->l2_vote = acpu_level->l2_level;
        sc->initialized = true;

        return 0;

err_clocks:
        regulator_cleanup(sc);
err_regulators:
err_table:
        iounmap(sc->hfpll_base);
err_ioremap:
        return ret;
}

/* Register with bus driver. */
static void __init bus_init(const struct l2_level *l2_level)
{
        int ret;

        drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
        if (!drv.bus_perf_client) {
                dev_err(drv.dev, "unable to register bus client\n");
                BUG();
        }

        ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
                                                  l2_level->bw_level);
        if (ret)
                dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

static void __init cpufreq_table_init(void)
{
        int cpu;
        int freq_cnt = 0;

        for_each_possible_cpu(cpu) {
                int i;
                /* Construct the freq_table tables from acpu_freq_tbl. */
                for (i = 0, freq_cnt = 0; drv.acpu_freq_tbl[i].speed.khz != 0
                                && freq_cnt < ARRAY_SIZE(*freq_table); i++) {
                        if (drv.acpu_freq_tbl[i].use_for_scaling) {
                                freq_table[cpu][freq_cnt].index = freq_cnt;
                                freq_table[cpu][freq_cnt].frequency
                                        = drv.acpu_freq_tbl[i].speed.khz;
                                freq_cnt++;
                        }
                }
                /* freq_table not big enough to store all usable freqs. */
                BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

                freq_table[cpu][freq_cnt].index = freq_cnt;
                freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

                /* Register table with CPUFreq. */
                cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
        }

        dev_info(drv.dev, "CPU Frequencies Supported: %d\n", freq_cnt);
}
#else
static void __init cpufreq_table_init(void) {}
#endif
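
/*
 * Note the per-CPU tables above are capped at 35 entries each (34 usable
 * frequencies plus the CPUFREQ_TABLE_END terminator); the BUG_ON fires if
 * the platform frequency table has more scalable levels than fit.
 */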

static void __init dcvs_freq_init(void)
{
        int i;

        for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0; i++)
                if (drv.acpu_freq_tbl[i].use_for_scaling)
                        msm_dcvs_register_cpu_freq(
                                drv.acpu_freq_tbl[i].speed.khz,
                                drv.acpu_freq_tbl[i].vdd_core / 1000);
}

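/*
 * Hotplug handling: on CPU_DEAD the current rate is saved and the core is
 * dropped to the power-collapse rate with its regulator votes removed; on
 * CPU_UP_PREPARE the core is either initialized for the first time or has
 * its saved voltage, current and rate restored before it comes back online.
 */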
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
{
        static int prev_khz[NR_CPUS];
        int rc, cpu = (int)hcpu;
        struct scalable *sc = &drv.scalable[cpu];
        unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DEAD:
                prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
                /* Fall through. */
        case CPU_UP_CANCELED:
                acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);

                regulator_disable(sc->vreg[VREG_CORE].reg);
                regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
                regulator_set_voltage(sc->vreg[VREG_CORE].reg, 0,
                                      sc->vreg[VREG_CORE].max_vdd);
                break;
        case CPU_UP_PREPARE:
                if (!sc->initialized) {
                        rc = per_cpu_init(cpu);
                        if (rc)
                                return NOTIFY_BAD;
                        break;
                }
                if (WARN_ON(!prev_khz[cpu]))
                        return NOTIFY_BAD;

                rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
                                           sc->vreg[VREG_CORE].cur_vdd,
                                           sc->vreg[VREG_CORE].max_vdd);
                if (rc < 0)
                        return NOTIFY_BAD;
                rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
                                                sc->vreg[VREG_CORE].cur_ua);
                if (rc < 0)
                        return NOTIFY_BAD;
                rc = regulator_enable(sc->vreg[VREG_CORE].reg);
                if (rc < 0)
                        return NOTIFY_BAD;

                acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
        .notifier_call = acpuclk_cpu_callback,
};

static int __init krait_needs_vmin(void)
{
        switch (read_cpuid_id()) {
        case 0x511F04D0: /* KR28M2A20 */
        case 0x511F04D1: /* KR28M2A21 */
        case 0x510F06F0: /* KR28M4A10 */
                return 1;
        default:
                return 0;
        }
}
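
/*
 * The MIDR values above identify early Krait revisions that require a
 * raised minimum core voltage; krait_apply_vmin() below enforces a 1.15 V
 * floor on their frequency table and zeroes avsdscr_setting, which keeps
 * the set-rate path from ever re-enabling AVS on those parts.
 */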

static void __init krait_apply_vmin(struct acpu_level *tbl)
{
	for (; tbl->speed.khz != 0; tbl++) {
		if (tbl->vdd_core < 1150000)
			tbl->vdd_core = 1150000;
		tbl->avsdscr_setting = 0;
	}
}

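/*
 * Bin format A: the speed bin lives in efuse bits [3:0] (redundant copy
 * in [7:4]) and the PVS bin in bits [12:10] (redundant copy in [15:13]).
 * An all-ones field means "use the redundant copy"; if that is also
 * all-ones, the field is marked invalid.
 */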
void __init get_krait_bin_format_a(void __iomem *base, struct bin_info *bin)
{
	u32 pte_efuse = readl_relaxed(base);

	bin->speed = pte_efuse & 0xF;
	if (bin->speed == 0xF)
		bin->speed = (pte_efuse >> 4) & 0xF;
	bin->speed_valid = bin->speed != 0xF;

	bin->pvs = (pte_efuse >> 10) & 0x7;
	if (bin->pvs == 0x7)
		bin->pvs = (pte_efuse >> 13) & 0x7;
	bin->pvs_valid = bin->pvs != 0x7;
}

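/*
 * Bin format B: the speed bin lives in efuse bits [2:0] and the PVS bin
 * in bits [8:6]. The redundancy selector in bits [26:24] indicates
 * whether bits [29:27] hold a replacement speed (1) or PVS (2) value.
 * PVS fusing is confirmed by PVS_BLOW_STATUS, bit 21 of the next word.
 */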
void __init get_krait_bin_format_b(void __iomem *base, struct bin_info *bin)
{
	u32 pte_efuse, redundant_sel;

	pte_efuse = readl_relaxed(base);
	redundant_sel = (pte_efuse >> 24) & 0x7;
	bin->speed = pte_efuse & 0x7;
	bin->pvs = (pte_efuse >> 6) & 0x7;

	switch (redundant_sel) {
	case 1:
		bin->speed = (pte_efuse >> 27) & 0x7;
		break;
	case 2:
		bin->pvs = (pte_efuse >> 27) & 0x7;
		break;
	}
	bin->speed_valid = true;

	/* Check PVS_BLOW_STATUS */
	pte_efuse = readl_relaxed(base + 0x4);
	bin->pvs_valid = !!(pte_efuse & BIT(21));
}

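/*
 * Read the speed and PVS bins from the PTE efuse and use them to index
 * into the platform's table of per-bin frequency plans; invalid bins
 * fall back to bin 0.
 */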
static struct pvs_table * __init select_freq_plan(
		const struct acpuclk_krait_params *params)
{
	void __iomem *pte_efuse_base;
	struct bin_info bin;

	pte_efuse_base = ioremap(params->pte_efuse_phys, 8);
	if (!pte_efuse_base) {
		dev_err(drv.dev, "Unable to map PTE eFuse base\n");
		return NULL;
	}
	params->get_bin_info(pte_efuse_base, &bin);
	iounmap(pte_efuse_base);

	if (bin.speed_valid) {
		drv.speed_bin = bin.speed;
		dev_info(drv.dev, "SPEED BIN: %d\n", drv.speed_bin);
	} else {
		drv.speed_bin = 0;
		dev_warn(drv.dev, "SPEED BIN: Defaulting to %d\n",
			 drv.speed_bin);
	}

	if (bin.pvs_valid) {
		drv.pvs_bin = bin.pvs;
		dev_info(drv.dev, "ACPU PVS: %d\n", drv.pvs_bin);
	} else {
		drv.pvs_bin = 0;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %d\n",
			 drv.pvs_bin);
	}

	return &params->pvs_tables[drv.speed_bin][drv.pvs_bin];
}

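/*
 * Deep-copy the platform-supplied tables so the driver owns writable
 * copies (krait_apply_vmin() edits them) that outlive the caller's,
 * possibly __init, originals.
 */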
static void __init drv_data_init(struct device *dev,
				 const struct acpuclk_krait_params *params)
{
	struct pvs_table *pvs;

	drv.dev = dev;
	drv.scalable = kmemdup(params->scalable, params->scalable_size,
			       GFP_KERNEL);
	BUG_ON(!drv.scalable);

	drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
				 GFP_KERNEL);
	BUG_ON(!drv.hfpll_data);

	drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
				  GFP_KERNEL);
	BUG_ON(!drv.l2_freq_tbl);

	drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
				GFP_KERNEL);
	BUG_ON(!drv.bus_scale);
	drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
		drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
		GFP_KERNEL);
	BUG_ON(!drv.bus_scale->usecase);

	/* select_freq_plan() returns NULL if the efuse can't be mapped. */
	pvs = select_freq_plan(params);
	BUG_ON(!pvs || !pvs->table);

	drv.acpu_freq_tbl = kmemdup(pvs->table, pvs->size, GFP_KERNEL);
	BUG_ON(!drv.acpu_freq_tbl);
	drv.boost_uv = pvs->boost_uv;

	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}

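/*
 * Bring the hardware into a known state: map the L2 HFPLL, set up its
 * RPM regulator votes, determine the L2's current rate, then run the
 * per-CPU init for each online CPU and set the initial bus vote.
 */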
static void __init hw_init(void)
{
	struct scalable *l2 = &drv.scalable[L2];
	const struct l2_level *l2_level;
	int cpu, rc;

	if (krait_needs_vmin())
		krait_apply_vmin(drv.acpu_freq_tbl);

	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
	BUG_ON(!l2->hfpll_base);

	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
				l2->vreg[VREG_HFPLL_A].max_vdd, false);
	BUG_ON(rc);
	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
				l2->vreg[VREG_HFPLL_B].max_vdd, false);
	BUG_ON(rc);

	l2_level = find_cur_l2_level();
	if (!l2_level) {
		l2_level = drv.l2_freq_tbl;
		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
			l2_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
			l2_level->speed.khz);
	}

	rc = init_clock_sources(l2, &l2_level->speed);
	BUG_ON(rc);

	for_each_online_cpu(cpu) {
		rc = per_cpu_init(cpu);
		BUG_ON(rc);
	}

	bus_init(l2_level);
}

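/*
 * Top-level init: snapshot the platform data, program the hardware, then
 * register with cpufreq, dcvs, the acpuclk layer, CPU hotplug, and the
 * debug interface.
 */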
int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	drv_data_init(dev, params);
	hw_init();

	cpufreq_table_init();
	dcvs_freq_init();
	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	acpuclk_krait_debug_init(&drv);

	return 0;
}
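
/*
 * Illustrative (hypothetical) SoC glue, sketching how a board driver
 * might feed this file. The example_* tables, the efuse address, and the
 * probe wiring below are invented for illustration and kept out of the
 * build with #if 0; real callers are the per-SoC acpuclock-*.c files,
 * which supply their own scalable/PVS/bus tables.
 */
#if 0
static struct acpuclk_krait_params acpuclk_example_params __initdata = {
	.scalable = example_scalable,			/* invented table */
	.scalable_size = sizeof(example_scalable),
	.hfpll_data = &example_hfpll_data,		/* invented table */
	.pvs_tables = example_pvs_tables,		/* invented table */
	.l2_freq_tbl = example_l2_freq_tbl,		/* invented table */
	.l2_freq_tbl_size = sizeof(example_l2_freq_tbl),
	.bus_scale = &example_bus_scale_data,		/* invented table */
	.pte_efuse_phys = 0x007000C0,			/* invented address */
	.get_bin_info = get_krait_bin_format_b,
	.stby_khz = 384000,				/* invented rate */
};

static int __init acpuclk_example_probe(struct platform_device *pdev)
{
	return acpuclk_krait_init(&pdev->dev, &acpuclk_example_params);
}
#endif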