/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/regulator/consumer.h>

#include <asm/mach-types.h>
#include <asm/cpu.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/msm-krait-l2-accessors.h>
#include <mach/rpm-regulator.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_bus.h>
#include <mach/msm_dcvs.h>

#include "acpuclock.h"
#include "acpuclock-krait.h"
#include "avs.h"

/* MUX source selects. */
#define PRI_SRC_SEL_SEC_SRC	0
#define PRI_SRC_SEL_HFPLL	1
#define PRI_SRC_SEL_HFPLL_DIV2	2
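
/*
 * Note on the MUX programming model used throughout this file: each Krait
 * CPU (and the L2) exposes a clock MUX register reached through the
 * indirect L2 accessors (l2cpmr_iaddr). As programmed below, bits [1:0]
 * select the primary source, bits [3:2] the secondary source, and bits
 * [7:6] the HFPLL div-2 divider; bit 4 (SECCLKAGD), when set, disables
 * secondary-source auto clock gating (an 8064 errata workaround).
 */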

#define SECCLKAGD		BIT(4)

static DEFINE_MUTEX(driver_lock);
static DEFINE_SPINLOCK(l2_lock);

static struct drv_data drv;

static unsigned long acpuclk_krait_get_rate(int cpu)
{
	return drv.scalable[cpu].cur_speed->khz;
}

/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~0x3;
	regval |= (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);
	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

/* Select a source on the secondary MUX. */
static void __cpuinit set_sec_clk_src(struct scalable *sc, u32 sec_src_sel)
{
	u32 regval;

	/* 8064 Errata: disable sec_src clock gating during switch. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval |= SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Program the MUX. */
	regval &= ~(0x3 << 2);
	regval |= ((sec_src_sel & 0x3) << 2);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* 8064 Errata: re-enable sec_src clock gating. */
	regval &= ~SECCLKAGD;
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Wait for switch to complete. */
	mb();
	udelay(1);
}

static int enable_rpm_vreg(struct vreg *vreg)
{
	int ret = 0;

	if (vreg->rpm_reg) {
		ret = rpm_regulator_enable(vreg->rpm_reg);
		if (ret)
			dev_err(drv.dev, "%s regulator enable failed (%d)\n",
				vreg->name, ret);
	}

	return ret;
}

static void disable_rpm_vreg(struct vreg *vreg)
{
	int rc;

	if (vreg->rpm_reg) {
		rc = rpm_regulator_disable(vreg->rpm_reg);
		if (rc)
			dev_err(drv.dev, "%s regulator disable failed (%d)\n",
				vreg->name, rc);
	}
}

/* Enable an already-configured HFPLL. */
static void hfpll_enable(struct scalable *sc, bool skip_regulators)
{
	if (!skip_regulators) {
		/* Enable regulators required by the HFPLL. */
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
		enable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
	}

	/* Disable PLL bypass mode. */
	writel_relaxed(0x2, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/*
	 * H/W requires a 5us delay between disabling the bypass and
	 * de-asserting the reset. Delay 10us just to be safe.
	 */
	mb();
	udelay(10);

	/* De-assert active-low PLL reset. */
	writel_relaxed(0x6, sc->hfpll_base + drv.hfpll_data->mode_offset);

	/* Wait for PLL to lock. */
	mb();
	udelay(60);

	/* Enable PLL output. */
	writel_relaxed(0x7, sc->hfpll_base + drv.hfpll_data->mode_offset);
}

/* Disable an HFPLL for power savings or while it's being reprogrammed. */
static void hfpll_disable(struct scalable *sc, bool skip_regulators)
{
	/*
	 * Disable the PLL output, disable test mode, enable the bypass mode,
	 * and assert the reset.
	 */
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->mode_offset);

	if (!skip_regulators) {
		/* Remove voltage votes required by the HFPLL. */
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&sc->vreg[VREG_HFPLL_A]);
	}
}

/* Program the HFPLL rate. Assumes HFPLL is already disabled. */
static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
{
	void __iomem *base = sc->hfpll_base;
	u32 regval;

	writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);

	if (drv.hfpll_data->has_user_reg) {
		regval = readl_relaxed(base + drv.hfpll_data->user_offset);
		if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
			regval &= ~drv.hfpll_data->user_vco_mask;
		else
			regval |= drv.hfpll_data->user_vco_mask;
		writel_relaxed(regval, base + drv.hfpll_data->user_offset);
	}
}

/* Return the L2 speed that should be applied. */
static unsigned int compute_l2_level(struct scalable *sc, unsigned int vote_l)
{
	unsigned int new_l = 0;
	int cpu;

	/* Find max L2 speed vote. */
	sc->l2_vote = vote_l;
	for_each_present_cpu(cpu)
		new_l = max(new_l, drv.scalable[cpu].l2_vote);

	return new_l;
}
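
/*
 * Example of the voting scheme above: with CPU0 voting L2 level 5 and CPU1
 * voting level 2, compute_l2_level() returns 5 no matter which CPU calls it;
 * the L2 only slows down once CPU0 lowers its own vote.
 */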

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	/* Update bandwidth if request has changed. This may sleep. */
	ret = msm_bus_scale_client_update_request(drv.bus_perf_client, bw);
	if (ret)
		dev_err(drv.dev, "bandwidth request failed (%d)\n", ret);
}

/* Set the CPU or L2 clock speed. */
static void set_speed(struct scalable *sc, const struct core_speed *tgt_s,
		      bool skip_regulators)
{
	const struct core_speed *strt_s = sc->cur_speed;

	if (strt_s == tgt_s)
		return;

	if (strt_s->src == HFPLL && tgt_s->src == HFPLL) {
		/*
		 * Move to an always-on source running at a frequency
		 * that does not require an elevated CPU voltage.
		 */
		set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);

		/* Re-program HFPLL. */
		hfpll_disable(sc, true);
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, true);

		/* Move to HFPLL. */
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	} else if (strt_s->src == HFPLL && tgt_s->src != HFPLL) {
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
		hfpll_disable(sc, skip_regulators);
	} else if (strt_s->src != HFPLL && tgt_s->src == HFPLL) {
		hfpll_set_rate(sc, tgt_s);
		hfpll_enable(sc, skip_regulators);
		set_pri_clk_src(sc, tgt_s->pri_src_sel);
	}

	sc->cur_speed = tgt_s;
}
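
/*
 * Summary of the transitions handled above: HFPLL -> HFPLL reprograms the
 * PLL while parked on the secondary source, HFPLL -> non-HFPLL switches the
 * MUX first and then powers the PLL down, and non-HFPLL -> HFPLL brings the
 * PLL up before switching to it. A non-HFPLL -> non-HFPLL change updates
 * only cur_speed, presumably because both rates come from always-on sources
 * already selected through the secondary MUX.
 */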

struct vdd_data {
	int vdd_mem;
	int vdd_dig;
	int vdd_core;
	int ua_core;
};

/* Apply any per-cpu voltage increases. */
static int increase_vdd(int cpu, struct vdd_data *data,
			enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int rc;

	/*
	 * Increase vdd_mem active-set before vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}

	/* Increase vdd_dig active-set vote. */
	if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/* Increase current request. */
	if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						data->ua_core);
		if (rc < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/*
	 * Update the per-CPU core voltage. Skip this in the hotplug path,
	 * where it should already be correct: setting it there is unsafe
	 * because we don't know which CPU we are running on at this point,
	 * but the CPU regulator API requires the call to be made from the
	 * affected CPU.
	 */
	if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
				&& reason != SETRATE_HOTPLUG) {
		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (rc) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) increase failed (%d)\n",
				cpu, rc);
			return rc;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	return 0;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(int cpu, struct vdd_data *data,
			 enum setrate_reason reason)
{
	struct scalable *sc = &drv.scalable[cpu];
	int ret;

	/*
	 * Update per-CPU core voltage. This must be called on the CPU
	 * that's being affected. Don't do this in the hotplug remove path,
	 * where the rail is off and we're executing on the other CPU.
	 */
	if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
				&& reason != SETRATE_HOTPLUG) {
		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_core (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
	}

	/* Decrease current request. */
	if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						 data->ua_core);
		if (ret < 0) {
			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
				sc->vreg[VREG_CORE].name, ret);
			return;
		}
		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
	}

	/* Decrease vdd_dig active-set vote. */
	if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_dig (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
	}

	/*
	 * Decrease vdd_mem active-set after vdd_dig.
	 * vdd_mem should be >= vdd_dig.
	 */
	if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
		if (ret) {
			dev_err(drv.dev,
				"vdd_mem (cpu%d) decrease failed (%d)\n",
				cpu, ret);
			return;
		}
		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
	}
}

static int calculate_vdd_mem(const struct acpu_level *tgt)
{
	return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
}

static int get_src_dig(const struct core_speed *s)
{
	const int *hfpll_vdd = drv.hfpll_data->vdd;
	const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
	const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;

	if (s->src != HFPLL)
		return hfpll_vdd[HFPLL_VDD_NONE];
	else if (s->pll_l_val > nom_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_HIGH];
	else if (s->pll_l_val > low_vdd_l_max)
		return hfpll_vdd[HFPLL_VDD_NOM];
	else
		return hfpll_vdd[HFPLL_VDD_LOW];
}

static int calculate_vdd_dig(const struct acpu_level *tgt)
{
	int l2_pll_vdd_dig, cpu_pll_vdd_dig;

	l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
	cpu_pll_vdd_dig = get_src_dig(&tgt->speed);

	return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
		   max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
}

static bool enable_boost = true;
module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);

static int calculate_vdd_core(const struct acpu_level *tgt)
{
	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
}
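
/*
 * The "boost" parameter above is writable at runtime. Assuming this file
 * builds in under the name "acpuclock-krait", something like
 * `echo 0 > /sys/module/acpuclock_krait/parameters/boost` (exact path
 * depends on the build) would drop the drv.boost_uv adder from vdd_core
 * at the next frequency change.
 */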

static DEFINE_MUTEX(l2_regulator_lock);
static int l2_vreg_count;

static int enable_l2_regulators(void)
{
	int ret = 0;

	mutex_lock(&l2_regulator_lock);
	if (l2_vreg_count == 0) {
		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
		if (ret)
			goto out;
		ret = enable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
		if (ret) {
			disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
			goto out;
		}
	}
	l2_vreg_count++;
out:
	mutex_unlock(&l2_regulator_lock);

	return ret;
}

static void disable_l2_regulators(void)
{
	mutex_lock(&l2_regulator_lock);

	if (WARN(!l2_vreg_count, "L2 regulator votes are unbalanced!"))
		goto out;

	if (l2_vreg_count == 1) {
		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_B]);
		disable_rpm_vreg(&drv.scalable[L2].vreg[VREG_HFPLL_A]);
	}
	l2_vreg_count--;
out:
	mutex_unlock(&l2_regulator_lock);
}
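
/*
 * l2_vreg_count is a plain refcount: the physical regulators toggle only on
 * the 0->1 and 1->0 edges, so each CPU that needs the L2 on an HFPLL holds
 * exactly one vote. regulator_init() below pre-increments the count for a
 * CPU whose initial frequency already implies such a vote.
 */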

/* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
static int acpuclk_krait_set_rate(int cpu, unsigned long rate,
				  enum setrate_reason reason)
{
	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
	const struct acpu_level *tgt;
	int tgt_l2_l;
	enum src_id prev_l2_src = NUM_SRC_ID;
	struct vdd_data vdd_data;
	bool skip_regulators;
	int rc = 0;

	/* Valid CPU ids run 0..num_possible_cpus()-1; reject anything else. */
	if (cpu >= num_possible_cpus())
		return -EINVAL;

	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_lock(&driver_lock);

	strt_acpu_s = drv.scalable[cpu].cur_speed;

	/* Return early if rate didn't change. */
	if (rate == strt_acpu_s->khz)
		goto out;

	/* Find target frequency. */
	for (tgt = drv.acpu_freq_tbl; tgt->speed.khz != 0; tgt++) {
		if (tgt->speed.khz == rate) {
			tgt_acpu_s = &tgt->speed;
			break;
		}
	}
	if (tgt->speed.khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Calculate voltage requirements for the current CPU. */
	vdd_data.vdd_mem  = calculate_vdd_mem(tgt);
	vdd_data.vdd_dig  = calculate_vdd_dig(tgt);
	vdd_data.vdd_core = calculate_vdd_core(tgt);
	vdd_data.ua_core  = tgt->ua_core;

	/* Disable AVS before voltage switch. */
	if (reason == SETRATE_CPUFREQ && drv.scalable[cpu].avs_enabled) {
		AVS_DISABLE(cpu);
		drv.scalable[cpu].avs_enabled = false;
	}

	/* Increase VDD levels if needed. */
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
		rc = increase_vdd(cpu, &vdd_data, reason);
		if (rc)
			goto out;

		prev_l2_src =
			drv.l2_freq_tbl[drv.scalable[cpu].l2_vote].speed.src;
		/* Vote for the L2 regulators here if necessary. */
		if (drv.l2_freq_tbl[tgt->l2_level].speed.src == HFPLL) {
			rc = enable_l2_regulators();
			if (rc)
				goto out;
		}
	}

	dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
		cpu, strt_acpu_s->khz, tgt_acpu_s->khz);

	/*
	 * If we are setting the rate as part of power collapse or in the
	 * resume path after power collapse, skip the vote for the HFPLL
	 * regulators, which are active-set-only votes that will be removed
	 * when apps enters its sleep set. This is needed to avoid voting
	 * for regulators with sleeping APIs from an atomic context.
	 */
	skip_regulators = (reason == SETRATE_PC);

	/* Set the new CPU speed. */
	set_speed(&drv.scalable[cpu], tgt_acpu_s, skip_regulators);

	/*
	 * Update the L2 vote and apply the rate change. A spinlock is
	 * necessary to ensure L2 rate is calculated and set atomically
	 * with the CPU frequency, even if acpuclk_krait_set_rate() is
	 * called from an atomic context and the driver_lock mutex is not
	 * acquired.
	 */
	spin_lock(&l2_lock);
	tgt_l2_l = compute_l2_level(&drv.scalable[cpu], tgt->l2_level);
	set_speed(&drv.scalable[L2],
		  &drv.l2_freq_tbl[tgt_l2_l].speed, true);
	spin_unlock(&l2_lock);

	/* Nothing else to do for power collapse or SWFI. */
	if (reason == SETRATE_PC || reason == SETRATE_SWFI)
		goto out;

	/*
	 * Remove the vote for the L2 HFPLL regulators only if the L2
	 * was already on an HFPLL source.
	 */
	if (prev_l2_src == HFPLL)
		disable_l2_regulators();

	/* Update bus bandwidth request. */
	set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);

	/* Drop VDD levels if we can. */
	decrease_vdd(cpu, &vdd_data, reason);

	/* Re-enable AVS. */
	if (reason == SETRATE_CPUFREQ && tgt->avsdscr_setting) {
		AVS_ENABLE(cpu, tgt->avsdscr_setting);
		drv.scalable[cpu].avs_enabled = true;
	}

	dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);

out:
	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
		mutex_unlock(&driver_lock);
	return rc;
}
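
/*
 * Order of operations in acpuclk_krait_set_rate(), as implemented above:
 * raise voltages (and any L2 PLL regulator votes) before the switch, change
 * the CPU clock, then the L2 clock under l2_lock, and only afterwards drop
 * the bus request and any voltages no longer needed. Raising first and
 * dropping last keeps the rails adequate at both the old and new
 * frequencies for the whole transition.
 */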

static struct acpuclk_data acpuclk_krait_data = {
	.set_rate = acpuclk_krait_set_rate,
	.get_rate = acpuclk_krait_get_rate,
};

/* Initialize an HFPLL at a given rate and enable it. */
static void __cpuinit hfpll_init(struct scalable *sc,
				 const struct core_speed *tgt_s)
{
	dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);

	/* Disable the PLL for re-programming. */
	hfpll_disable(sc, true);

	/* Configure PLL parameters for integer mode. */
	writel_relaxed(drv.hfpll_data->config_val,
		       sc->hfpll_base + drv.hfpll_data->config_offset);
	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
	writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
	if (drv.hfpll_data->has_user_reg)
		writel_relaxed(drv.hfpll_data->user_val,
			       sc->hfpll_base + drv.hfpll_data->user_offset);

	/* Program droop controller, if supported. */
	if (drv.hfpll_data->has_droop_ctl)
		writel_relaxed(drv.hfpll_data->droop_val,
			       sc->hfpll_base + drv.hfpll_data->droop_offset);

	/* Set an initial rate and enable the PLL. */
	hfpll_set_rate(sc, tgt_s);
	hfpll_enable(sc, false);
}

static int __cpuinit rpm_regulator_init(struct scalable *sc, enum vregs vreg,
					int vdd, bool enable)
{
	int ret;

	if (!sc->vreg[vreg].name)
		return 0;

	sc->vreg[vreg].rpm_reg = rpm_regulator_get(drv.dev,
						   sc->vreg[vreg].name);
	if (IS_ERR(sc->vreg[vreg].rpm_reg)) {
		ret = PTR_ERR(sc->vreg[vreg].rpm_reg);
		dev_err(drv.dev, "rpm_regulator_get(%s) failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_get;
	}

	ret = rpm_regulator_set_voltage(sc->vreg[vreg].rpm_reg, vdd,
					sc->vreg[vreg].max_vdd);
	if (ret) {
		dev_err(drv.dev, "%s initialization failed (%d)\n",
			sc->vreg[vreg].name, ret);
		goto err_conf;
	}
	sc->vreg[vreg].cur_vdd = vdd;

	if (enable) {
		ret = enable_rpm_vreg(&sc->vreg[vreg]);
		if (ret)
			goto err_conf;
	}

	return 0;

err_conf:
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
err_get:
	return ret;
}

static void __cpuinit rpm_regulator_cleanup(struct scalable *sc,
					    enum vregs vreg)
{
	if (!sc->vreg[vreg].rpm_reg)
		return;

	disable_rpm_vreg(&sc->vreg[vreg]);
	rpm_regulator_put(sc->vreg[vreg].rpm_reg);
}

/* Voltage regulator initialization. */
static int __cpuinit regulator_init(struct scalable *sc,
				    const struct acpu_level *acpu_level)
{
	int ret, vdd_mem, vdd_dig, vdd_core;

	vdd_mem = calculate_vdd_mem(acpu_level);
	ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
	if (ret)
		goto err_mem;

	vdd_dig = calculate_vdd_dig(acpu_level);
	ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
	if (ret)
		goto err_dig;

	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
				 sc->vreg[VREG_HFPLL_A].max_vdd, false);
	if (ret)
		goto err_hfpll_a;
	ret = rpm_regulator_init(sc, VREG_HFPLL_B,
				 sc->vreg[VREG_HFPLL_B].max_vdd, false);
	if (ret)
		goto err_hfpll_b;

	/* Setup Krait CPU regulators and initial core voltage. */
	sc->vreg[VREG_CORE].reg = regulator_get(drv.dev,
						sc->vreg[VREG_CORE].name);
	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
		ret = PTR_ERR(sc->vreg[VREG_CORE].reg);
		dev_err(drv.dev, "regulator_get(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_get;
	}
	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
					 acpu_level->ua_core);
	if (ret < 0) {
		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
	vdd_core = calculate_vdd_core(acpu_level);
	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
				    sc->vreg[VREG_CORE].max_vdd);
	if (ret) {
		dev_err(drv.dev, "regulator_set_voltage(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}
	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
	if (ret) {
		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
			sc->vreg[VREG_CORE].name, ret);
		goto err_core_conf;
	}

	/*
	 * Increment the L2 HFPLL regulator refcount if _this_ CPU's frequency
	 * requires a corresponding target L2 frequency that needs the L2 to
	 * run off of an HFPLL.
	 */
	if (drv.l2_freq_tbl[acpu_level->l2_level].speed.src == HFPLL)
		l2_vreg_count++;

	return 0;

err_core_conf:
	regulator_put(sc->vreg[VREG_CORE].reg);
err_core_get:
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
err_hfpll_b:
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
err_hfpll_a:
	rpm_regulator_cleanup(sc, VREG_DIG);
err_dig:
	rpm_regulator_cleanup(sc, VREG_MEM);
err_mem:
	return ret;
}
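
/*
 * The err_* ladder above unwinds in exact reverse order of acquisition, so
 * jumping to any label releases everything obtained before the failure and
 * nothing after it; regulator_cleanup() below mirrors the same order for
 * the non-error teardown path.
 */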

static void __cpuinit regulator_cleanup(struct scalable *sc)
{
	regulator_disable(sc->vreg[VREG_CORE].reg);
	regulator_put(sc->vreg[VREG_CORE].reg);
	rpm_regulator_cleanup(sc, VREG_HFPLL_B);
	rpm_regulator_cleanup(sc, VREG_HFPLL_A);
	rpm_regulator_cleanup(sc, VREG_DIG);
	rpm_regulator_cleanup(sc, VREG_MEM);
}

/* Set initial rate for a given core. */
static int __cpuinit init_clock_sources(struct scalable *sc,
					const struct core_speed *tgt_s)
{
	u32 regval;
	void __iomem *aux_reg;

	/* Program AUX source input to the secondary MUX. */
	if (sc->aux_clk_sel_phys) {
		aux_reg = ioremap(sc->aux_clk_sel_phys, 4);
		if (!aux_reg)
			return -ENOMEM;
		writel_relaxed(sc->aux_clk_sel, aux_reg);
		iounmap(aux_reg);
	}

	/* Switch away from the HFPLL while it's re-initialized. */
	set_sec_clk_src(sc, sc->sec_clk_sel);
	set_pri_clk_src(sc, PRI_SRC_SEL_SEC_SRC);
	hfpll_init(sc, tgt_s);

	/* Set PRI_SRC_SEL_HFPLL_DIV2 divider to div-2. */
	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	regval &= ~(0x3 << 6);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Switch to the target clock source. */
	set_pri_clk_src(sc, tgt_s->pri_src_sel);
	sc->cur_speed = tgt_s;

	return 0;
}

static void __cpuinit fill_cur_core_speed(struct core_speed *s,
					  struct scalable *sc)
{
	s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
	s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
}

static bool __cpuinit speed_equal(const struct core_speed *s1,
				  const struct core_speed *s2)
{
	return (s1->pri_src_sel == s2->pri_src_sel &&
		s1->pll_l_val == s2->pll_l_val);
}

static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct l2_level __init *find_cur_l2_level(void)
{
	struct scalable *sc = &drv.scalable[L2];
	const struct l2_level *l;
	struct core_speed cur_speed;

	fill_cur_core_speed(&cur_speed, sc);
	for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
		if (speed_equal(&l->speed, &cur_speed))
			return l;
	return NULL;
}

static const struct acpu_level __cpuinit *find_min_acpu_level(void)
{
	struct acpu_level *l;

	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
		if (l->use_for_scaling)
			return l;

	return NULL;
}
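
/*
 * Boot-state detection above identifies the running level purely from the
 * primary MUX selection and the HFPLL L value, which is why speed_equal()
 * compares only those two fields. A NULL return means the bootloader left
 * the clocks at a rate not present in the table; per_cpu_init() below then
 * falls back to the lowest usable level.
 */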

static int __cpuinit per_cpu_init(int cpu)
{
	struct scalable *sc = &drv.scalable[cpu];
	const struct acpu_level *acpu_level;
	int ret;

	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
	if (!sc->hfpll_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	acpu_level = find_cur_acpu_level(cpu);
	if (!acpu_level) {
		acpu_level = find_min_acpu_level();
		if (!acpu_level) {
			ret = -ENODEV;
			goto err_table;
		}
		dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
			cpu, acpu_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
			acpu_level->speed.khz);
	}

	ret = regulator_init(sc, acpu_level);
	if (ret)
		goto err_regulators;

	ret = init_clock_sources(sc, &acpu_level->speed);
	if (ret)
		goto err_clocks;

	sc->l2_vote = acpu_level->l2_level;
	sc->initialized = true;

	return 0;

err_clocks:
	regulator_cleanup(sc);
err_regulators:
err_table:
	iounmap(sc->hfpll_base);
err_ioremap:
	return ret;
}

/* Register with bus driver. */
static void __init bus_init(const struct l2_level *l2_level)
{
	int ret;

	drv.bus_perf_client = msm_bus_scale_register_client(drv.bus_scale);
	if (!drv.bus_perf_client) {
		dev_err(drv.dev, "unable to register bus client\n");
		BUG();
	}

	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
						  l2_level->bw_level);
	if (ret)
		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[NR_CPUS][35];

static void __init cpufreq_table_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;
		/* Construct the freq_table tables from acpu_freq_tbl. */
		for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0
			    && freq_cnt < ARRAY_SIZE(*freq_table); i++) {
			if (drv.acpu_freq_tbl[i].use_for_scaling) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= drv.acpu_freq_tbl[i].speed.khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(drv.acpu_freq_tbl[i].speed.khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		dev_info(drv.dev, "CPU%d: %d frequencies supported\n",
			 cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
#else
static void __init cpufreq_table_init(void) {}
#endif

static void __init dcvs_freq_init(void)
{
	int i;

	for (i = 0; drv.acpu_freq_tbl[i].speed.khz != 0; i++)
		if (drv.acpu_freq_tbl[i].use_for_scaling)
			msm_dcvs_register_cpu_freq(
				drv.acpu_freq_tbl[i].speed.khz,
				drv.acpu_freq_tbl[i].vdd_core / 1000);
}
|  | 965 |  | 
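/*
 * Hotplug notifier: when a CPU dies, drop its clock to the
 * power-collapse rate and gate its core supply; when it comes back up,
 * restore the supply and the rate it was running at before.
 */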
static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	static int prev_khz[NR_CPUS];
	int rc, cpu = (int)hcpu;
	struct scalable *sc = &drv.scalable[cpu];
	unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
		/* Fall through. */
	case CPU_UP_CANCELED:
		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);

		regulator_disable(sc->vreg[VREG_CORE].reg);
		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
		regulator_set_voltage(sc->vreg[VREG_CORE].reg, 0,
				      sc->vreg[VREG_CORE].max_vdd);
		break;
	case CPU_UP_PREPARE:
		if (!sc->initialized) {
			rc = per_cpu_init(cpu);
			if (rc)
				return NOTIFY_BAD;
			break;
		}
		if (WARN_ON(!prev_khz[cpu]))
			return NOTIFY_BAD;

		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
					   sc->vreg[VREG_CORE].cur_vdd,
					   sc->vreg[VREG_CORE].max_vdd);
		if (rc < 0)
			return NOTIFY_BAD;
		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
						sc->vreg[VREG_CORE].cur_ua);
		if (rc < 0)
			return NOTIFY_BAD;
		rc = regulator_enable(sc->vreg[VREG_CORE].reg);
		if (rc < 0)
			return NOTIFY_BAD;

		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata acpuclk_cpu_notifier = {
	.notifier_call = acpuclk_cpu_callback,
};

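/*
 * Early Krait revisions need a raised minimum core voltage; identify
 * them by their MIDR values.
 */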
static int __init krait_needs_vmin(void)
{
	switch (read_cpuid_id()) {
	case 0x511F04D0: /* KR28M2A20 */
	case 0x511F04D1: /* KR28M2A21 */
	case 0x510F06F0: /* KR28M4A10 */
		return 1;
	default:
		return 0;
	}
}

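/*
 * Raise every operating point below 1.15 V up to 1.15 V and clear the
 * AVS DSCR setting, leaving AVS disabled on these parts.
 */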
static void __init krait_apply_vmin(struct acpu_level *tbl)
{
	for (; tbl->speed.khz != 0; tbl++) {
		if (tbl->vdd_core < 1150000)
			tbl->vdd_core = 1150000;
		tbl->avsdscr_setting = 0;
	}
}

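/*
 * Decode a "format A" PTE efuse, laid out as read below:
 *   bits [3:0]   - speed bin; 0xF means use redundant bits [7:4]
 *   bits [12:10] - PVS bin;   0x7 means use redundant bits [15:13]
 * A field is valid only if its final value is not all-ones.
 */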
void __init get_krait_bin_format_a(void __iomem *base, struct bin_info *bin)
{
	u32 pte_efuse = readl_relaxed(base);

	bin->speed = pte_efuse & 0xF;
	if (bin->speed == 0xF)
		bin->speed = (pte_efuse >> 4) & 0xF;
	bin->speed_valid = bin->speed != 0xF;

	bin->pvs = (pte_efuse >> 10) & 0x7;
	if (bin->pvs == 0x7)
		bin->pvs = (pte_efuse >> 13) & 0x7;
	bin->pvs_valid = bin->pvs != 0x7;
}

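/*
 * Decode a "format B" PTE efuse, laid out as read below:
 *   bits [2:0]   - speed bin
 *   bits [8:6]   - PVS bin
 *   bits [26:24] - redundant select: 1 = speed moves to bits [29:27],
 *                  2 = PVS moves to bits [29:27]
 *   base + 0x4, bit 21 - PVS_BLOW_STATUS (PVS fuses blown, so PVS valid)
 */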
void __init get_krait_bin_format_b(void __iomem *base, struct bin_info *bin)
{
	u32 pte_efuse, redundant_sel;

	pte_efuse = readl_relaxed(base);
	redundant_sel = (pte_efuse >> 24) & 0x7;
	bin->speed = pte_efuse & 0x7;
	bin->pvs = (pte_efuse >> 6) & 0x7;

	switch (redundant_sel) {
	case 1:
		bin->speed = (pte_efuse >> 27) & 0x7;
		break;
	case 2:
		bin->pvs = (pte_efuse >> 27) & 0x7;
		break;
	}
	bin->speed_valid = true;

	/* Check PVS_BLOW_STATUS */
	pte_efuse = readl_relaxed(base + 0x4);
	bin->pvs_valid = !!(pte_efuse & BIT(21));
}

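/*
 * Read the speed and PVS bins from the PTE efuse (falling back to bin 0
 * when a fuse is unprogrammed) and use them to pick the matching entry
 * from the SoC's pvs_tables matrix.
 */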
static struct pvs_table * __init select_freq_plan(
		const struct acpuclk_krait_params *params)
{
	void __iomem *pte_efuse_base;
	struct bin_info bin;

	pte_efuse_base = ioremap(params->pte_efuse_phys, 8);
	if (!pte_efuse_base) {
		dev_err(drv.dev, "Unable to map PTE eFuse base\n");
		return NULL;
	}
	params->get_bin_info(pte_efuse_base, &bin);
	iounmap(pte_efuse_base);

	if (bin.speed_valid) {
		drv.speed_bin = bin.speed;
		dev_info(drv.dev, "SPEED BIN: %d\n", drv.speed_bin);
	} else {
		drv.speed_bin = 0;
		dev_warn(drv.dev, "SPEED BIN: Defaulting to %d\n",
			 drv.speed_bin);
	}

	if (bin.pvs_valid) {
		drv.pvs_bin = bin.pvs;
		dev_info(drv.dev, "ACPU PVS: %d\n", drv.pvs_bin);
	} else {
		drv.pvs_bin = 0;
		dev_warn(drv.dev, "ACPU PVS: Defaulting to %d\n",
			 drv.pvs_bin);
	}

	return &params->pvs_tables[drv.speed_bin][drv.pvs_bin];
}

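/*
 * Deep-copy the SoC-provided tables into driver state so they persist
 * and can be patched at runtime (e.g. by krait_apply_vmin()).
 */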
static void __init drv_data_init(struct device *dev,
				 const struct acpuclk_krait_params *params)
{
	struct pvs_table *pvs;

	drv.dev = dev;
	drv.scalable = kmemdup(params->scalable, params->scalable_size,
			       GFP_KERNEL);
	BUG_ON(!drv.scalable);

	drv.hfpll_data = kmemdup(params->hfpll_data, sizeof(*drv.hfpll_data),
				 GFP_KERNEL);
	BUG_ON(!drv.hfpll_data);

	drv.l2_freq_tbl = kmemdup(params->l2_freq_tbl, params->l2_freq_tbl_size,
				  GFP_KERNEL);
	BUG_ON(!drv.l2_freq_tbl);

	drv.bus_scale = kmemdup(params->bus_scale, sizeof(*drv.bus_scale),
				GFP_KERNEL);
	BUG_ON(!drv.bus_scale);
	drv.bus_scale->usecase = kmemdup(drv.bus_scale->usecase,
		drv.bus_scale->num_usecases * sizeof(*drv.bus_scale->usecase),
		GFP_KERNEL);
	BUG_ON(!drv.bus_scale->usecase);

	pvs = select_freq_plan(params);
	BUG_ON(!pvs || !pvs->table);

	drv.acpu_freq_tbl = kmemdup(pvs->table, pvs->size, GFP_KERNEL);
	BUG_ON(!drv.acpu_freq_tbl);
	drv.boost_uv = pvs->boost_uv;

	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
}

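/*
 * Bring the hardware into a known state: apply vmin fixups, map the L2
 * HFPLL and set up its supplies, sync clock sources to the current L2
 * rate, initialize every online CPU, and lodge the initial bus
 * bandwidth request.
 */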
static void __init hw_init(void)
{
	struct scalable *l2 = &drv.scalable[L2];
	const struct l2_level *l2_level;
	int cpu, rc;

	if (krait_needs_vmin())
		krait_apply_vmin(drv.acpu_freq_tbl);

	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
	BUG_ON(!l2->hfpll_base);

	rc = rpm_regulator_init(l2, VREG_HFPLL_A,
				l2->vreg[VREG_HFPLL_A].max_vdd, false);
	BUG_ON(rc);
	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
				l2->vreg[VREG_HFPLL_B].max_vdd, false);
	BUG_ON(rc);

	l2_level = find_cur_l2_level();
	if (!l2_level) {
		l2_level = drv.l2_freq_tbl;
		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
			l2_level->speed.khz);
	} else {
		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
			l2_level->speed.khz);
	}

	rc = init_clock_sources(l2, &l2_level->speed);
	BUG_ON(rc);

	for_each_online_cpu(cpu) {
		rc = per_cpu_init(cpu);
		BUG_ON(rc);
	}

	bus_init(l2_level);
}

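/*
 * Driver entry point, called from SoC-specific setup code with that
 * SoC's parameter block. A minimal sketch of a caller (names below are
 * hypothetical, for illustration only):
 *
 *	static int __init acpuclk_foo_probe(struct platform_device *pdev)
 *	{
 *		return acpuclk_krait_init(&pdev->dev, &acpuclk_foo_params);
 *	}
 */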
int __init acpuclk_krait_init(struct device *dev,
			      const struct acpuclk_krait_params *params)
{
	drv_data_init(dev, params);
	hw_init();

	cpufreq_table_init();
	dcvs_freq_init();
	acpuclk_register(&acpuclk_krait_data);
	register_hotcpu_notifier(&acpuclk_cpu_notifier);

	acpuclk_krait_debug_init(&drv);

	return 0;
}