/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/clk.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <mach/scm-io.h>
#include "clock.h"
#include "footswitch.h"

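/*
 * When CONFIG_MSM_SECURE_IO is set, the register reads and writes below are
 * routed through the secure accessors from <mach/scm-io.h> rather than plain
 * readl_relaxed()/writel_relaxed().
 */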
#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

#define REG(off) (MSM_MMSS_CLK_CTL_BASE + (off))
#define GEMINI_GFS_CTL_REG	REG(0x01A0)
#define GFX2D0_GFS_CTL_REG	REG(0x0180)
#define GFX2D1_GFS_CTL_REG	REG(0x0184)
#define GFX3D_GFS_CTL_REG	REG(0x0188)
#define MDP_GFS_CTL_REG		REG(0x0190)
#define ROT_GFS_CTL_REG		REG(0x018C)
#define VED_GFS_CTL_REG		REG(0x0194)
#define VFE_GFS_CTL_REG		REG(0x0198)
#define VPE_GFS_CTL_REG		REG(0x019C)
#define VCAP_GFS_CTL_REG	REG(0x0254)

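/*
 * Bit fields in the *_GFS_CTL_REG registers: ENABLE_BIT switches the rail on,
 * CLAMP_BIT clamps the core's I/O ports while it is collapsed, and
 * RETENTION_BIT, when set, prevents a full power-collapse when the footswitch
 * is disabled (probe clears it for that reason).
 */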
#define CLAMP_BIT	BIT(5)
#define ENABLE_BIT	BIT(8)
#define RETENTION_BIT	BIT(9)

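/* AHB_CLK cycles to wait before asserting gfs_en_all (see footswitch_probe). */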
#define GFS_DELAY_CNT		31

#define RESET_DELAY_US		1
/* Clock rate to use if one has not previously been set. */
#define DEFAULT_RATE		27000000
#define MAX_CLKS		10

/*
 * Lock is only needed to protect against the first footswitch_enable()
 * call occurring concurrently with late_footswitch_init().
 */
static DEFINE_MUTEX(claim_lock);

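/*
 * struct footswitch - state for one footswitch-controlled power rail
 * @rdev:	regulator device registered for this rail
 * @desc:	regulator descriptor (id, name, ops)
 * @gfs_ctl_reg:	address of this rail's GFS_CTL register
 * @bus_port0:	first bus port to halt/unhalt, or 0 if unused
 * @bus_port1:	second bus port to halt/unhalt, or 0 if unused
 * @is_enabled:	cached state of the rail
 * @is_claimed:	true once a consumer has enabled the rail (checked by
 *		late_footswitch_init())
 * @clk_data:	zero-terminated array of clocks in this power domain
 * @core_clk:	the domain's "core_clk" entry from @clk_data, if present
 */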
struct footswitch {
	struct regulator_dev	*rdev;
	struct regulator_desc	desc;
	void			*gfs_ctl_reg;
	int			bus_port0, bus_port1;
	bool			is_enabled;
	bool			is_claimed;
	struct fs_clk_data	*clk_data;
	struct clk		*core_clk;
};

static int setup_clocks(struct footswitch *fs)
{
	int rc = 0;
	struct fs_clk_data *clock;
	unsigned long rate;

	/*
	 * Enable all clocks in the power domain. If a specific clock rate is
	 * required for reset timing, set that rate before enabling the clocks.
	 */
	for (clock = fs->clk_data; clock->clk; clock++) {
		clock->rate = clk_get_rate(clock->clk);
		if (!clock->rate || clock->reset_rate) {
			rate = clock->reset_rate ?
				clock->reset_rate : DEFAULT_RATE;
			rc = clk_set_rate(clock->clk, rate);
			if (rc && rc != -ENOSYS) {
				pr_err("Failed to set %s %s rate to %lu Hz.\n",
					fs->desc.name, clock->name, rate);
				for (clock--; clock >= fs->clk_data; clock--) {
					if (clock->enabled)
						clk_disable_unprepare(
								clock->clk);
					clk_set_rate(clock->clk, clock->rate);
				}
				return rc;
			}
		}
		/*
		 * Some clocks are for reset purposes only. These clocks will
		 * fail to enable. Ignore the failures but keep track of them
		 * so we don't try to disable them later and crash due to
		 * unbalanced calls.
		 */
		clock->enabled = !clk_prepare_enable(clock->clk);
	}

	return 0;
}

static void restore_clocks(struct footswitch *fs)
{
	struct fs_clk_data *clock;

	/* Restore clocks to the states they had before setup_clocks(). */
	for (clock = fs->clk_data; clock->clk; clock++) {
		if (clock->enabled)
			clk_disable_unprepare(clock->clk);
		if (clock->rate && clk_set_rate(clock->clk, clock->rate))
			pr_err("Failed to restore %s %s rate to %lu Hz.\n",
				fs->desc.name, clock->name, clock->rate);
	}
}

static int footswitch_is_enabled(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);

	return fs->is_enabled;
}

static int footswitch_enable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	mutex_lock(&claim_lock);
	fs->is_claimed = true;
	mutex_unlock(&claim_lock);

	/* Return early if already enabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & (ENABLE_BIT | CLAMP_BIT)) == ENABLE_BIT)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Un-halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_portunhalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 unhalt failed.\n", fs->desc.name);
			goto err;
		}
	}
	if (fs->bus_port1) {
		rc = msm_bus_axi_portunhalt(fs->bus_port1);
		if (rc) {
			pr_err("%s port 1 unhalt failed.\n", fs->desc.name);
			goto err_port2_halt;
		}
	}

	/*
	 * (Re-)Assert resets for all clocks in the clock domain, since the
	 * first call to footswitch_enable() happens before any call to
	 * footswitch_disable(), and resets should be asserted before power
	 * is restored.
	 */
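	/* Find the end of the clock list, then assert resets walking backwards. */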
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(RESET_DELAY_US);

	/* Enable the power rail at the footswitch. */
	regval |= ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);
	/* Wait for the rail to fully charge. */
	mb();
	udelay(1);

	/* Un-clamp the I/O ports. */
	regval &= ~CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Deassert resets for all clocks in the power domain. */
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_reset(clock->clk, CLK_RESET_DEASSERT);
	/* Toggle core reset again after first power-on (required for GFX3D). */
	if (fs->desc.id == FS_GFX3D) {
		clk_reset(fs->core_clk, CLK_RESET_ASSERT);
		udelay(RESET_DELAY_US);
		clk_reset(fs->core_clk, CLK_RESET_DEASSERT);
		udelay(RESET_DELAY_US);
	}

	/* Prevent core memory from collapsing when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = true;
	return 0;

err_port2_halt:
	msm_bus_axi_porthalt(fs->bus_port0);
err:
	restore_clocks(fs);
	return rc;
}

static int footswitch_disable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Return early if already disabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & ENABLE_BIT) == 0)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Allow core memory to collapse when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN);

	/* Halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_porthalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 halt failed.\n", fs->desc.name);
			goto err;
		}
	}
	if (fs->bus_port1) {
		rc = msm_bus_axi_porthalt(fs->bus_port1);
		if (rc) {
			pr_err("%s port 1 halt failed.\n", fs->desc.name);
			goto err_port2_halt;
		}
	}

	/*
	 * Assert resets for all clocks in the clock domain so that
	 * outputs settle prior to clamping.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(RESET_DELAY_US);

	/*
	 * Return clocks to their state before this function. For robustness,
	 * if memory retention across collapses is required, clocks should be
	 * disabled before the clamps are asserted. Assuming the clocks were
	 * off before entering footswitch_disable(), this will be true.
	 */
	restore_clocks(fs);

	/*
	 * Clamp the I/O ports of the core to ensure the values
	 * remain fixed while the core is collapsed.
	 */
	regval |= CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Collapse the power rail at the footswitch. */
	regval &= ~ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	fs->is_enabled = false;
	return 0;

err_port2_halt:
	msm_bus_axi_portunhalt(fs->bus_port0);
err:
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);
	restore_clocks(fs);
	return rc;
}

static int gfx2d_footswitch_enable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	mutex_lock(&claim_lock);
	fs->is_claimed = true;
	mutex_unlock(&claim_lock);

	/* Return early if already enabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & (ENABLE_BIT | CLAMP_BIT)) == ENABLE_BIT)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Un-halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_portunhalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 unhalt failed.\n", fs->desc.name);
			goto err;
		}
	}

	/* Disable core clock. */
	clk_disable_unprepare(fs->core_clk);

	/*
	 * (Re-)Assert resets for all clocks in the clock domain, since the
	 * first call to footswitch_enable() happens before any call to
	 * footswitch_disable(), and resets should be asserted before power
	 * is restored.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(RESET_DELAY_US);

	/* Enable the power rail at the footswitch. */
	regval |= ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);
	mb();
	udelay(1);

	/* Un-clamp the I/O ports. */
	regval &= ~CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Deassert resets for all clocks in the power domain. */
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_reset(clock->clk, CLK_RESET_DEASSERT);
	udelay(RESET_DELAY_US);

	/* Re-enable core clock. */
	clk_prepare_enable(fs->core_clk);

	/* Prevent core memory from collapsing when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = true;
	return 0;

err:
	restore_clocks(fs);
	return rc;
}

static int gfx2d_footswitch_disable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Return early if already disabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & ENABLE_BIT) == 0)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Allow core memory to collapse when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN);

	/* Halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_porthalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 halt failed.\n", fs->desc.name);
			goto err;
		}
	}

	/* Disable core clock. */
	clk_disable_unprepare(fs->core_clk);

	/*
	 * Assert resets for all clocks in the clock domain so that
	 * outputs settle prior to clamping.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(5);

	/*
	 * Clamp the I/O ports of the core to ensure the values
	 * remain fixed while the core is collapsed.
	 */
	regval |= CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Collapse the power rail at the footswitch. */
	regval &= ~ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Re-enable core clock. */
	clk_prepare_enable(fs->core_clk);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = false;
	return 0;

err:
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN);
	restore_clocks(fs);
	return rc;
}

static struct regulator_ops standard_fs_ops = {
	.is_enabled = footswitch_is_enabled,
	.enable = footswitch_enable,
	.disable = footswitch_disable,
};

static struct regulator_ops gfx2d_fs_ops = {
	.is_enabled = footswitch_is_enabled,
	.enable = gfx2d_footswitch_enable,
	.disable = gfx2d_footswitch_disable,
};

#define FOOTSWITCH(_id, _name, _ops, _gfs_ctl_reg) \
	[(_id)] = { \
		.desc = { \
			.id = (_id), \
			.name = (_name), \
			.ops = (_ops), \
			.type = REGULATOR_VOLTAGE, \
			.owner = THIS_MODULE, \
		}, \
		.gfs_ctl_reg = (_gfs_ctl_reg), \
	}
static struct footswitch footswitches[] = {
	FOOTSWITCH(FS_GFX2D0, "fs_gfx2d0", &gfx2d_fs_ops, GFX2D0_GFS_CTL_REG),
	FOOTSWITCH(FS_GFX2D1, "fs_gfx2d1", &gfx2d_fs_ops, GFX2D1_GFS_CTL_REG),
	FOOTSWITCH(FS_GFX3D, "fs_gfx3d", &standard_fs_ops, GFX3D_GFS_CTL_REG),
	FOOTSWITCH(FS_IJPEG, "fs_ijpeg", &standard_fs_ops, GEMINI_GFS_CTL_REG),
	FOOTSWITCH(FS_MDP, "fs_mdp", &standard_fs_ops, MDP_GFS_CTL_REG),
	FOOTSWITCH(FS_ROT, "fs_rot", &standard_fs_ops, ROT_GFS_CTL_REG),
	FOOTSWITCH(FS_VED, "fs_ved", &standard_fs_ops, VED_GFS_CTL_REG),
	FOOTSWITCH(FS_VFE, "fs_vfe", &standard_fs_ops, VFE_GFS_CTL_REG),
	FOOTSWITCH(FS_VPE, "fs_vpe", &standard_fs_ops, VPE_GFS_CTL_REG),
	FOOTSWITCH(FS_VCAP, "fs_vcap", &standard_fs_ops, VCAP_GFS_CTL_REG),
};
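
/*
 * These rails are consumed through the standard regulator framework. A
 * minimal consumer sketch (hypothetical device and supply mapping, assuming
 * board code ties the supply name to one of the "fs_*" regulators above):
 *
 *	struct regulator *fs_reg = regulator_get(dev, "fs_gfx3d");
 *	if (!IS_ERR(fs_reg)) {
 *		regulator_enable(fs_reg);
 *		...
 *		regulator_disable(fs_reg);
 *		regulator_put(fs_reg);
 *	}
 */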

static int footswitch_probe(struct platform_device *pdev)
{
	struct footswitch *fs;
	struct regulator_init_data *init_data;
	struct fs_driver_data *driver_data;
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	if (pdev == NULL)
		return -EINVAL;

	if (pdev->id >= MAX_FS)
		return -ENODEV;

	init_data = pdev->dev.platform_data;
	driver_data = init_data->driver_data;
	fs = &footswitches[pdev->id];
	fs->clk_data = driver_data->clks;
	fs->bus_port0 = driver_data->bus_port0;
	fs->bus_port1 = driver_data->bus_port1;
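
	/*
	 * The driver data consumed above is supplied by board code. An
	 * illustrative sketch (hypothetical clock names and values; the real
	 * structure definitions live in footswitch.h):
	 *
	 *	static struct fs_clk_data gfx3d_clks[] = {
	 *		{ .name = "core_clk", .reset_rate = 27000000 },
	 *		{ .name = "iface_clk" },
	 *		{ }
	 *	};
	 *	static struct fs_driver_data gfx3d_fs_data = {
	 *		.clks = gfx3d_clks,
	 *		.bus_port0 = 0,
	 *		.bus_port1 = 0,
	 *	};
	 */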

	for (clock = fs->clk_data; clock->name; clock++) {
		clock->clk = clk_get(&pdev->dev, clock->name);
		if (IS_ERR(clock->clk)) {
			rc = PTR_ERR(clock->clk);
			pr_err("%s clk_get(%s) failed\n", fs->desc.name,
				clock->name);
			goto err;
		}
		if (!strncmp(clock->name, "core_clk", 8))
			fs->core_clk = clock->clk;
	}

	/*
	 * Set number of AHB_CLK cycles to delay the assertion of gfs_en_all
	 * after enabling the footswitch. Also ensure the retention bit is
	 * clear so disabling the footswitch will power-collapse the core.
	 */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	regval |= GFS_DELAY_CNT;
	regval &= ~RETENTION_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	fs->rdev = regulator_register(&fs->desc, &pdev->dev,
				      init_data, fs, NULL);
	if (IS_ERR(fs->rdev)) {
		pr_err("regulator_register(\"%s\") failed\n",
			fs->desc.name);
		rc = PTR_ERR(fs->rdev);
		goto err;
	}

	return 0;

err:
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_put(clock->clk);

	return rc;
}

static int __devexit footswitch_remove(struct platform_device *pdev)
{
	struct footswitch *fs = &footswitches[pdev->id];
	struct fs_clk_data *clock;

	for (clock = fs->clk_data; clock->clk; clock++)
		clk_put(clock->clk);
	regulator_unregister(fs->rdev);

	return 0;
}

static struct platform_driver footswitch_driver = {
	.probe = footswitch_probe,
	.remove = __devexit_p(footswitch_remove),
	.driver = {
		.name = "footswitch-8x60",
		.owner = THIS_MODULE,
	},
};

static int __init late_footswitch_init(void)
{
	int i;

	mutex_lock(&claim_lock);
	/* Turn off all registered but unused footswitches. */
	for (i = 0; i < ARRAY_SIZE(footswitches); i++)
		if (footswitches[i].rdev && !footswitches[i].is_claimed)
			footswitches[i].rdev->desc->ops->
				disable(footswitches[i].rdev);
	mutex_unlock(&claim_lock);

	return 0;
}
late_initcall(late_footswitch_init);

static int __init footswitch_init(void)
{
	return platform_driver_register(&footswitch_driver);
}
subsys_initcall(footswitch_init);

static void __exit footswitch_exit(void)
{
	platform_driver_unregister(&footswitch_driver);
}
module_exit(footswitch_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM8x60 rail footswitch");
MODULE_ALIAS("platform:footswitch-msm8x60");