/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/clk.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <mach/scm-io.h>
#include <mach/clk.h>
#include "footswitch.h"

#ifdef CONFIG_MSM_SECURE_IO
#undef readl_relaxed
#undef writel_relaxed
#define readl_relaxed secure_readl
#define writel_relaxed secure_writel
#endif

#define REG(off)		(MSM_MMSS_CLK_CTL_BASE + (off))
#define GEMINI_GFS_CTL_REG	REG(0x01A0)
#define GFX2D0_GFS_CTL_REG	REG(0x0180)
#define GFX2D1_GFS_CTL_REG	REG(0x0184)
#define GFX3D_GFS_CTL_REG	REG(0x0188)
#define MDP_GFS_CTL_REG		REG(0x0190)
#define ROT_GFS_CTL_REG		REG(0x018C)
#define VED_GFS_CTL_REG		REG(0x0194)
#define VFE_GFS_CTL_REG		REG(0x0198)
#define VPE_GFS_CTL_REG		REG(0x019C)
#define VCAP_GFS_CTL_REG	REG(0x0254)

#define CLAMP_BIT		BIT(5)
#define ENABLE_BIT		BIT(8)
#define RETENTION_BIT		BIT(9)
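/* AHB clock cycles to delay assertion of gfs_en_all after enabling. */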
#define GFS_DELAY_CNT		31

/* Reset propagation delay to use if one has not been specified. */
#define DEFAULT_RESET_DELAY_US	1
/* Clock rate to use if one has not previously been set. */
#define DEFAULT_RATE		27000000
#define MAX_CLKS		10

/*
 * Lock is only needed to protect against the first footswitch_enable()
 * call occurring concurrently with late_footswitch_init().
 */
static DEFINE_MUTEX(claim_lock);
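/**
 * struct footswitch - state for one footswitch-controlled power rail
 * @rdev:		regulator device registered for this footswitch
 * @desc:		regulator descriptor passed to the regulator framework
 * @gfs_ctl_reg:	address of the footswitch's GFS_CTL register
 * @bus_port0:		first bus port to halt/unhalt, or 0 if unused
 * @bus_port1:		second bus port to halt/unhalt, or 0 if unused
 * @is_enabled:		true while the power rail is enabled
 * @is_claimed:		set on the first enable call; unclaimed footswitches
 *			are turned off by late_footswitch_init()
 * @clk_data:		clocks in the power domain, terminated by a NULL clk
 * @core_clk:		the "core_clk" entry from @clk_data
 * @reset_delay_us:	time to allow for block resets to propagate
 */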
struct footswitch {
	struct regulator_dev	*rdev;
	struct regulator_desc	desc;
	void			*gfs_ctl_reg;
	int			bus_port0, bus_port1;
	bool			is_enabled;
	bool			is_claimed;
	struct fs_clk_data	*clk_data;
	struct clk		*core_clk;
	unsigned long		reset_delay_us;
};

static int setup_clocks(struct footswitch *fs)
{
	int rc = 0;
	struct fs_clk_data *clock;
Matt Wagantall | b82a513 | 2011-12-12 22:26:41 -0800 | [diff] [blame] | 82 | long rate; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 83 | |
| 84 | /* |
Matt Wagantall | b82a513 | 2011-12-12 22:26:41 -0800 | [diff] [blame] | 85 | * Enable all clocks in the power domain. If a specific clock rate is |
| 86 | * required for reset timing, set that rate before enabling the clocks. |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 87 | */ |
Matt Wagantall | b82a513 | 2011-12-12 22:26:41 -0800 | [diff] [blame] | 88 | for (clock = fs->clk_data; clock->clk; clock++) { |
| 89 | clock->rate = clk_get_rate(clock->clk); |
| 90 | if (!clock->rate || clock->reset_rate) { |
| 91 | rate = clock->reset_rate ? |
| 92 | clock->reset_rate : DEFAULT_RATE; |
| 93 | rc = clk_set_rate(clock->clk, rate); |
| 94 | if (rc && rc != -ENOSYS) { |
				pr_err("Failed to set %s %s rate to %lu Hz.\n",
					fs->desc.name, clock->name, rate);
				for (clock--; clock >= fs->clk_data; clock--) {
					if (clock->enabled)
						clk_disable_unprepare(
								clock->clk);
					clk_set_rate(clock->clk, clock->rate);
				}
				return rc;
			}
		}
		/*
		 * Some clocks are for reset purposes only. These clocks will
		 * fail to enable. Ignore the failures but keep track of them so
		 * we don't try to disable them later and crash due to
		 * unbalanced calls.
		 */
		clock->enabled = !clk_prepare_enable(clock->clk);
	}

	return 0;
}

static void restore_clocks(struct footswitch *fs)
{
	struct fs_clk_data *clock;

	/* Restore clocks to their original states before setup_clocks(). */
	for (clock = fs->clk_data; clock->clk; clock++) {
		if (clock->enabled)
			clk_disable_unprepare(clock->clk);
		if (clock->rate && clk_set_rate(clock->clk, clock->rate))
			pr_err("Failed to restore %s %s rate to %lu Hz.\n",
				fs->desc.name, clock->name, clock->rate);
	}
}

static int footswitch_is_enabled(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);

	return fs->is_enabled;
}

static int footswitch_enable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	mutex_lock(&claim_lock);
	fs->is_claimed = true;
	mutex_unlock(&claim_lock);

	/* Return early if already enabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & (ENABLE_BIT | CLAMP_BIT)) == ENABLE_BIT)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Un-halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_portunhalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 unhalt failed.\n", fs->desc.name);
			goto err;
		}
	}
	if (fs->bus_port1) {
		rc = msm_bus_axi_portunhalt(fs->bus_port1);
		if (rc) {
			pr_err("%s port 1 unhalt failed.\n", fs->desc.name);
			goto err_port2_halt;
		}
	}

	/*
	 * (Re-)Assert resets for all clocks in the clock domain, since
	 * footswitch_enable() is first called before footswitch_disable()
	 * and resets should be asserted before power is restored.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(fs->reset_delay_us);

	/* Enable the power rail at the footswitch. */
	regval |= ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);
	/* Wait for the rail to fully charge. */
	mb();
	udelay(1);

	/* Un-clamp the I/O ports. */
	regval &= ~CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Deassert resets for all clocks in the power domain. */
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_reset(clock->clk, CLK_RESET_DEASSERT);
	/* Toggle core reset again after first power-on (required for GFX3D). */
	if (fs->desc.id == FS_GFX3D) {
		clk_reset(fs->core_clk, CLK_RESET_ASSERT);
		udelay(fs->reset_delay_us);
		clk_reset(fs->core_clk, CLK_RESET_DEASSERT);
		udelay(fs->reset_delay_us);
	}

	/* Prevent core memory from collapsing when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = true;
	return 0;

err_port2_halt:
	msm_bus_axi_porthalt(fs->bus_port0);
err:
	restore_clocks(fs);
	return rc;
}

static int footswitch_disable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Return early if already disabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & ENABLE_BIT) == 0)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Allow core memory to collapse when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN_MEM);

	/* Halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_porthalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 halt failed.\n", fs->desc.name);
			goto err;
		}
	}
	if (fs->bus_port1) {
		rc = msm_bus_axi_porthalt(fs->bus_port1);
		if (rc) {
			pr_err("%s port 1 halt failed.\n", fs->desc.name);
			goto err_port2_halt;
		}
	}

	/*
	 * Assert resets for all clocks in the clock domain so that
	 * outputs settle prior to clamping.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(fs->reset_delay_us);

	/*
	 * Return clocks to their state before this function. For robustness
	 * if memory-retention across collapses is required, clocks should
	 * be disabled before asserting the clamps. Assuming clocks were off
	 * before entering footswitch_disable(), this will be true.
	 */
	restore_clocks(fs);

	/*
	 * Clamp the I/O ports of the core to ensure the values
	 * remain fixed while the core is collapsed.
	 */
	regval |= CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Collapse the power rail at the footswitch. */
	regval &= ~ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	fs->is_enabled = false;
	return 0;

err_port2_halt:
	msm_bus_axi_portunhalt(fs->bus_port0);
err:
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);
	restore_clocks(fs);
	return rc;
}

static int gfx2d_footswitch_enable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	mutex_lock(&claim_lock);
	fs->is_claimed = true;
	mutex_unlock(&claim_lock);

	/* Return early if already enabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & (ENABLE_BIT | CLAMP_BIT)) == ENABLE_BIT)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Un-halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_portunhalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 unhalt failed.\n", fs->desc.name);
			goto err;
		}
	}

	/* Disable core clock. */
	clk_disable_unprepare(fs->core_clk);

	/*
	 * (Re-)Assert resets for all clocks in the clock domain, since
	 * footswitch_enable() is first called before footswitch_disable()
	 * and resets should be asserted before power is restored.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(fs->reset_delay_us);

	/* Enable the power rail at the footswitch. */
	regval |= ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);
	mb();
	udelay(1);

	/* Un-clamp the I/O ports. */
	regval &= ~CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Deassert resets for all clocks in the power domain. */
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_reset(clock->clk, CLK_RESET_DEASSERT);
	udelay(fs->reset_delay_us);

	/* Re-enable core clock. */
	clk_prepare_enable(fs->core_clk);

	/* Prevent core memory from collapsing when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = true;
	return 0;

err:
	restore_clocks(fs);
	return rc;
}

static int gfx2d_footswitch_disable(struct regulator_dev *rdev)
{
	struct footswitch *fs = rdev_get_drvdata(rdev);
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	/* Return early if already disabled. */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	if ((regval & ENABLE_BIT) == 0)
		return 0;

	/* Make sure required clocks are on at the correct rates. */
	rc = setup_clocks(fs);
	if (rc)
		return rc;

	/* Allow core memory to collapse when its clock is gated. */
	clk_set_flags(fs->core_clk, CLKFLAG_NORETAIN_MEM);

	/* Halt all bus ports in the power domain. */
	if (fs->bus_port0) {
		rc = msm_bus_axi_porthalt(fs->bus_port0);
		if (rc) {
			pr_err("%s port 0 halt failed.\n", fs->desc.name);
			goto err;
		}
	}

	/* Disable core clock. */
	clk_disable_unprepare(fs->core_clk);

	/*
	 * Assert resets for all clocks in the clock domain so that
	 * outputs settle prior to clamping.
	 */
	for (clock = fs->clk_data; clock->clk; clock++)
		; /* Do nothing */
	for (clock--; clock >= fs->clk_data; clock--)
		clk_reset(clock->clk, CLK_RESET_ASSERT);
	/* Wait for synchronous resets to propagate. */
	udelay(5);

	/*
	 * Clamp the I/O ports of the core to ensure the values
	 * remain fixed while the core is collapsed.
	 */
	regval |= CLAMP_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Collapse the power rail at the footswitch. */
	regval &= ~ENABLE_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	/* Re-enable core clock. */
	clk_prepare_enable(fs->core_clk);

	/* Return clocks to their state before this function. */
	restore_clocks(fs);

	fs->is_enabled = false;
	return 0;

err:
	clk_set_flags(fs->core_clk, CLKFLAG_RETAIN_MEM);
	restore_clocks(fs);
	return rc;
}
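/*
 * The GFX2D cores have their core clock gated across the reset sequence
 * (see gfx2d_footswitch_enable()), so they use dedicated handlers; all
 * other footswitches use the standard handlers.
 */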
static struct regulator_ops standard_fs_ops = {
	.is_enabled = footswitch_is_enabled,
	.enable = footswitch_enable,
	.disable = footswitch_disable,
};

static struct regulator_ops gfx2d_fs_ops = {
	.is_enabled = footswitch_is_enabled,
	.enable = gfx2d_footswitch_enable,
	.disable = gfx2d_footswitch_disable,
};
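/* Static initializer for one entry of the footswitches[] table below. */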
#define FOOTSWITCH(_id, _name, _ops, _gfs_ctl_reg) \
	[(_id)] = { \
		.desc = { \
			.id = (_id), \
			.name = (_name), \
			.ops = (_ops), \
			.type = REGULATOR_VOLTAGE, \
			.owner = THIS_MODULE, \
		}, \
		.gfs_ctl_reg = (_gfs_ctl_reg), \
	}
static struct footswitch footswitches[] = {
	FOOTSWITCH(FS_GFX2D0, "fs_gfx2d0", &gfx2d_fs_ops, GFX2D0_GFS_CTL_REG),
	FOOTSWITCH(FS_GFX2D1, "fs_gfx2d1", &gfx2d_fs_ops, GFX2D1_GFS_CTL_REG),
	FOOTSWITCH(FS_GFX3D, "fs_gfx3d", &standard_fs_ops, GFX3D_GFS_CTL_REG),
	FOOTSWITCH(FS_IJPEG, "fs_ijpeg", &standard_fs_ops, GEMINI_GFS_CTL_REG),
	FOOTSWITCH(FS_MDP, "fs_mdp", &standard_fs_ops, MDP_GFS_CTL_REG),
	FOOTSWITCH(FS_ROT, "fs_rot", &standard_fs_ops, ROT_GFS_CTL_REG),
	FOOTSWITCH(FS_VED, "fs_ved", &standard_fs_ops, VED_GFS_CTL_REG),
	FOOTSWITCH(FS_VFE, "fs_vfe", &standard_fs_ops, VFE_GFS_CTL_REG),
	FOOTSWITCH(FS_VPE, "fs_vpe", &standard_fs_ops, VPE_GFS_CTL_REG),
	FOOTSWITCH(FS_VCAP, "fs_vcap", &standard_fs_ops, VCAP_GFS_CTL_REG),
};
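/*
 * Consumers control a footswitch through the normal regulator API. A
 * minimal sketch, assuming the board file maps one of the supplies above
 * (e.g. "fs_gfx3d") to the consumer device as "vdd":
 *
 *	struct regulator *fs = regulator_get(dev, "vdd");
 *	if (!IS_ERR(fs)) {
 *		if (!regulator_enable(fs)) {
 *			... use the hardware ...
 *			regulator_disable(fs);
 *		}
 *		regulator_put(fs);
 *	}
 */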
static int footswitch_probe(struct platform_device *pdev)
{
	struct footswitch *fs;
	struct regulator_init_data *init_data;
	struct fs_driver_data *driver_data;
	struct fs_clk_data *clock;
	uint32_t regval, rc = 0;

	if (pdev == NULL)
		return -EINVAL;

	if (pdev->id >= MAX_FS)
		return -ENODEV;

	init_data = pdev->dev.platform_data;
	driver_data = init_data->driver_data;
	fs = &footswitches[pdev->id];
	fs->clk_data = driver_data->clks;
	fs->bus_port0 = driver_data->bus_port0;
	fs->bus_port1 = driver_data->bus_port1;
	fs->reset_delay_us =
		driver_data->reset_delay_us ? : DEFAULT_RESET_DELAY_US;

	for (clock = fs->clk_data; clock->name; clock++) {
		clock->clk = clk_get(&pdev->dev, clock->name);
		if (IS_ERR(clock->clk)) {
			rc = PTR_ERR(clock->clk);
			pr_err("%s clk_get(%s) failed\n", fs->desc.name,
				clock->name);
			goto err;
		}
		if (!strncmp(clock->name, "core_clk", 8))
			fs->core_clk = clock->clk;
	}

	/*
	 * Set number of AHB_CLK cycles to delay the assertion of gfs_en_all
	 * after enabling the footswitch. Also ensure the retention bit is
	 * clear so disabling the footswitch will power-collapse the core.
	 */
	regval = readl_relaxed(fs->gfs_ctl_reg);
	regval |= GFS_DELAY_CNT;
	regval &= ~RETENTION_BIT;
	writel_relaxed(regval, fs->gfs_ctl_reg);

	fs->rdev = regulator_register(&fs->desc, &pdev->dev,
					init_data, fs, NULL);
	if (IS_ERR(footswitches[pdev->id].rdev)) {
		pr_err("regulator_register(\"%s\") failed\n",
			fs->desc.name);
		rc = PTR_ERR(footswitches[pdev->id].rdev);
		goto err;
	}

	return 0;

err:
	for (clock = fs->clk_data; clock->clk; clock++)
		clk_put(clock->clk);

	return rc;
}

static int __devexit footswitch_remove(struct platform_device *pdev)
{
	struct footswitch *fs = &footswitches[pdev->id];
	struct fs_clk_data *clock;

	for (clock = fs->clk_data; clock->clk; clock++)
		clk_put(clock->clk);
	regulator_unregister(fs->rdev);

	return 0;
}

static struct platform_driver footswitch_driver = {
	.probe		= footswitch_probe,
	.remove		= __devexit_p(footswitch_remove),
	.driver		= {
		.name	= "footswitch-8x60",
		.owner	= THIS_MODULE,
	},
};

static int __init late_footswitch_init(void)
{
	int i;

	mutex_lock(&claim_lock);
	/* Turn off all registered but unused footswitches. */
	for (i = 0; i < ARRAY_SIZE(footswitches); i++)
		if (footswitches[i].rdev && !footswitches[i].is_claimed)
			footswitches[i].rdev->desc->ops->
				disable(footswitches[i].rdev);
	mutex_unlock(&claim_lock);

	return 0;
}
late_initcall(late_footswitch_init);

static int __init footswitch_init(void)
{
	return platform_driver_register(&footswitch_driver);
}
subsys_initcall(footswitch_init);

static void __exit footswitch_exit(void)
{
	platform_driver_unregister(&footswitch_driver);
}
module_exit(footswitch_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM8x60 rail footswitch");
MODULE_ALIAS("platform:footswitch-msm8x60");