/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <mach/clk.h>

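/* GDSC control register (GDSCR) bit fields. */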
#define PWR_ON_MASK		BIT(31)
#define EN_REST_WAIT_MASK	(0xF << 20)
#define EN_FEW_WAIT_MASK	(0xF << 16)
#define CLK_DIS_WAIT_MASK	(0xF << 12)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)

/*
 * Wait 2^n CXO cycles between states: n=2 (4 cycles) for the EN_REST and
 * CLK_DIS waits, and n=8 (256 cycles) for the EN_FEW wait.
 */
#define EN_REST_WAIT_VAL	(0x2 << 20)
#define EN_FEW_WAIT_VAL		(0x8 << 16)
#define CLK_DIS_WAIT_VAL	(0x2 << 12)

#define TIMEOUT_US		100

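/*
 * struct gdsc - state for one Globally Distributed Switch Controller
 * (GDSC) power domain, exposed to consumers as a regulator.
 * @rdev:            regulator returned by regulator_register()
 * @rdesc:           regulator description for this domain
 * @gdscr:           mapped GDSC control register
 * @clocks:          clocks branching off of this power domain
 * @clock_count:     number of entries in @clocks
 * @toggle_mems:     toggle memory/peripheral retention on enable/disable
 * @toggle_logic:    collapse and restore the domain logic via SW_COLLAPSE;
 *                   when false, only the clocks' resets are toggled
 * @resets_asserted: software enable state used when @toggle_logic is false
 */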
struct gdsc {
	struct regulator_dev *rdev;
	struct regulator_desc rdesc;
	void __iomem *gdscr;
	struct clk **clocks;
	int clock_count;
	bool toggle_mems;
	bool toggle_logic;
	bool resets_asserted;
};

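/*
 * When logic collapse is skipped, the GDSCR is left untouched by
 * enable/disable, so report the enable state tracked in software rather
 * than reading PWR_ON.
 */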
static int gdsc_is_enabled(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);

	if (!sc->toggle_logic)
		return !sc->resets_asserted;

	return !!(readl_relaxed(sc->gdscr) & PWR_ON_MASK);
}

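/*
 * Enable the power domain: clear SW_COLLAPSE and poll for the rail to come
 * up, or, when logic collapse is skipped, deassert the clock resets instead.
 */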
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret;

	if (sc->toggle_logic) {
		regval = readl_relaxed(sc->gdscr);
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&rdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
	} else {
		for (i = 0; i < sc->clock_count; i++)
			clk_reset(sc->clocks[i], CLK_RESET_DEASSERT);
		sc->resets_asserted = false;
	}

	if (sc->toggle_mems) {
		for (i = 0; i < sc->clock_count; i++) {
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		}
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);

	return 0;
}

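/*
 * Disable the power domain: set SW_COLLAPSE and poll for the rail to drop,
 * or, when logic collapse is skipped, assert the clock resets instead.
 */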
static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret = 0;

	if (sc->toggle_logic) {
		regval = readl_relaxed(sc->gdscr);
		regval |= SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					       !(regval & PWR_ON_MASK),
					       TIMEOUT_US);
		if (ret)
			dev_err(&rdev->dev, "%s disable timed out\n",
				sc->rdesc.name);
	} else {
		for (i = 0; i < sc->clock_count; i++)
			clk_reset(sc->clocks[i], CLK_RESET_ASSERT);
		sc->resets_asserted = true;
	}

	if (sc->toggle_mems) {
		for (i = 0; i < sc->clock_count; i++) {
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
		}
	}

	return ret;
}

static struct regulator_ops gdsc_ops = {
	.is_enabled = gdsc_is_enabled,
	.enable = gdsc_enable,
	.disable = gdsc_disable,
};

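/*
 * Set up one GDSC from its devicetree node. A minimal illustrative node
 * (the address, clock name, and parent label are hypothetical, not taken
 * from a real board file):
 *
 *	gdsc_example: qcom,gdsc@fd8c8024 {
 *		compatible = "qcom,gdsc";
 *		regulator-name = "gdsc_example";
 *		reg = <0xfd8c8024 0x4>;
 *		parent-supply = <&example_parent>;
 *		qcom,clock-names = "example_clk";
 *	};
 */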
static int __devinit gdsc_probe(struct platform_device *pdev)
{
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval;
	bool retain_mems;
	int i, ret;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (init_data == NULL)
		return -ENOMEM;

	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;
	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
						    "qcom,clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (IS_ERR_VALUE(sc->clock_count)) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;
	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;
		of_property_read_string_index(pdev->dev.of_node,
					      "qcom,clock-names", i,
					      &clock_name);
		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);
			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}
	}

	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on register
	 * writes. Disable SW override: use the hardware state machine
	 * for sequencing.
	 */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	/* Configure wait time between states. */
	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	writel_relaxed(regval, sc->gdscr);

	retain_mems = of_property_read_bool(pdev->dev.of_node,
					    "qcom,retain-mems");
	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mems || (regval & PWR_ON_MASK)) {
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		} else {
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
		}
	}
	sc->toggle_mems = !retain_mems;

	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						  "qcom,skip-logic-collapse");
	if (!sc->toggle_logic) {
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
	}

	sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
				      pdev->dev.of_node);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}

static int __devexit gdsc_remove(struct platform_device *pdev)
{
	struct gdsc *sc = platform_get_drvdata(pdev);
	regulator_unregister(sc->rdev);
	return 0;
}

static struct of_device_id gdsc_match_table[] = {
	{ .compatible = "qcom,gdsc" },
	{}
};

static struct platform_driver gdsc_driver = {
	.probe = gdsc_probe,
	.remove = __devexit_p(gdsc_remove),
	.driver = {
		.name = "gdsc",
		.of_match_table = gdsc_match_table,
		.owner = THIS_MODULE,
	},
};

static int __init gdsc_init(void)
{
	return platform_driver_register(&gdsc_driver);
}
subsys_initcall(gdsc_init);

static void __exit gdsc_exit(void)
{
	platform_driver_unregister(&gdsc_driver);
}
module_exit(gdsc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");