/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <mach/clk.h>

#define PWR_ON_MASK		BIT(31)
#define EN_REST_WAIT_MASK	(0xF << 20)
#define EN_FEW_WAIT_MASK	(0xF << 16)
#define CLK_DIS_WAIT_MASK	(0xF << 12)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)

/*
 * Wait 2^n CXO cycles between states. Here, n=2 (4 cycles) for the
 * restore and clock-disable waits, and n=8 (256 cycles) for the few wait.
 */
#define EN_REST_WAIT_VAL	(0x2 << 20)
#define EN_FEW_WAIT_VAL		(0x8 << 16)
#define CLK_DIS_WAIT_VAL	(0x2 << 12)

#define TIMEOUT_US		100

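/*
 * struct gdsc - Globally Distributed Switch Controller state
 * @rdev:		regulator device registered for this switch
 * @rdesc:		regulator descriptor (name, ops, type)
 * @gdscr:		mapped GDSC control/status register
 * @clocks:		clocks branching off this power domain
 * @clock_count:	number of entries in @clocks
 * @toggle_mem:		toggle memory retention with the rail state
 * @toggle_periph:	toggle peripheral retention with the rail state
 * @toggle_logic:	collapse/restore the logic; false when
 *			"qcom,skip-logic-collapse" is set, in which case
 *			clock resets are asserted/deasserted instead
 * @resets_asserted:	tracks the reset state when @toggle_logic is false
 */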
struct gdsc {
	struct regulator_dev *rdev;
	struct regulator_desc rdesc;
	void __iomem *gdscr;
	struct clk **clocks;
	int clock_count;
	bool toggle_mem;
	bool toggle_periph;
	bool toggle_logic;
	bool resets_asserted;
};

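/*
 * For GDSCs whose logic is never collapsed, the PWR_ON status bit is not a
 * useful indicator, so the enabled state is tracked via the clock resets.
 */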
static int gdsc_is_enabled(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);

	if (!sc->toggle_logic)
		return !sc->resets_asserted;

	return !!(readl_relaxed(sc->gdscr) & PWR_ON_MASK);
}

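/*
 * Power the rail up by clearing SW_COLLAPSE and polling for PWR_ON (or, if
 * logic collapse is skipped, by deasserting the clock resets), then apply
 * the memory/peripheral retention flags to each clock in the domain.
 */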
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret;

	if (sc->toggle_logic) {
		regval = readl_relaxed(sc->gdscr);
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&rdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
	} else {
		for (i = 0; i < sc->clock_count; i++)
			clk_reset(sc->clocks[i], CLK_RESET_DEASSERT);
		sc->resets_asserted = false;
	}

	for (i = 0; i < sc->clock_count; i++) {
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);

	return 0;
}

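/*
 * Drop the retention flags in reverse order of enable, then collapse the
 * rail by setting SW_COLLAPSE and polling for PWR_ON to clear (or assert
 * the clock resets when logic collapse is skipped).
 */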
static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret = 0;

	for (i = sc->clock_count - 1; i >= 0; i--) {
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	if (sc->toggle_logic) {
		regval = readl_relaxed(sc->gdscr);
		regval |= SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					       !(regval & PWR_ON_MASK),
					       TIMEOUT_US);
		if (ret)
			dev_err(&rdev->dev, "%s disable timed out\n",
				sc->rdesc.name);
	} else {
		for (i = sc->clock_count - 1; i >= 0; i--)
			clk_reset(sc->clocks[i], CLK_RESET_ASSERT);
		sc->resets_asserted = true;
	}

	return ret;
}

static struct regulator_ops gdsc_ops = {
	.is_enabled = gdsc_is_enabled,
	.enable = gdsc_enable,
	.disable = gdsc_disable,
};

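/*
 * Probe a GDSC from its devicetree node: map the GDSC register, look up the
 * domain's clocks, program the state-machine wait times, and register a
 * voltage-type regulator that switches the rail.
 *
 * A minimal sketch of a node this driver could bind against; the node name,
 * regulator name, register address, and clock name are illustrative, not
 * taken from a real board file:
 *
 *	gdsc_example: qcom,gdsc@fd8c4024 {
 *		compatible = "qcom,gdsc";
 *		regulator-name = "gdsc_example";
 *		reg = <0xfd8c4024 0x4>;
 *		qcom,clock-names = "core_clk";
 *		qcom,retain-mem;
 *		qcom,retain-periph;
 *	};
 */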
static int __devinit gdsc_probe(struct platform_device *pdev)
{
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval;
	bool retain_mem, retain_periph;
	int i, ret;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
	if (init_data == NULL)
		return -ENOMEM;

	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;
	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
						    "qcom,clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (IS_ERR_VALUE(sc->clock_count)) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;
	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;
		of_property_read_string_index(pdev->dev.of_node,
					      "qcom,clock-names", i,
					      &clock_name);
		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);
			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}
	}

	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: use the hardware state-machine for sequencing.
	 */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	/* Configure wait time between states. */
	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	writel_relaxed(regval, sc->gdscr);

	retain_mem = of_property_read_bool(pdev->dev.of_node,
					   "qcom,retain-mem");
	sc->toggle_mem = !retain_mem;
	retain_periph = of_property_read_bool(pdev->dev.of_node,
					      "qcom,retain-periph");
	sc->toggle_periph = !retain_periph;
	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						  "qcom,skip-logic-collapse");
	if (!sc->toggle_logic) {
		regval &= ~SW_COLLAPSE_MASK;
		writel_relaxed(regval, sc->gdscr);

		ret = readl_tight_poll_timeout(sc->gdscr, regval,
					regval & PWR_ON_MASK, TIMEOUT_US);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out\n",
				sc->rdesc.name);
			return ret;
		}
	}

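	/*
	 * Seed each clock's retention flags to match the devicetree policy
	 * and the rail's current state, so the first enable/disable starts
	 * from a known configuration.
	 */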
	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mem || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

		if (retain_periph || (regval & PWR_ON_MASK))
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
				      pdev->dev.of_node);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}

static int __devexit gdsc_remove(struct platform_device *pdev)
{
	struct gdsc *sc = platform_get_drvdata(pdev);
	regulator_unregister(sc->rdev);
	return 0;
}

static struct of_device_id gdsc_match_table[] = {
	{ .compatible = "qcom,gdsc" },
	{}
};

static struct platform_driver gdsc_driver = {
	.probe = gdsc_probe,
	.remove = __devexit_p(gdsc_remove),
	.driver = {
		.name = "gdsc",
		.of_match_table = gdsc_match_table,
		.owner = THIS_MODULE,
	},
};

static int __init gdsc_init(void)
{
	return platform_driver_register(&gdsc_driver);
}
subsys_initcall(gdsc_init);

static void __exit gdsc_exit(void)
{
	platform_driver_unregister(&gdsc_driver);
}
module_exit(gdsc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");