blob: 30a034e77879223bf398300bdfe5d699f245cb72 [file] [log] [blame]
Matt Wagantallfc727212012-01-06 18:18:25 -08001/*
Matt Wagantall3ef52422013-04-10 20:29:19 -07002 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Matt Wagantallfc727212012-01-06 18:18:25 -08003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070015#include <linux/module.h>
Matt Wagantallfc727212012-01-06 18:18:25 -080016#include <linux/io.h>
17#include <linux/iopoll.h>
18#include <linux/delay.h>
19#include <linux/err.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/regulator/driver.h>
23#include <linux/regulator/machine.h>
24#include <linux/regulator/of_regulator.h>
Matt Wagantall5900b7b2013-04-11 15:45:17 -070025#include <linux/slab.h>
Matt Wagantall3ef52422013-04-10 20:29:19 -070026#include <linux/clk.h>
27#include <mach/clk.h>
Matt Wagantallfc727212012-01-06 18:18:25 -080028
29#define PWR_ON_MASK BIT(31)
30#define EN_REST_WAIT_MASK (0xF << 20)
31#define EN_FEW_WAIT_MASK (0xF << 16)
32#define CLK_DIS_WAIT_MASK (0xF << 12)
33#define SW_OVERRIDE_MASK BIT(2)
34#define HW_CONTROL_MASK BIT(1)
35#define SW_COLLAPSE_MASK BIT(0)
36
37/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
38#define EN_REST_WAIT_VAL (0x2 << 20)
Subbaraman Narayanamurthy4da45022013-03-22 19:48:52 -070039#define EN_FEW_WAIT_VAL (0x8 << 16)
Matt Wagantallfc727212012-01-06 18:18:25 -080040#define CLK_DIS_WAIT_VAL (0x2 << 12)
41
Matt Wagantall32754ca2013-04-19 11:28:40 -070042#define TIMEOUT_US 100
Matt Wagantallfc727212012-01-06 18:18:25 -080043
/* Per-instance state for one GDSC (globally distributed switch controller). */
struct gdsc {
	struct regulator_dev *rdev;	/* handle returned by regulator_register() */
	struct regulator_desc rdesc;	/* descriptor registered for this rail */
	void __iomem *gdscr;		/* mapped GDSC control/status register */
	struct clk **clocks;		/* clocks named in "qcom,clock-names" */
	int clock_count;		/* number of entries in @clocks */
	bool toggle_mems;		/* toggle mem/periph retention on en/dis */
};
52
53static int gdsc_is_enabled(struct regulator_dev *rdev)
54{
55 struct gdsc *sc = rdev_get_drvdata(rdev);
56
57 return !!(readl_relaxed(sc->gdscr) & PWR_ON_MASK);
58}
59
/*
 * gdsc_enable() - Power up the GDSC-controlled rail.
 *
 * Clears SW_COLLAPSE to request power-up, then polls the status bit until
 * the hardware reports the rail on, or TIMEOUT_US elapses.  Once power is
 * confirmed, re-applies memory/peripheral retention flags on the
 * associated clocks if retention is toggled per-enable (sc->toggle_mems).
 *
 * Returns 0 on success or the (negative) poll-timeout error.
 */
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret;

	/* Request power-up: clear the software collapse bit. */
	regval = readl_relaxed(sc->gdscr);
	regval &= ~SW_COLLAPSE_MASK;
	writel_relaxed(regval, sc->gdscr);

	/* Busy-wait for the state machine to report the rail powered. */
	ret = readl_tight_poll_timeout(sc->gdscr, regval, regval & PWR_ON_MASK,
				       TIMEOUT_US);
	if (ret) {
		dev_err(&rdev->dev, "%s enable timed out\n", sc->rdesc.name);
		return ret;
	}

	/* Restore retention that gdsc_disable() dropped, now that power is on. */
	if (sc->toggle_mems) {
		for (i = 0; i < sc->clock_count; i++) {
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		}
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);

	return 0;
}
94
/*
 * gdsc_disable() - Collapse the GDSC-controlled rail.
 *
 * Sets SW_COLLAPSE to request power-down and polls until the status bit
 * clears (or TIMEOUT_US elapses).  Retention flags are dropped even if
 * the poll timed out, matching the original sequencing.
 *
 * Returns 0 on success or the (negative) poll-timeout error.
 */
static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret;

	/* Request power-down: set the software collapse bit. */
	regval = readl_relaxed(sc->gdscr);
	regval |= SW_COLLAPSE_MASK;
	writel_relaxed(regval, sc->gdscr);

	/* Busy-wait for the state machine to report the rail off. */
	ret = readl_tight_poll_timeout(sc->gdscr, regval,
				       !(regval & PWR_ON_MASK), TIMEOUT_US);
	if (ret)
		dev_err(&rdev->dev, "%s disable timed out\n", sc->rdesc.name);

	/* Allow memories/peripherals to collapse while the rail is down. */
	if (sc->toggle_mems) {
		for (i = 0; i < sc->clock_count; i++) {
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
		}
	}

	return ret;
}
119
/* Regulator operations: a GDSC supports only on/off control (no voltages). */
static struct regulator_ops gdsc_ops = {
	.is_enabled = gdsc_is_enabled,
	.enable = gdsc_enable,
	.disable = gdsc_disable,
};
125
126static int __devinit gdsc_probe(struct platform_device *pdev)
127{
128 static atomic_t gdsc_count = ATOMIC_INIT(-1);
129 struct regulator_init_data *init_data;
130 struct resource *res;
131 struct gdsc *sc;
132 uint32_t regval;
Matt Wagantall3ef52422013-04-10 20:29:19 -0700133 bool retain_mems;
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700134 int i, ret;
Matt Wagantallfc727212012-01-06 18:18:25 -0800135
136 sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
137 if (sc == NULL)
138 return -ENOMEM;
139
Steve Mucklef132c6c2012-06-06 18:30:57 -0700140 init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
Matt Wagantallfc727212012-01-06 18:18:25 -0800141 if (init_data == NULL)
142 return -ENOMEM;
143
144 if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
145 init_data->supply_regulator = "parent";
146
147 ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
148 &sc->rdesc.name);
149 if (ret)
150 return ret;
151
152 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
153 if (res == NULL)
154 return -EINVAL;
155 sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
156 if (sc->gdscr == NULL)
157 return -ENOMEM;
158
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700159 sc->clock_count = of_property_count_strings(pdev->dev.of_node,
160 "qcom,clock-names");
161 if (sc->clock_count == -EINVAL) {
162 sc->clock_count = 0;
163 } else if (IS_ERR_VALUE(sc->clock_count)) {
164 dev_err(&pdev->dev, "Failed to get clock names\n");
165 return -EINVAL;
166 }
167
168 sc->clocks = devm_kzalloc(&pdev->dev,
169 sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
170 if (!sc->clocks)
171 return -ENOMEM;
172 for (i = 0; i < sc->clock_count; i++) {
173 const char *clock_name;
174 of_property_read_string_index(pdev->dev.of_node,
175 "qcom,clock-names", i,
176 &clock_name);
177 sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
178 if (IS_ERR(sc->clocks[i])) {
179 int rc = PTR_ERR(sc->clocks[i]);
180 if (rc != -EPROBE_DEFER)
181 dev_err(&pdev->dev, "Failed to get %s\n",
182 clock_name);
183 return rc;
184 }
185 }
186
Matt Wagantallfc727212012-01-06 18:18:25 -0800187 sc->rdesc.id = atomic_inc_return(&gdsc_count);
188 sc->rdesc.ops = &gdsc_ops;
189 sc->rdesc.type = REGULATOR_VOLTAGE;
190 sc->rdesc.owner = THIS_MODULE;
191 platform_set_drvdata(pdev, sc);
192
193 /*
194 * Disable HW trigger: collapse/restore occur based on registers writes.
195 * Disable SW override: Use hardware state-machine for sequencing.
196 */
197 regval = readl_relaxed(sc->gdscr);
198 regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
199
200 /* Configure wait time between states. */
201 regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
202 regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
203 writel_relaxed(regval, sc->gdscr);
204
Matt Wagantall3ef52422013-04-10 20:29:19 -0700205 retain_mems = of_property_read_bool(pdev->dev.of_node,
206 "qcom,retain-mems");
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700207 for (i = 0; i < sc->clock_count; i++) {
208 if (retain_mems || (regval & PWR_ON_MASK)) {
209 clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
210 clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
211 } else {
212 clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
213 clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
214 }
Matt Wagantall3ef52422013-04-10 20:29:19 -0700215 }
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700216 sc->toggle_mems = !retain_mems;
Matt Wagantall3ef52422013-04-10 20:29:19 -0700217
Matt Wagantallfc727212012-01-06 18:18:25 -0800218 sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
219 pdev->dev.of_node);
220 if (IS_ERR(sc->rdev)) {
221 dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
222 sc->rdesc.name);
223 return PTR_ERR(sc->rdev);
224 }
225
226 return 0;
227}
228
229static int __devexit gdsc_remove(struct platform_device *pdev)
230{
231 struct gdsc *sc = platform_get_drvdata(pdev);
232 regulator_unregister(sc->rdev);
233 return 0;
234}
235
/* Device tree match table: binds this driver to "qcom,gdsc" nodes. */
static struct of_device_id gdsc_match_table[] = {
	{ .compatible = "qcom,gdsc" },
	{}	/* sentinel */
};
240
/* Platform driver glue for the GDSC regulator. */
static struct platform_driver gdsc_driver = {
	.probe = gdsc_probe,
	.remove = __devexit_p(gdsc_remove),
	.driver = {
		.name = "gdsc",
		.of_match_table = gdsc_match_table,
		.owner = THIS_MODULE,
	},
};
250
/* Register the driver early (subsys_initcall): other devices' probes
 * depend on these power rails being available. */
static int __init gdsc_init(void)
{
	return platform_driver_register(&gdsc_driver);
}
subsys_initcall(gdsc_init);
256
/* Module teardown: unregister the platform driver. */
static void __exit gdsc_exit(void)
{
	platform_driver_unregister(&gdsc_driver);
}
module_exit(gdsc_exit);
262
263MODULE_LICENSE("GPL v2");
Abhimanyu Kapur90ced6e2012-06-26 17:41:25 -0700264MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");