blob: 1701262881b4c3dadfbf4e0b7546f1efca0a018d [file] [log] [blame]
Matt Wagantallfc727212012-01-06 18:18:25 -08001/*
Matt Wagantall3ef52422013-04-10 20:29:19 -07002 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Matt Wagantallfc727212012-01-06 18:18:25 -08003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070015#include <linux/module.h>
Matt Wagantallfc727212012-01-06 18:18:25 -080016#include <linux/io.h>
17#include <linux/iopoll.h>
18#include <linux/delay.h>
19#include <linux/err.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/regulator/driver.h>
23#include <linux/regulator/machine.h>
24#include <linux/regulator/of_regulator.h>
Matt Wagantall5900b7b2013-04-11 15:45:17 -070025#include <linux/slab.h>
Matt Wagantall3ef52422013-04-10 20:29:19 -070026#include <linux/clk.h>
27#include <mach/clk.h>
Matt Wagantallfc727212012-01-06 18:18:25 -080028
/* GDSCR (GDSC control register) bit-field layout. */
#define PWR_ON_MASK		BIT(31)	/* RO status: rail is powered on */
#define EN_REST_WAIT_MASK	(0xF << 20)	/* wait before asserting rest */
#define EN_FEW_WAIT_MASK	(0xF << 16)	/* wait before asserting few */
#define CLK_DIS_WAIT_MASK	(0xF << 12)	/* wait before gating clocks */
#define SW_OVERRIDE_MASK	BIT(2)	/* bypass the HW state machine */
#define HW_CONTROL_MASK		BIT(1)	/* let HW trigger collapse/restore */
#define SW_COLLAPSE_MASK	BIT(0)	/* SW request to collapse the rail */

/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL	(0x2 << 20)
#define EN_FEW_WAIT_VAL		(0x8 << 16)
#define CLK_DIS_WAIT_VAL	(0x2 << 12)

/* Maximum time to poll for a power-state transition to complete. */
#define TIMEOUT_US		100
Matt Wagantallfc727212012-01-06 18:18:25 -080043
/*
 * Per-instance state for one globally distributed switch (GDSC).
 */
struct gdsc {
	struct regulator_dev	*rdev;		/* handle from regulator_register() */
	struct regulator_desc	rdesc;		/* descriptor passed to the framework */
	void __iomem		*gdscr;		/* mapped GDSC control register */
	struct clk		**clocks;	/* clocks whose mem/periph retention we toggle */
	int			clock_count;	/* number of entries in @clocks */
	bool			toggle_mems;	/* toggle memory retention on enable/disable */
	bool			retain_logic;	/* never collapse the rail on disable */
};
53
54static int gdsc_is_enabled(struct regulator_dev *rdev)
55{
56 struct gdsc *sc = rdev_get_drvdata(rdev);
57
58 return !!(readl_relaxed(sc->gdscr) & PWR_ON_MASK);
59}
60
61static int gdsc_enable(struct regulator_dev *rdev)
62{
63 struct gdsc *sc = rdev_get_drvdata(rdev);
64 uint32_t regval;
Matt Wagantall5900b7b2013-04-11 15:45:17 -070065 int i, ret;
Matt Wagantallfc727212012-01-06 18:18:25 -080066
67 regval = readl_relaxed(sc->gdscr);
68 regval &= ~SW_COLLAPSE_MASK;
69 writel_relaxed(regval, sc->gdscr);
70
71 ret = readl_tight_poll_timeout(sc->gdscr, regval, regval & PWR_ON_MASK,
72 TIMEOUT_US);
Matt Wagantall64df1332012-06-26 12:00:19 -070073 if (ret) {
Matt Wagantallfc727212012-01-06 18:18:25 -080074 dev_err(&rdev->dev, "%s enable timed out\n", sc->rdesc.name);
Matt Wagantall64df1332012-06-26 12:00:19 -070075 return ret;
76 }
Matt Wagantallfc727212012-01-06 18:18:25 -080077
Matt Wagantall5900b7b2013-04-11 15:45:17 -070078 if (sc->toggle_mems) {
79 for (i = 0; i < sc->clock_count; i++) {
80 clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
81 clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
82 }
83 }
84
Matt Wagantall64df1332012-06-26 12:00:19 -070085 /*
86 * If clocks to this power domain were already on, they will take an
87 * additional 4 clock cycles to re-enable after the rail is enabled.
Matt Wagantall5900b7b2013-04-11 15:45:17 -070088 * Delay to account for this. A delay is also needed to ensure clocks
89 * are not enabled within 400ns of enabling power to the memories.
Matt Wagantall64df1332012-06-26 12:00:19 -070090 */
91 udelay(1);
92
93 return 0;
Matt Wagantallfc727212012-01-06 18:18:25 -080094}
95
96static int gdsc_disable(struct regulator_dev *rdev)
97{
98 struct gdsc *sc = rdev_get_drvdata(rdev);
99 uint32_t regval;
Matt Wagantall7889c712013-05-17 12:48:15 -0700100 int i, ret = 0;
Matt Wagantallfc727212012-01-06 18:18:25 -0800101
Matt Wagantall7889c712013-05-17 12:48:15 -0700102 if (!sc->retain_logic) {
103 regval = readl_relaxed(sc->gdscr);
104 regval |= SW_COLLAPSE_MASK;
105 writel_relaxed(regval, sc->gdscr);
Matt Wagantallfc727212012-01-06 18:18:25 -0800106
Matt Wagantall7889c712013-05-17 12:48:15 -0700107 ret = readl_tight_poll_timeout(sc->gdscr, regval,
108 !(regval & PWR_ON_MASK),
109 TIMEOUT_US);
110 if (ret)
111 dev_err(&rdev->dev, "%s disable timed out\n",
112 sc->rdesc.name);
113 }
Matt Wagantallfc727212012-01-06 18:18:25 -0800114
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700115 if (sc->toggle_mems) {
116 for (i = 0; i < sc->clock_count; i++) {
117 clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
118 clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
119 }
120 }
121
Matt Wagantallfc727212012-01-06 18:18:25 -0800122 return ret;
123}
124
/* Regulator framework callbacks for a GDSC power rail. */
static struct regulator_ops gdsc_ops = {
	.is_enabled = gdsc_is_enabled,
	.enable = gdsc_enable,
	.disable = gdsc_disable,
};
130
131static int __devinit gdsc_probe(struct platform_device *pdev)
132{
133 static atomic_t gdsc_count = ATOMIC_INIT(-1);
134 struct regulator_init_data *init_data;
135 struct resource *res;
136 struct gdsc *sc;
137 uint32_t regval;
Matt Wagantall3ef52422013-04-10 20:29:19 -0700138 bool retain_mems;
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700139 int i, ret;
Matt Wagantallfc727212012-01-06 18:18:25 -0800140
141 sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
142 if (sc == NULL)
143 return -ENOMEM;
144
Steve Mucklef132c6c2012-06-06 18:30:57 -0700145 init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
Matt Wagantallfc727212012-01-06 18:18:25 -0800146 if (init_data == NULL)
147 return -ENOMEM;
148
149 if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
150 init_data->supply_regulator = "parent";
151
152 ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
153 &sc->rdesc.name);
154 if (ret)
155 return ret;
156
157 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
158 if (res == NULL)
159 return -EINVAL;
160 sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
161 if (sc->gdscr == NULL)
162 return -ENOMEM;
163
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700164 sc->clock_count = of_property_count_strings(pdev->dev.of_node,
165 "qcom,clock-names");
166 if (sc->clock_count == -EINVAL) {
167 sc->clock_count = 0;
168 } else if (IS_ERR_VALUE(sc->clock_count)) {
169 dev_err(&pdev->dev, "Failed to get clock names\n");
170 return -EINVAL;
171 }
172
173 sc->clocks = devm_kzalloc(&pdev->dev,
174 sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
175 if (!sc->clocks)
176 return -ENOMEM;
177 for (i = 0; i < sc->clock_count; i++) {
178 const char *clock_name;
179 of_property_read_string_index(pdev->dev.of_node,
180 "qcom,clock-names", i,
181 &clock_name);
182 sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
183 if (IS_ERR(sc->clocks[i])) {
184 int rc = PTR_ERR(sc->clocks[i]);
185 if (rc != -EPROBE_DEFER)
186 dev_err(&pdev->dev, "Failed to get %s\n",
187 clock_name);
188 return rc;
189 }
190 }
191
Matt Wagantallfc727212012-01-06 18:18:25 -0800192 sc->rdesc.id = atomic_inc_return(&gdsc_count);
193 sc->rdesc.ops = &gdsc_ops;
194 sc->rdesc.type = REGULATOR_VOLTAGE;
195 sc->rdesc.owner = THIS_MODULE;
196 platform_set_drvdata(pdev, sc);
197
198 /*
199 * Disable HW trigger: collapse/restore occur based on registers writes.
200 * Disable SW override: Use hardware state-machine for sequencing.
201 */
202 regval = readl_relaxed(sc->gdscr);
203 regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
204
205 /* Configure wait time between states. */
206 regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
207 regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
208 writel_relaxed(regval, sc->gdscr);
209
Matt Wagantall3ef52422013-04-10 20:29:19 -0700210 retain_mems = of_property_read_bool(pdev->dev.of_node,
211 "qcom,retain-mems");
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700212 for (i = 0; i < sc->clock_count; i++) {
213 if (retain_mems || (regval & PWR_ON_MASK)) {
214 clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
215 clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
216 } else {
217 clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
218 clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
219 }
Matt Wagantall3ef52422013-04-10 20:29:19 -0700220 }
Matt Wagantall5900b7b2013-04-11 15:45:17 -0700221 sc->toggle_mems = !retain_mems;
Matt Wagantall7889c712013-05-17 12:48:15 -0700222 sc->retain_logic = of_property_read_bool(pdev->dev.of_node,
223 "qcom,retain-logic");
Matt Wagantall3ef52422013-04-10 20:29:19 -0700224
Matt Wagantallfc727212012-01-06 18:18:25 -0800225 sc->rdev = regulator_register(&sc->rdesc, &pdev->dev, init_data, sc,
226 pdev->dev.of_node);
227 if (IS_ERR(sc->rdev)) {
228 dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
229 sc->rdesc.name);
230 return PTR_ERR(sc->rdev);
231 }
232
233 return 0;
234}
235
236static int __devexit gdsc_remove(struct platform_device *pdev)
237{
238 struct gdsc *sc = platform_get_drvdata(pdev);
239 regulator_unregister(sc->rdev);
240 return 0;
241}
242
/* Device-tree compatible strings handled by this driver. */
static struct of_device_id gdsc_match_table[] = {
	{ .compatible = "qcom,gdsc" },
	{}
};
247
/* Platform driver glue binding "qcom,gdsc" nodes to probe/remove above. */
static struct platform_driver gdsc_driver = {
	.probe		= gdsc_probe,
	.remove		= __devexit_p(gdsc_remove),
	.driver		= {
		.name		= "gdsc",
		.of_match_table = gdsc_match_table,
		.owner		= THIS_MODULE,
	},
};
257
/* Register the driver at subsys_initcall time (before ordinary drivers). */
static int __init gdsc_init(void)
{
	return platform_driver_register(&gdsc_driver);
}
subsys_initcall(gdsc_init);
263
/* Unregister the driver on module unload. */
static void __exit gdsc_exit(void)
{
	platform_driver_unregister(&gdsc_driver);
}
module_exit(gdsc_exit);
269
/* Module metadata. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");