/*
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "gdsc: %s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/clk/qcom.h>

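/*
 * A GDSC (Globally Distributed Switch Controller) is an on-chip power
 * switch that gates the power rail of a hardware block so the block can
 * be power collapsed when idle. This driver models each GDSC as a
 * regulator, so consumers vote for the domain through the standard
 * regulator_enable()/regulator_disable() API.
 */
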
/* GDSCR */
#define PWR_ON_MASK		BIT(31)
#define CLK_DIS_WAIT_MASK	(0xF << 12)
#define CLK_DIS_WAIT_SHIFT	(12)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)

/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE		BIT(16)
#define GDSC_POWER_DOWN_COMPLETE	BIT(15)

/* Domain Address */
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* SW Reset */
#define BCR_BLK_ARES_BIT	BIT(0)

/* Register Offset */
#define REG_OFFSET		0x0
#define CFG_GDSCR_OFFSET	0x4

/* Timeout Delay */
#define TIMEOUT_US		100

/* TOGGLE SW COLLAPSE */
#define TOGGLE_SW_COLLAPSE_IN_DISABLE	BIT(0)

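/*
 * Per-domain driver state. The regmaps beyond @regmap are optional and
 * only present when the corresponding DT properties (domain-addr,
 * hw-ctrl-addr, sw-reset) are supplied. @gds_timeout is the status poll
 * timeout in microseconds (TIMEOUT_US unless overridden by the
 * qcom,gds-timeout property).
 */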
struct gdsc {
	struct regulator_dev *rdev;
	struct regulator_desc rdesc;
	void __iomem *gdscr;
	struct regmap *regmap;
	struct regmap *domain_addr;
	struct regmap *hw_ctrl;
	struct regmap *sw_reset;
	struct clk **clocks;
	struct reset_control **reset_clocks;
	bool toggle_mem;
	bool toggle_periph;
	bool toggle_logic;
	bool resets_asserted;
	bool root_en;
	bool force_root_en;
	bool no_status_check_on_disable;
	bool is_gdsc_enabled;
	bool allow_clear;
	bool reset_aon;
	bool poll_cfg_gdscr;
	int clock_count;
	int reset_count;
	int root_clk_idx;
	u32 gds_timeout;
	u32 flags;
};

enum gdscr_status {
	ENABLED,
	DISABLED,
};

static DEFINE_MUTEX(gdsc_seq_lock);

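/*
 * Consumers that do not need their block's memory/peripheral state
 * preserved across power collapse can call this on the regulator they
 * hold for the domain; gdsc_disable() will then clear the retention
 * clock flags instead of leaving retention enabled.
 */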
void gdsc_allow_clear_retention(struct regulator *regulator)
{
	struct gdsc *sc = regulator_get_drvdata(regulator);

	if (sc)
		sc->allow_clear = true;
}

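/*
 * Poll the PWR_ON status bit until it reflects the requested state,
 * giving up after sc->gds_timeout iterations of ~1 us each. When a
 * separate hw-ctrl-addr region is present, its status register is polled
 * instead of the GDSCR.
 */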
static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
{
	struct regmap *regmap;
	int count = sc->gds_timeout;
	u32 val;

	if (sc->hw_ctrl)
		regmap = sc->hw_ctrl;
	else
		regmap = sc->regmap;

	for (; count > 0; count--) {
		regmap_read(regmap, REG_OFFSET, &val);
		val &= PWR_ON_MASK;

		switch (status) {
		case ENABLED:
			if (val)
				return 0;
			break;
		case DISABLED:
			if (!val)
				return 0;
			break;
		}
		/*
		 * There is no guarantee about the delay needed for the enable
		 * bit in the GDSCR to be set or reset after the GDSC state
		 * changes. Hence, keep on checking for a reasonable number
		 * of times until the bit is set with the least possible delay
		 * between successive tries.
		 */
		udelay(1);
	}

	return -ETIMEDOUT;
}

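/*
 * Variant of the poll above for targets whose GDSCs report completion
 * via the POWER_UP/POWER_DOWN_COMPLETE bits in CFG_GDSCR rather than the
 * PWR_ON bit (selected by the qcom,poll-cfg-gdscr DT property).
 */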
static int poll_cfg_gdsc_status(struct gdsc *sc, enum gdscr_status status)
{
	struct regmap *regmap = sc->regmap;
	int count = sc->gds_timeout;
	u32 val;

	for (; count > 0; count--) {
		regmap_read(regmap, CFG_GDSCR_OFFSET, &val);

		switch (status) {
		case ENABLED:
			if (val & GDSC_POWER_UP_COMPLETE)
				return 0;
			break;
		case DISABLED:
			if (val & GDSC_POWER_DOWN_COMPLETE)
				return 0;
			break;
		}
		udelay(1);
	}

	return -ETIMEDOUT;
}

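/*
 * For domains that skip logic collapse there is no status bit to read;
 * enable state is tracked through the asserted/deasserted block resets.
 */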
static int gdsc_is_enabled(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;

	if (!sc->toggle_logic)
		return !sc->resets_asserted;

	regmap_read(sc->regmap, REG_OFFSET, &regval);

	if (regval & PWR_ON_MASK) {
		/*
		 * The GDSC might be turned on due to TZ/HYP vote on the
		 * votable GDS registers. Check the SW_COLLAPSE_MASK to
		 * determine if HLOS has voted for it.
		 */
		if (!(regval & SW_COLLAPSE_MASK))
			return true;
	}

	return false;
}

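/*
 * Power the domain up: optionally pulse the block/AON resets and drop the
 * I/O clamp, clear our SW_COLLAPSE vote, then poll for power-on. The root
 * clock is kept enabled across the sequence when the DT requests it.
 */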
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval, cfg_regval, hw_ctrl_regval = 0x0;
	int i, ret = 0;

	mutex_lock(&gdsc_seq_lock);

	if (sc->root_en || sc->force_root_en)
		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);

	regmap_read(sc->regmap, REG_OFFSET, &regval);
	if (regval & HW_CONTROL_MASK) {
		dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
			 sc->rdesc.name);
		mutex_unlock(&gdsc_seq_lock);
		return -EBUSY;
	}

	if (sc->toggle_logic) {
		if (sc->sw_reset) {
			regmap_read(sc->sw_reset, REG_OFFSET, &regval);
			regval |= BCR_BLK_ARES_BIT;
			regmap_write(sc->sw_reset, REG_OFFSET, regval);
			/*
			 * BLK_ARES should be kept asserted for 1us before
			 * being de-asserted.
			 */
			wmb();
			udelay(1);

			regval &= ~BCR_BLK_ARES_BIT;
			regmap_write(sc->sw_reset, REG_OFFSET, regval);
			/* Make sure de-assert goes through before continuing */
			wmb();
		}

		if (sc->domain_addr) {
			if (sc->reset_aon) {
				regmap_read(sc->domain_addr, REG_OFFSET,
					    &regval);
				regval |= GMEM_RESET_MASK;
				regmap_write(sc->domain_addr, REG_OFFSET,
					     regval);
				/*
				 * Keep reset asserted for at least 1us before
				 * continuing.
				 */
				wmb();
				udelay(1);

				regval &= ~GMEM_RESET_MASK;
				regmap_write(sc->domain_addr, REG_OFFSET,
					     regval);
				/*
				 * Make sure GMEM_RESET is de-asserted before
				 * continuing.
				 */
				wmb();
			}

			regmap_read(sc->domain_addr, REG_OFFSET, &regval);
			regval &= ~GMEM_CLAMP_IO_MASK;
			regmap_write(sc->domain_addr, REG_OFFSET, regval);

			/*
			 * Make sure CLAMP_IO is de-asserted before continuing.
			 */
			wmb();
		}

		regmap_read(sc->regmap, REG_OFFSET, &regval);
		regval &= ~SW_COLLAPSE_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);

		/* Wait for 8 XO cycles before polling the status bit. */
		mb();
		udelay(1);

		if (sc->poll_cfg_gdscr)
			ret = poll_cfg_gdsc_status(sc, ENABLED);
		else
			ret = poll_gdsc_status(sc, ENABLED);
		if (ret) {
			regmap_read(sc->regmap, REG_OFFSET, &regval);

			if (sc->hw_ctrl) {
				regmap_read(sc->hw_ctrl, REG_OFFSET,
					    &hw_ctrl_regval);
				dev_warn(&rdev->dev, "%s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
					 sc->rdesc.name, sc->gds_timeout,
					 regval, hw_ctrl_regval);

				ret = poll_gdsc_status(sc, ENABLED);
				if (ret) {
					regmap_read(sc->regmap, REG_OFFSET,
						    &regval);
					regmap_read(sc->hw_ctrl, REG_OFFSET,
						    &hw_ctrl_regval);
					dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
						sc->rdesc.name, sc->gds_timeout,
						regval, hw_ctrl_regval);

					mutex_unlock(&gdsc_seq_lock);
					return ret;
				}
			} else {
				dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
					sc->rdesc.name,
					regval);
				udelay(sc->gds_timeout);

				if (sc->poll_cfg_gdscr) {
					regmap_read(sc->regmap, REG_OFFSET,
						    &regval);
					regmap_read(sc->regmap,
						    CFG_GDSCR_OFFSET,
						    &cfg_regval);
					dev_err(&rdev->dev, "%s final state: gdscr - 0x%x, cfg_gdscr - 0x%x (%d us after timeout)\n",
						sc->rdesc.name, regval,
						cfg_regval, sc->gds_timeout);
				} else {
					regmap_read(sc->regmap, REG_OFFSET,
						    &regval);
					dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
						sc->rdesc.name, regval,
						sc->gds_timeout);
				}
				mutex_unlock(&gdsc_seq_lock);

				return ret;
			}
		}
	} else {
		for (i = 0; i < sc->reset_count; i++)
			reset_control_deassert(sc->reset_clocks[i]);
		sc->resets_asserted = false;
	}

	for (i = 0; i < sc->clock_count; i++) {
		if (unlikely(i == sc->root_clk_idx))
			continue;
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);

	/* Delay to account for staggered memory powerup. */
	udelay(1);

	if (sc->force_root_en)
		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);

	sc->is_gdsc_enabled = true;

	mutex_unlock(&gdsc_seq_lock);

	return ret;
}

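/*
 * Power the domain down: optionally clear the retention flags, set our
 * SW_COLLAPSE vote (pulsing it when the hardware needs the toggle
 * workaround), and poll for power-off unless the DT opts out of the
 * status check.
 */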
static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret = 0;

	mutex_lock(&gdsc_seq_lock);

	if (sc->force_root_en)
		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);

	for (i = sc->clock_count - 1; i >= 0; i--) {
		if (unlikely(i == sc->root_clk_idx))
			continue;
		if (sc->toggle_mem && sc->allow_clear)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
		if (sc->toggle_periph && sc->allow_clear)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	/* Delay to account for staggered memory powerdown. */
	udelay(1);

	if (sc->toggle_logic) {
		regmap_read(sc->regmap, REG_OFFSET, &regval);
		regval |= SW_COLLAPSE_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);

		if (sc->flags & TOGGLE_SW_COLLAPSE_IN_DISABLE) {
			regval &= ~SW_COLLAPSE_MASK;
			regmap_write(sc->regmap, REG_OFFSET, regval);
			regval |= SW_COLLAPSE_MASK;
			regmap_write(sc->regmap, REG_OFFSET, regval);
		}

		/* Wait for 8 XO cycles before polling the status bit. */
		mb();
		udelay(1);

		if (sc->no_status_check_on_disable) {
			/*
			 * Add a short delay here to ensure that gdsc_enable
			 * right after it was disabled does not put it in a
			 * weird state.
			 */
			udelay(TIMEOUT_US);
		} else {
			if (sc->poll_cfg_gdscr)
				ret = poll_cfg_gdsc_status(sc, DISABLED);
			else
				ret = poll_gdsc_status(sc, DISABLED);
			if (ret)
				dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
					sc->rdesc.name, regval);
		}

		if (sc->domain_addr) {
			regmap_read(sc->domain_addr, REG_OFFSET, &regval);
			regval |= GMEM_CLAMP_IO_MASK;
			regmap_write(sc->domain_addr, REG_OFFSET, regval);
		}

	} else {
		for (i = sc->reset_count - 1; i >= 0; i--)
			reset_control_assert(sc->reset_clocks[i]);
		sc->resets_asserted = true;
	}

	/*
	 * Check if gdsc_enable was called for this GDSC. If not, the root
	 * clock will not have been enabled prior to this.
	 */
	if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);

	sc->is_gdsc_enabled = false;

	mutex_unlock(&gdsc_seq_lock);

	return ret;
}

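/*
 * Regulator modes map onto the GDSC's handshake: REGULATOR_MODE_FAST
 * means the HW_CONTROL bit is set and hardware sequences collapse/restore
 * on its own; REGULATOR_MODE_NORMAL means software drives the domain via
 * SW_COLLAPSE.
 */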
static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;

	mutex_lock(&gdsc_seq_lock);
	regmap_read(sc->regmap, REG_OFFSET, &regval);
	mutex_unlock(&gdsc_seq_lock);

	if (regval & HW_CONTROL_MASK)
		return REGULATOR_MODE_FAST;

	return REGULATOR_MODE_NORMAL;
}

static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int ret = 0;

	mutex_lock(&gdsc_seq_lock);

	regmap_read(sc->regmap, REG_OFFSET, &regval);

	switch (mode) {
	case REGULATOR_MODE_FAST:
		/* Turn on HW trigger mode */
		regval |= HW_CONTROL_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);
		/*
		 * There may be a race with the internal HW trigger signal
		 * that results in the GDSC going through a power-down and
		 * power-up cycle. If the HW trigger signal is controlled by
		 * firmware that also polls the same status bits as we do,
		 * FW might read an 'on' status before the GDSC can finish
		 * the power cycle. Wait 1us before returning to ensure FW
		 * can't immediately poll the status bit.
		 */
		mb();
		udelay(1);
		break;
	case REGULATOR_MODE_NORMAL:
		/* Turn off HW trigger mode */
		regval &= ~HW_CONTROL_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);
		/*
		 * There may be a race with the internal HW trigger signal
		 * that results in the GDSC going through a power-down and
		 * power-up cycle. If we poll too early, the status bit will
		 * indicate 'on' before the GDSC can finish the power cycle.
		 * Account for this case by waiting 1us before polling.
		 */
		mb();
		udelay(1);

		if (sc->poll_cfg_gdscr)
			ret = poll_cfg_gdsc_status(sc, ENABLED);
		else
			ret = poll_gdsc_status(sc, ENABLED);
		if (ret)
			dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
				sc->rdesc.name, regval);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&gdsc_seq_lock);

	return ret;
}

static struct regulator_ops gdsc_ops = {
	.is_enabled = gdsc_is_enabled,
	.enable = gdsc_enable,
	.disable = gdsc_disable,
	.set_mode = gdsc_set_mode,
	.get_mode = gdsc_get_mode,
};

static const struct regmap_config gdsc_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
};

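/*
 * Probe one "qcom,gdsc" device: map the GDSCR, pick up the optional
 * syscon regions, clocks and resets described in DT, leave the GDSC under
 * SW control with HW trigger and SW override disabled, and register it as
 * a regulator. A minimal sketch of a matching DT node follows; the node
 * name, unit address and clock specifier are illustrative, not taken from
 * any particular target:
 *
 *	gdsc_venus: qcom,gdsc@fd8c1024 {
 *		compatible = "qcom,gdsc";
 *		regulator-name = "gdsc_venus";
 *		reg = <0xfd8c1024 0x4>;
 *		clock-names = "core_root_clk";
 *		clocks = <&clock_mmss VENUS_ROOT_CLK>;
 *		qcom,enable-root-clk;
 *	};
 */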
static int gdsc_probe(struct platform_device *pdev)
{
	static atomic_t gdsc_count = ATOMIC_INIT(-1);
	struct regulator_config reg_config = {};
	struct regulator_init_data *init_data;
	struct resource *res;
	struct gdsc *sc;
	uint32_t regval, clk_dis_wait_val = 0;
	bool retain_mem, retain_periph, support_hw_trigger, prop_val;
	int i, ret;
	u32 timeout;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
	if (sc == NULL)
		return -ENOMEM;

	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
					       &sc->rdesc);
	if (init_data == NULL)
		return -ENOMEM;

	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
		init_data->supply_regulator = "parent";

	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
				      &sc->rdesc.name);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Failed to get resources\n");
		return -EINVAL;
	}

	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (sc->gdscr == NULL)
		return -ENOMEM;

	sc->regmap = devm_regmap_init_mmio(&pdev->dev, sc->gdscr,
					   &gdsc_regmap_config);
	if (IS_ERR(sc->regmap)) {
		dev_err(&pdev->dev, "Couldn't get regmap\n");
		return PTR_ERR(sc->regmap);
	}

	if (of_find_property(pdev->dev.of_node, "domain-addr", NULL)) {
		sc->domain_addr = syscon_regmap_lookup_by_phandle
					(pdev->dev.of_node, "domain-addr");
		if (IS_ERR(sc->domain_addr))
			return -ENODEV;
	}

	if (of_find_property(pdev->dev.of_node, "sw-reset", NULL)) {
		sc->sw_reset = syscon_regmap_lookup_by_phandle
					(pdev->dev.of_node, "sw-reset");
		if (IS_ERR(sc->sw_reset))
			return -ENODEV;
	}

	if (of_find_property(pdev->dev.of_node, "hw-ctrl-addr", NULL)) {
		sc->hw_ctrl = syscon_regmap_lookup_by_phandle(
					pdev->dev.of_node, "hw-ctrl-addr");
		if (IS_ERR(sc->hw_ctrl))
			return -ENODEV;
	}

	sc->poll_cfg_gdscr = of_property_read_bool(pdev->dev.of_node,
						   "qcom,poll-cfg-gdscr");

	sc->gds_timeout = TIMEOUT_US;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,gds-timeout",
				   &timeout);
	if (!ret)
		sc->gds_timeout = timeout;

	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
						    "clock-names");
	if (sc->clock_count == -EINVAL) {
		sc->clock_count = 0;
	} else if (sc->clock_count < 0) {
		dev_err(&pdev->dev, "Failed to get clock names\n");
		return -EINVAL;
	}

	sc->clocks = devm_kzalloc(&pdev->dev,
			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
	if (!sc->clocks)
		return -ENOMEM;

	sc->root_clk_idx = -1;

	sc->root_en = of_property_read_bool(pdev->dev.of_node,
					    "qcom,enable-root-clk");

	sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
					    "qcom,force-enable-root-clk");

	prop_val = of_property_read_bool(pdev->dev.of_node,
					 "qcom,toggle-sw-collapse-in-disable");
	if (prop_val)
		sc->flags |= TOGGLE_SW_COLLAPSE_IN_DISABLE;

	for (i = 0; i < sc->clock_count; i++) {
		const char *clock_name;

		of_property_read_string_index(pdev->dev.of_node, "clock-names",
					      i, &clock_name);

		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
		if (IS_ERR(sc->clocks[i])) {
			int rc = PTR_ERR(sc->clocks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(&pdev->dev, "Failed to get %s\n",
					clock_name);
			return rc;
		}

		if (!strcmp(clock_name, "core_root_clk"))
			sc->root_clk_idx = i;
	}

	if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
		dev_err(&pdev->dev, "Failed to get root clock name\n");
		return -EINVAL;
	}

	sc->reset_aon = of_property_read_bool(pdev->dev.of_node,
					      "qcom,reset-aon-logic");

	sc->rdesc.id = atomic_inc_return(&gdsc_count);
	sc->rdesc.ops = &gdsc_ops;
	sc->rdesc.type = REGULATOR_VOLTAGE;
	sc->rdesc.owner = THIS_MODULE;
	platform_set_drvdata(pdev, sc);

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: Use hardware state-machine for sequencing.
	 */
	regmap_read(sc->regmap, REG_OFFSET, &regval);
	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);

	if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
				  &clk_dis_wait_val)) {
		clk_dis_wait_val = clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;

		/* Configure wait time between states. */
		regval &= ~(CLK_DIS_WAIT_MASK);
		regval |= clk_dis_wait_val;
	}

	regmap_write(sc->regmap, REG_OFFSET, regval);

	sc->no_status_check_on_disable =
			of_property_read_bool(pdev->dev.of_node,
					"qcom,no-status-check-on-disable");
	retain_mem = of_property_read_bool(pdev->dev.of_node,
					   "qcom,retain-mem");
	sc->toggle_mem = !retain_mem;
	retain_periph = of_property_read_bool(pdev->dev.of_node,
					      "qcom,retain-periph");
	sc->toggle_periph = !retain_periph;
	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
						  "qcom,skip-logic-collapse");
	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
						   "qcom,support-hw-trigger");
	if (support_hw_trigger) {
		init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
		init_data->constraints.valid_modes_mask |=
				REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
	}

	if (!sc->toggle_logic) {
		sc->reset_count = of_property_count_strings(pdev->dev.of_node,
							    "reset-names");
		if (sc->reset_count == -EINVAL) {
			sc->reset_count = 0;
		} else if (sc->reset_count < 0) {
			dev_err(&pdev->dev, "Failed to get reset clock names\n");
			return -EINVAL;
		}

		sc->reset_clocks = devm_kzalloc(&pdev->dev,
				sizeof(struct reset_control *) * sc->reset_count,
				GFP_KERNEL);
		if (!sc->reset_clocks)
			return -ENOMEM;

		for (i = 0; i < sc->reset_count; i++) {
			const char *reset_name;

			of_property_read_string_index(pdev->dev.of_node,
					"reset-names", i, &reset_name);
			sc->reset_clocks[i] = devm_reset_control_get(&pdev->dev,
					reset_name);
			if (IS_ERR(sc->reset_clocks[i])) {
				int rc = PTR_ERR(sc->reset_clocks[i]);

				if (rc != -EPROBE_DEFER)
					dev_err(&pdev->dev, "Failed to get %s\n",
						reset_name);
				return rc;
			}
		}

		regval &= ~SW_COLLAPSE_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);

		if (sc->poll_cfg_gdscr)
			ret = poll_cfg_gdsc_status(sc, ENABLED);
		else
			ret = poll_gdsc_status(sc, ENABLED);
		if (ret) {
			dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
				sc->rdesc.name, regval);
			return ret;
		}
	}

	sc->allow_clear = of_property_read_bool(pdev->dev.of_node,
						"qcom,disallow-clear");
	sc->allow_clear = !sc->allow_clear;

	for (i = 0; i < sc->clock_count; i++) {
		if (retain_mem || (regval & PWR_ON_MASK) || !sc->allow_clear)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);

		if (retain_periph || (regval & PWR_ON_MASK) || !sc->allow_clear)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
		else
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	reg_config.dev = &pdev->dev;
	reg_config.init_data = init_data;
	reg_config.driver_data = sc;
	reg_config.of_node = pdev->dev.of_node;
	reg_config.regmap = sc->regmap;

	sc->rdev = regulator_register(&sc->rdesc, &reg_config);
	if (IS_ERR(sc->rdev)) {
		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
			sc->rdesc.name);
		return PTR_ERR(sc->rdev);
	}

	return 0;
}

static int gdsc_remove(struct platform_device *pdev)
{
	struct gdsc *sc = platform_get_drvdata(pdev);

	regulator_unregister(sc->rdev);

	return 0;
}

static const struct of_device_id gdsc_match_table[] = {
	{ .compatible = "qcom,gdsc" },
	{}
};

static struct platform_driver gdsc_driver = {
	.probe = gdsc_probe,
	.remove = gdsc_remove,
	.driver = {
		.name = "gdsc",
		.of_match_table = gdsc_match_table,
		.owner = THIS_MODULE,
	},
};

static int __init gdsc_init(void)
{
	return platform_driver_register(&gdsc_driver);
}
subsys_initcall(gdsc_init);

static void __exit gdsc_exit(void)
{
	platform_driver_unregister(&gdsc_driver);
}
module_exit(gdsc_exit);