blob: 1ffb34105937663d761cb228579e1155406edd13 [file] [log] [blame]
Taniya Das988608c2016-08-04 22:12:44 +05301/*
Naveen Yadav255113b2020-04-29 22:26:26 +05302 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
Taniya Das988608c2016-08-04 22:12:44 +05303 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
Deepak Katragadda9abd7942017-06-13 14:20:09 -070014#define pr_fmt(fmt) "gdsc: %s: " fmt, __func__
15
Taniya Das988608c2016-08-04 22:12:44 +053016#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/io.h>
19#include <linux/delay.h>
20#include <linux/err.h>
21#include <linux/of.h>
22#include <linux/platform_device.h>
23#include <linux/regulator/driver.h>
24#include <linux/regulator/machine.h>
25#include <linux/regulator/of_regulator.h>
26#include <linux/slab.h>
27#include <linux/clk.h>
28#include <linux/regmap.h>
29#include <linux/reset.h>
30#include <linux/mfd/syscon.h>
Taniya Das77598782016-10-28 12:37:12 +053031#include <linux/clk/qcom.h>
Taniya Das988608c2016-08-04 22:12:44 +053032
Odelu Kukatla0cb925e2019-01-02 18:25:30 +053033#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
34
/* GDSCR: main GDSC control/status register bit fields */
#define PWR_ON_MASK		BIT(31)	/* read-only: power switch is on */
#define CLK_DIS_WAIT_MASK	(0xF << 12)	/* wait count before clock disable */
#define CLK_DIS_WAIT_SHIFT	(12)
#define EN_FEW_WAIT_MASK	(0xF << 16)	/* wait count for EN_FEW power switches */
#define EN_FEW_WAIT_SHIFT	(16)
#define EN_REST_WAIT_MASK	(0xF << 20)	/* wait count for EN_REST power switches */
#define EN_REST_WAIT_SHIFT	(20)
#define SW_OVERRIDE_MASK	BIT(2)	/* software override of HW state machine */
#define HW_CONTROL_MASK		BIT(1)	/* hand control to the HW trigger signal */
#define SW_COLLAPSE_MASK	BIT(0)	/* HLOS request to collapse the GDSC */

/* CFG_GDSCR: completion status bits (polled when qcom,poll-cfg-gdscr is set) */
#define GDSC_POWER_UP_COMPLETE		BIT(16)
#define GDSC_POWER_DOWN_COMPLETE	BIT(15)

/* Domain Address register bits (via "domain-addr" syscon) */
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* SW Reset register bit (via "sw-reset" syscon) */
#define BCR_BLK_ARES_BIT	BIT(0)

/* Register Offset: all regmaps here are accessed at offset 0 */
#define REG_OFFSET		0x0
#define CFG_GDSCR_OFFSET	0x4

/* Timeout Delay: default status-poll budget in microseconds */
#define TIMEOUT_US		100

/* Flag bit for sc->flags: pulse SW_COLLAPSE low/high again during disable */
#define TOGGLE_SW_COLLAPSE_IN_DISABLE	BIT(0)
67
/*
 * struct gdsc - per-instance state for one globally distributed switch
 * (power domain) exposed to the kernel as a regulator.
 */
struct gdsc {
	struct regulator_dev *rdev;	/* regulator framework handle */
	struct regulator_desc rdesc;	/* regulator description (name, ops) */
	void __iomem *gdscr;		/* mapped GDSCR register */
	struct regmap *regmap;		/* regmap over @gdscr */
	struct regmap *domain_addr;	/* optional GMEM clamp/reset syscon */
	struct regmap *hw_ctrl;		/* optional HW-control status syscon */
	struct regmap *sw_reset;	/* optional block-reset (BCR) syscon */
	struct clk **clocks;		/* clocks whose mem/periph flags we toggle */
	struct regulator *parent_regulator;	/* optional vdd_parent supply */
	struct reset_control **reset_clocks;	/* used when toggle_logic is false */
	bool toggle_mem;		/* drop memory retention when disabled */
	bool toggle_periph;		/* drop peripheral retention when disabled */
	bool toggle_logic;		/* collapse logic on disable (vs. resets) */
	bool resets_asserted;		/* tracks state when !toggle_logic */
	bool root_en;			/* enable root clock around enable() */
	bool force_root_en;		/* force root clock on for both sequences */
	bool no_status_check_on_disable;	/* skip status poll on disable */
	bool is_gdsc_enabled;		/* SW bookkeeping of last enable/disable */
	bool allow_clear;		/* retention clearing permitted */
	bool reset_aon;			/* pulse GMEM_RESET during enable */
	bool poll_cfg_gdscr;		/* poll CFG_GDSCR instead of PWR_ON */
	int clock_count;		/* entries in @clocks */
	int reset_count;		/* entries in @reset_clocks */
	int root_clk_idx;		/* index of "core_root_clk", -1 if absent */
	u32 gds_timeout;		/* status-poll budget in us */
	u32 flags;			/* e.g. TOGGLE_SW_COLLAPSE_IN_DISABLE */
	bool skip_disable_before_enable;	/* guard first disable (see gdsc_disable) */
};
97
/* Target state to poll for in poll_gdsc_status()/poll_cfg_gdsc_status(). */
enum gdscr_status {
	ENABLED,
	DISABLED,
};

/* Serializes every GDSC register sequence across all instances. */
static DEFINE_MUTEX(gdsc_seq_lock);
104
105void gdsc_allow_clear_retention(struct regulator *regulator)
106{
107 struct gdsc *sc = regulator_get_drvdata(regulator);
108
109 if (sc)
110 sc->allow_clear = true;
111}
112
113static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
114{
115 struct regmap *regmap;
116 int count = sc->gds_timeout;
117 u32 val;
118
119 if (sc->hw_ctrl)
120 regmap = sc->hw_ctrl;
121 else
122 regmap = sc->regmap;
123
124 for (; count > 0; count--) {
125 regmap_read(regmap, REG_OFFSET, &val);
126 val &= PWR_ON_MASK;
127
128 switch (status) {
129 case ENABLED:
130 if (val)
131 return 0;
132 break;
133 case DISABLED:
134 if (!val)
135 return 0;
136 break;
137 }
138 /*
139 * There is no guarantee about the delay needed for the enable
140 * bit in the GDSCR to be set or reset after the GDSC state
141 * changes. Hence, keep on checking for a reasonable number
142 * of times until the bit is set with the least possible delay
143 * between succeessive tries.
144 */
145 udelay(1);
146 }
147
148 return -ETIMEDOUT;
149}
150
Deepak Katragadda522c8032016-11-11 11:37:10 -0800151static int poll_cfg_gdsc_status(struct gdsc *sc, enum gdscr_status status)
152{
153 struct regmap *regmap = sc->regmap;
154 int count = sc->gds_timeout;
155 u32 val;
156
157 for (; count > 0; count--) {
158 regmap_read(regmap, CFG_GDSCR_OFFSET, &val);
159
160 switch (status) {
161 case ENABLED:
162 if (val & GDSC_POWER_UP_COMPLETE)
163 return 0;
164 break;
165 case DISABLED:
166 if (val & GDSC_POWER_DOWN_COMPLETE)
167 return 0;
168 break;
169 }
170 udelay(1);
171 }
172
173 return -ETIMEDOUT;
174}
175
Taniya Das988608c2016-08-04 22:12:44 +0530176static int gdsc_is_enabled(struct regulator_dev *rdev)
177{
178 struct gdsc *sc = rdev_get_drvdata(rdev);
179 uint32_t regval;
180
181 if (!sc->toggle_logic)
182 return !sc->resets_asserted;
183
184 regmap_read(sc->regmap, REG_OFFSET, &regval);
185
186 if (regval & PWR_ON_MASK) {
187 /*
188 * The GDSC might be turned on due to TZ/HYP vote on the
189 * votable GDS registers. Check the SW_COLLAPSE_MASK to
190 * determine if HLOS has voted for it.
191 */
192 if (!(regval & SW_COLLAPSE_MASK))
193 return true;
194 }
195
196 return false;
197}
198
/*
 * gdsc_enable() - regulator_ops .enable callback.
 *
 * Full power-up sequence: optionally raise the parent supply, optionally
 * turn on the root clock, pulse block/GMEM resets, un-clamp IO, clear
 * SW_COLLAPSE and poll for the rail, then restore memory/peripheral
 * retention flags on all managed clocks. All register traffic runs under
 * gdsc_seq_lock. Returns 0 on success or a negative errno; on every error
 * path the parent supply vote (if any) is dropped again.
 */
static int gdsc_enable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval, cfg_regval, hw_ctrl_regval = 0x0;
	int i, ret = 0;

	/* Vote the parent rail up before touching the GDSC. */
	if (sc->parent_regulator) {
		ret = regulator_set_voltage(sc->parent_regulator,
					RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
		if (ret) {
			dev_warn(&rdev->dev,
				"Unable to set the voltage on parent for %s\n",
				sc->rdesc.name);
			return ret;
		}
	}

	mutex_lock(&gdsc_seq_lock);

	if (sc->root_en || sc->force_root_en)
		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);

	/* A SW enable is invalid while the HW trigger owns the GDSC. */
	regmap_read(sc->regmap, REG_OFFSET, &regval);
	if (regval & HW_CONTROL_MASK) {
		dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
			 sc->rdesc.name);
		mutex_unlock(&gdsc_seq_lock);
		if (sc->parent_regulator)
			regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
		return -EBUSY;
	}

	if (sc->toggle_logic) {
		/* Optional block reset (BCR) pulse before power-up. */
		if (sc->sw_reset) {
			regmap_read(sc->sw_reset, REG_OFFSET, &regval);
			regval |= BCR_BLK_ARES_BIT;
			regmap_write(sc->sw_reset, REG_OFFSET, regval);
			/*
			 * BLK_ARES should be kept asserted for 1us before
			 * being de-asserted.
			 */
			wmb();
			udelay(1);

			regval &= ~BCR_BLK_ARES_BIT;
			regmap_write(sc->sw_reset, REG_OFFSET, regval);
			/* Make sure de-assert goes through before continuing */
			wmb();
		}

		if (sc->domain_addr) {
			/* Optional GMEM reset pulse for AON logic. */
			if (sc->reset_aon) {
				regmap_read(sc->domain_addr, REG_OFFSET,
					    &regval);
				regval |= GMEM_RESET_MASK;
				regmap_write(sc->domain_addr, REG_OFFSET,
					     regval);
				/*
				 * Keep reset asserted for at-least 1us before
				 * continuing.
				 */
				wmb();
				udelay(1);

				regval &= ~GMEM_RESET_MASK;
				regmap_write(sc->domain_addr, REG_OFFSET,
					     regval);
				/*
				 * Make sure GMEM_RESET is de-asserted before
				 * continuing.
				 */
				wmb();
			}

			/* Release the IO clamp set during disable. */
			regmap_read(sc->domain_addr, REG_OFFSET, &regval);
			regval &= ~GMEM_CLAMP_IO_MASK;
			regmap_write(sc->domain_addr, REG_OFFSET, regval);

			/*
			 * Make sure CLAMP_IO is de-asserted before continuing.
			 */
			wmb();
		}

		/* Drop the SW collapse vote to start the power-up. */
		regmap_read(sc->regmap, REG_OFFSET, &regval);
		regval &= ~SW_COLLAPSE_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);

		/* Wait for 8 XO cycles before polling the status bit. */
		mb();
		udelay(1);

		if (sc->poll_cfg_gdscr)
			ret = poll_cfg_gdsc_status(sc, ENABLED);
		else
			ret = poll_gdsc_status(sc, ENABLED);
		if (ret) {
			regmap_read(sc->regmap, REG_OFFSET, &regval);

			if (sc->hw_ctrl) {
				/*
				 * With a separate HW-control status register
				 * the first timeout may be transient; log and
				 * re-poll once before giving up.
				 */
				regmap_read(sc->hw_ctrl, REG_OFFSET,
					    &hw_ctrl_regval);
				dev_warn(&rdev->dev, "%s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
					sc->rdesc.name, sc->gds_timeout,
					regval, hw_ctrl_regval);

				ret = poll_gdsc_status(sc, ENABLED);
				if (ret) {
					regmap_read(sc->regmap, REG_OFFSET,
						    &regval);
					regmap_read(sc->hw_ctrl, REG_OFFSET,
						    &hw_ctrl_regval);
					dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
						sc->rdesc.name, sc->gds_timeout,
						regval, hw_ctrl_regval);

					mutex_unlock(&gdsc_seq_lock);

					if (sc->parent_regulator)
						regulator_set_voltage(
							sc->parent_regulator,
							0, INT_MAX);
					return ret;
				}
			} else {
				dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
					sc->rdesc.name,
					regval);
				/* Wait one more timeout period, then log the final state. */
				udelay(sc->gds_timeout);

				if (sc->poll_cfg_gdscr) {
					regmap_read(sc->regmap, REG_OFFSET,
						    &regval);
					regmap_read(sc->regmap,
						CFG_GDSCR_OFFSET, &cfg_regval);
					dev_err(&rdev->dev, "%s final state: gdscr - 0x%x, cfg_gdscr - 0x%x (%d us after timeout)\n",
						sc->rdesc.name, regval,
						cfg_regval, sc->gds_timeout);
				} else {
					regmap_read(sc->regmap, REG_OFFSET,
						    &regval);
					dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
						sc->rdesc.name, regval,
						sc->gds_timeout);
				}
				mutex_unlock(&gdsc_seq_lock);

				if (sc->parent_regulator)
					regulator_set_voltage(
						sc->parent_regulator,
						0, INT_MAX);
				return ret;
			}
		}
	} else {
		/* Logic collapse skipped: "enable" means releasing resets. */
		for (i = 0; i < sc->reset_count; i++)
			reset_control_deassert(sc->reset_clocks[i]);
		sc->resets_asserted = false;
	}

	/* Restore retention flags on every managed clock except the root. */
	for (i = 0; i < sc->clock_count; i++) {
		if (unlikely(i == sc->root_clk_idx))
			continue;
		if (sc->toggle_mem)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
		if (sc->toggle_periph)
			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
	}

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the rail is enabled.
	 * Delay to account for this. A delay is also needed to ensure clocks
	 * are not enabled within 400ns of enabling power to the memories.
	 */
	udelay(1);

	/* Delay to account for staggered memory powerup. */
	udelay(1);

	/* The forced root clock was only needed for the sequence itself. */
	if (sc->force_root_en)
		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);

	sc->is_gdsc_enabled = true;
	/* First real enable observed; gdsc_disable() may act normally now. */
	sc->skip_disable_before_enable = false;

	mutex_unlock(&gdsc_seq_lock);

	return ret;
}
389
/*
 * gdsc_disable() - regulator_ops .disable callback.
 *
 * Reverse of gdsc_enable(): optionally clear retention flags, assert
 * SW_COLLAPSE (or assert resets when logic collapse is skipped), poll for
 * power-down unless told not to, re-apply the IO clamp, and drop the root
 * clock and parent supply votes. Runs under gdsc_seq_lock.
 */
static int gdsc_disable(struct regulator_dev *rdev)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int i, ret = 0;

	/*
	 * Protect GDSC against late_init disabling when the GDSC is enabled
	 * by an entity external to HLOS.
	 */
	if (sc->skip_disable_before_enable) {
		dev_dbg(&rdev->dev, "Skip Disabling: %s\n", sc->rdesc.name);
		sc->skip_disable_before_enable = false;
		return 0;
	}

	mutex_lock(&gdsc_seq_lock);

	/* The sequence below may need the root clock running. */
	if (sc->force_root_en)
		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);

	/* Clear retention on managed clocks (reverse order), if allowed. */
	for (i = sc->clock_count - 1; i >= 0; i--) {
		if (unlikely(i == sc->root_clk_idx))
			continue;
		if (sc->toggle_mem && sc->allow_clear)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
		if (sc->toggle_periph && sc->allow_clear)
			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
	}

	/* Delay to account for staggered memory powerdown. */
	udelay(1);

	if (sc->toggle_logic) {
		/* Request collapse. */
		regmap_read(sc->regmap, REG_OFFSET, &regval);
		regval |= SW_COLLAPSE_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);

		/* Some targets need SW_COLLAPSE pulsed low/high again. */
		if (sc->flags & TOGGLE_SW_COLLAPSE_IN_DISABLE) {
			regval &= ~SW_COLLAPSE_MASK;
			regmap_write(sc->regmap, REG_OFFSET, regval);
			regval |= SW_COLLAPSE_MASK;
			regmap_write(sc->regmap, REG_OFFSET, regval);
		}

		/* Wait for 8 XO cycles before polling the status bit. */
		mb();
		udelay(1);

		if (sc->no_status_check_on_disable) {
			/*
			 * Add a short delay here to ensure that gdsc_enable
			 * right after it was disabled does not put it in a
			 * weird state.
			 */
			udelay(TIMEOUT_US);
		} else {
			if (sc->poll_cfg_gdscr)
				ret = poll_cfg_gdsc_status(sc, DISABLED);
			else
				ret = poll_gdsc_status(sc, DISABLED);
			if (ret)
				dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
					sc->rdesc.name, regval);
		}

		/* Clamp the IOs again now that the domain is down. */
		if (sc->domain_addr) {
			regmap_read(sc->domain_addr, REG_OFFSET, &regval);
			regval |= GMEM_CLAMP_IO_MASK;
			regmap_write(sc->domain_addr, REG_OFFSET, regval);
		}

	} else {
		/* Logic collapse skipped: "disable" means asserting resets. */
		for (i = sc->reset_count - 1; i >= 0; i--)
			reset_control_assert(sc->reset_clocks[i]);
		sc->resets_asserted = true;
	}

	/*
	 * Check if gdsc_enable was called for this GDSC. If not, the root
	 * clock will not have been enabled prior to this.
	 */
	if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);

	sc->is_gdsc_enabled = false;

	mutex_unlock(&gdsc_seq_lock);

	/* Drop the parent supply vote taken in gdsc_enable(). */
	if (sc->parent_regulator)
		regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);

	return ret;
}
484
485static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
486{
487 struct gdsc *sc = rdev_get_drvdata(rdev);
488 uint32_t regval;
489
490 mutex_lock(&gdsc_seq_lock);
491 regmap_read(sc->regmap, REG_OFFSET, &regval);
492 mutex_unlock(&gdsc_seq_lock);
493
494 if (regval & HW_CONTROL_MASK)
495 return REGULATOR_MODE_FAST;
496
497 return REGULATOR_MODE_NORMAL;
498}
499
/*
 * gdsc_set_mode() - regulator_ops .set_mode callback.
 *
 * REGULATOR_MODE_FAST hands the GDSC to the HW trigger signal;
 * REGULATOR_MODE_NORMAL returns it to SW control and re-polls for the
 * enabled state. Any other mode yields -EINVAL. Runs under gdsc_seq_lock.
 */
static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
	struct gdsc *sc = rdev_get_drvdata(rdev);
	uint32_t regval;
	int ret = 0;

	mutex_lock(&gdsc_seq_lock);

	regmap_read(sc->regmap, REG_OFFSET, &regval);

	switch (mode) {
	case REGULATOR_MODE_FAST:
		/* Turn on HW trigger mode */
		regval |= HW_CONTROL_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);
		/*
		 * There may be a race with internal HW trigger signal,
		 * that will result in GDSC going through a power down and
		 * up cycle. In case HW trigger signal is controlled by
		 * firmware that also poll same status bits as we do, FW
		 * might read an 'on' status before the GDSC can finish
		 * power cycle. We wait 1us before returning to ensure
		 * FW can't immediately poll the status bit.
		 */
		mb();
		udelay(1);
		break;
	case REGULATOR_MODE_NORMAL:
		/* Turn off HW trigger mode */
		regval &= ~HW_CONTROL_MASK;
		regmap_write(sc->regmap, REG_OFFSET, regval);
		/*
		 * There may be a race with internal HW trigger signal,
		 * that will result in GDSC going through a power down and
		 * up cycle. If we poll too early, status bit will
		 * indicate 'on' before the GDSC can finish the power cycle.
		 * Account for this case by waiting 1us before polling.
		 */
		mb();
		udelay(1);

		if (sc->poll_cfg_gdscr)
			ret = poll_cfg_gdsc_status(sc, ENABLED);
		else
			ret = poll_gdsc_status(sc, ENABLED);
		if (ret)
			dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
				sc->rdesc.name, regval);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&gdsc_seq_lock);

	return ret;
}
558
/* Regulator operations implemented by this driver. */
static struct regulator_ops gdsc_ops = {
	.is_enabled = gdsc_is_enabled,
	.enable = gdsc_enable,
	.disable = gdsc_disable,
	.set_mode = gdsc_set_mode,
	.get_mode = gdsc_get_mode,
};
566
/* 32-bit MMIO regmap over the GDSCR; fast_io uses spinlock-based locking. */
static const struct regmap_config gdsc_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.fast_io = true,
};
573
574static int gdsc_probe(struct platform_device *pdev)
575{
576 static atomic_t gdsc_count = ATOMIC_INIT(-1);
577 struct regulator_config reg_config = {};
578 struct regulator_init_data *init_data;
579 struct resource *res;
580 struct gdsc *sc;
581 uint32_t regval, clk_dis_wait_val = 0;
Amit Nischal9acc0962018-06-14 13:30:04 +0530582 uint32_t en_few_wait_val, en_rest_wait_val;
Vicky Wallace209cfbb2017-05-16 17:19:38 -0700583 bool retain_mem, retain_periph, support_hw_trigger, prop_val;
Taniya Das988608c2016-08-04 22:12:44 +0530584 int i, ret;
585 u32 timeout;
586
587 sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
588 if (sc == NULL)
589 return -ENOMEM;
590
591 init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
592 &sc->rdesc);
593 if (init_data == NULL)
594 return -ENOMEM;
595
596 if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
597 init_data->supply_regulator = "parent";
598
599 ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
600 &sc->rdesc.name);
601 if (ret)
602 return ret;
603
604 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
605 if (res == NULL) {
606 dev_err(&pdev->dev, "Failed to get resources\n");
607 return -EINVAL;
608 }
609
610 sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
611 if (sc->gdscr == NULL)
612 return -ENOMEM;
613
614 sc->regmap = devm_regmap_init_mmio(&pdev->dev, sc->gdscr,
615 &gdsc_regmap_config);
616 if (!sc->regmap) {
617 dev_err(&pdev->dev, "Couldn't get regmap\n");
618 return -EINVAL;
619 }
620
621 if (of_find_property(pdev->dev.of_node, "domain-addr", NULL)) {
622 sc->domain_addr = syscon_regmap_lookup_by_phandle
623 (pdev->dev.of_node, "domain-addr");
624 if (IS_ERR(sc->domain_addr))
625 return -ENODEV;
626 }
627
628 if (of_find_property(pdev->dev.of_node, "sw-reset", NULL)) {
629 sc->sw_reset = syscon_regmap_lookup_by_phandle
630 (pdev->dev.of_node, "sw-reset");
631 if (IS_ERR(sc->sw_reset))
632 return -ENODEV;
633 }
634
635 if (of_find_property(pdev->dev.of_node, "hw-ctrl-addr", NULL)) {
636 sc->hw_ctrl = syscon_regmap_lookup_by_phandle(
637 pdev->dev.of_node, "hw-ctrl-addr");
638 if (IS_ERR(sc->hw_ctrl))
639 return -ENODEV;
640 }
641
Deepak Katragadda522c8032016-11-11 11:37:10 -0800642 sc->poll_cfg_gdscr = of_property_read_bool(pdev->dev.of_node,
643 "qcom,poll-cfg-gdscr");
644
Taniya Das988608c2016-08-04 22:12:44 +0530645 sc->gds_timeout = TIMEOUT_US;
646
647 ret = of_property_read_u32(pdev->dev.of_node, "qcom,gds-timeout",
648 &timeout);
649 if (!ret)
650 sc->gds_timeout = timeout;
651
652 sc->clock_count = of_property_count_strings(pdev->dev.of_node,
653 "clock-names");
654 if (sc->clock_count == -EINVAL) {
655 sc->clock_count = 0;
656 } else if (sc->clock_count < 0) {
657 dev_err(&pdev->dev, "Failed to get clock names\n");
658 return -EINVAL;
659 }
660
661 sc->clocks = devm_kzalloc(&pdev->dev,
662 sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
663 if (!sc->clocks)
664 return -ENOMEM;
665
666 sc->root_clk_idx = -1;
667
668 sc->root_en = of_property_read_bool(pdev->dev.of_node,
669 "qcom,enable-root-clk");
670
671 sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
672 "qcom,force-enable-root-clk");
673
Vicky Wallace209cfbb2017-05-16 17:19:38 -0700674 prop_val = of_property_read_bool(pdev->dev.of_node,
675 "qcom,toggle-sw-collapse-in-disable");
676 if (prop_val)
677 sc->flags |= TOGGLE_SW_COLLAPSE_IN_DISABLE;
678
Odelu Kukatla0cb925e2019-01-02 18:25:30 +0530679 if (of_find_property(pdev->dev.of_node, "vdd_parent-supply", NULL)) {
680 sc->parent_regulator = devm_regulator_get(&pdev->dev,
681 "vdd_parent");
682 if (IS_ERR(sc->parent_regulator)) {
683 ret = PTR_ERR(sc->parent_regulator);
684 if (ret != -EPROBE_DEFER)
685 dev_err(&pdev->dev,
686 "Unable to get vdd_parent regulator, err: %d\n",
687 ret);
688 return ret;
689 }
690 }
691
Taniya Das988608c2016-08-04 22:12:44 +0530692 for (i = 0; i < sc->clock_count; i++) {
693 const char *clock_name;
694
695 of_property_read_string_index(pdev->dev.of_node, "clock-names",
696 i, &clock_name);
697
698 sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
699 if (IS_ERR(sc->clocks[i])) {
700 int rc = PTR_ERR(sc->clocks[i]);
701
702 if (rc != -EPROBE_DEFER)
703 dev_err(&pdev->dev, "Failed to get %s\n",
704 clock_name);
705 return rc;
706 }
707
708 if (!strcmp(clock_name, "core_root_clk"))
709 sc->root_clk_idx = i;
710 }
711
712 if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
713 dev_err(&pdev->dev, "Failed to get root clock name\n");
714 return -EINVAL;
715 }
716
717 sc->reset_aon = of_property_read_bool(pdev->dev.of_node,
718 "qcom,reset-aon-logic");
719
720 sc->rdesc.id = atomic_inc_return(&gdsc_count);
721 sc->rdesc.ops = &gdsc_ops;
722 sc->rdesc.type = REGULATOR_VOLTAGE;
723 sc->rdesc.owner = THIS_MODULE;
724 platform_set_drvdata(pdev, sc);
725
726 /*
727 * Disable HW trigger: collapse/restore occur based on registers writes.
728 * Disable SW override: Use hardware state-machine for sequencing.
729 */
730 regmap_read(sc->regmap, REG_OFFSET, &regval);
731 regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
732
733 if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
734 &clk_dis_wait_val)) {
735 clk_dis_wait_val = clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
736
737 /* Configure wait time between states. */
738 regval &= ~(CLK_DIS_WAIT_MASK);
739 regval |= clk_dis_wait_val;
740 }
741
Amit Nischal9acc0962018-06-14 13:30:04 +0530742 if (!of_property_read_u32(pdev->dev.of_node, "qcom,en-few-wait-val",
743 &en_few_wait_val)) {
744 en_few_wait_val <<= EN_FEW_WAIT_SHIFT;
745
746 regval &= ~(EN_FEW_WAIT_MASK);
747 regval |= en_few_wait_val;
748 }
749
750 if (!of_property_read_u32(pdev->dev.of_node, "qcom,en-rest-wait-val",
751 &en_rest_wait_val)) {
752 en_rest_wait_val <<= EN_REST_WAIT_SHIFT;
753
754 regval &= ~(EN_REST_WAIT_MASK);
755 regval |= en_rest_wait_val;
756 }
757
Taniya Das988608c2016-08-04 22:12:44 +0530758 regmap_write(sc->regmap, REG_OFFSET, regval);
759
760 sc->no_status_check_on_disable =
761 of_property_read_bool(pdev->dev.of_node,
762 "qcom,no-status-check-on-disable");
763 retain_mem = of_property_read_bool(pdev->dev.of_node,
764 "qcom,retain-mem");
765 sc->toggle_mem = !retain_mem;
766 retain_periph = of_property_read_bool(pdev->dev.of_node,
767 "qcom,retain-periph");
768 sc->toggle_periph = !retain_periph;
769 sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
770 "qcom,skip-logic-collapse");
771 support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
772 "qcom,support-hw-trigger");
773 if (support_hw_trigger) {
774 init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
775 init_data->constraints.valid_modes_mask |=
776 REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
777 }
778
779 if (!sc->toggle_logic) {
780 sc->reset_count = of_property_count_strings(pdev->dev.of_node,
781 "reset-names");
782 if (sc->reset_count == -EINVAL) {
783 sc->reset_count = 0;
784 } else if (sc->reset_count < 0) {
785 dev_err(&pdev->dev, "Failed to get reset clock names\n");
786 return -EINVAL;
787 }
788
789 sc->reset_clocks = devm_kzalloc(&pdev->dev,
790 sizeof(struct reset_control *) * sc->reset_count,
791 GFP_KERNEL);
792 if (!sc->reset_clocks)
793 return -ENOMEM;
794
795 for (i = 0; i < sc->reset_count; i++) {
796 const char *reset_name;
797
798 of_property_read_string_index(pdev->dev.of_node,
799 "reset-names", i, &reset_name);
800 sc->reset_clocks[i] = devm_reset_control_get(&pdev->dev,
801 reset_name);
802 if (IS_ERR(sc->reset_clocks[i])) {
803 int rc = PTR_ERR(sc->reset_clocks[i]);
804
805 if (rc != -EPROBE_DEFER)
806 dev_err(&pdev->dev, "Failed to get %s\n",
807 reset_name);
808 return rc;
809 }
810 }
811
812 regval &= ~SW_COLLAPSE_MASK;
813 regmap_write(sc->regmap, REG_OFFSET, regval);
814
Deepak Katragadda522c8032016-11-11 11:37:10 -0800815 if (sc->poll_cfg_gdscr)
816 ret = poll_cfg_gdsc_status(sc, ENABLED);
817 else
818 ret = poll_gdsc_status(sc, ENABLED);
Taniya Das988608c2016-08-04 22:12:44 +0530819 if (ret) {
820 dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
821 sc->rdesc.name, regval);
822 return ret;
823 }
824 }
825
826 sc->allow_clear = of_property_read_bool(pdev->dev.of_node,
827 "qcom,disallow-clear");
828 sc->allow_clear = !sc->allow_clear;
829
830 for (i = 0; i < sc->clock_count; i++) {
831 if (retain_mem || (regval & PWR_ON_MASK) || !sc->allow_clear)
832 clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
833 else
834 clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
835
836 if (retain_periph || (regval & PWR_ON_MASK) || !sc->allow_clear)
837 clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
838 else
839 clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
840 }
841
Naveen Yadav255113b2020-04-29 22:26:26 +0530842 sc->skip_disable_before_enable = of_property_read_bool(
843 pdev->dev.of_node, "qcom,skip-disable-before-sw-enable");
844
Taniya Das988608c2016-08-04 22:12:44 +0530845 reg_config.dev = &pdev->dev;
846 reg_config.init_data = init_data;
847 reg_config.driver_data = sc;
848 reg_config.of_node = pdev->dev.of_node;
849 reg_config.regmap = sc->regmap;
850
851 sc->rdev = regulator_register(&sc->rdesc, &reg_config);
852 if (IS_ERR(sc->rdev)) {
853 dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
854 sc->rdesc.name);
855 return PTR_ERR(sc->rdev);
856 }
857
858 return 0;
859}
860
/*
 * gdsc_remove() - undo gdsc_probe(); devm-managed resources are released
 * automatically, so only the regulator registration needs explicit teardown.
 */
static int gdsc_remove(struct platform_device *pdev)
{
	struct gdsc *sc = platform_get_drvdata(pdev);

	regulator_unregister(sc->rdev);

	return 0;
}
869
/* DT match table: one compatible string per GDSC node. */
static const struct of_device_id gdsc_match_table[] = {
	{ .compatible = "qcom,gdsc" },
	{}
};
874
/* Platform driver glue for the "qcom,gdsc" devices. */
static struct platform_driver gdsc_driver = {
	.probe = gdsc_probe,
	.remove = gdsc_remove,
	.driver = {
		.name = "gdsc",
		.of_match_table = gdsc_match_table,
		.owner = THIS_MODULE,
	},
};
884
/*
 * Register at subsys_initcall time so GDSCs are available before the
 * drivers of the hardware blocks they power.
 */
static int __init gdsc_init(void)
{
	return platform_driver_register(&gdsc_driver);
}
subsys_initcall(gdsc_init);
890
/* Module exit: unregister the platform driver. */
static void __exit gdsc_exit(void)
{
	platform_driver_unregister(&gdsc_driver);
}
module_exit(gdsc_exit);