blob: 06c8be4892e39d5f548f6a38e561d0762abd5cd4 [file] [log] [blame]
Mahesh Sivasubramanian97040862016-02-01 10:40:26 -07001/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/io.h>
19#include <linux/slab.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/err.h>
23#include <linux/platform_device.h>
24#include <linux/err.h>
25#include <linux/cpu.h>
26#include <soc/qcom/spm.h>
27#include "spm_driver.h"
28
/* Sentinel meaning no voltage has been programmed through SPM yet */
#define VDD_DEFAULT 0xDEADF00D
/* Bit positions within the SPM control (ctl) word assembled in probe */
#define SLP_CMD_BIT 17
#define PC_MODE_BIT 16
#define RET_MODE_BIT 15
#define EVENT_SYNC_BIT 24
#define ISAR_BIT 3
#define SPM_EN_BIT 0
36
/* Pairing of a low power mode id with its SPM control register value */
struct msm_spm_power_modes {
	uint32_t mode;	/* MSM_SPM_MODE_* identifier */
	uint32_t ctl;	/* ctl word (mode bits + sequence start offset) */
};
41
/* One SAW/SPM hardware instance (per-CPU or standalone, e.g. CCI) */
struct msm_spm_device {
	struct list_head list;			/* node on spm_list */
	bool initialized;			/* set after msm_spm_dev_init() succeeds */
	const char *name;			/* from "qcom,name" DT property */
	struct msm_spm_driver_data reg_data;	/* register shadow/state for the drv layer */
	struct msm_spm_power_modes *modes;	/* programmed low power modes */
	uint32_t num_modes;
	uint32_t cpu_vdd;			/* last level set, or VDD_DEFAULT */
	struct cpumask mask;			/* CPUs whose voltage this SPM controls */
	void __iomem *q2s_reg;			/* QChannel-2-SPM register, optional */
	bool qchannel_ignore;
	bool allow_rpm_hs;			/* RPM handshake supported for sleep cmd */
	bool use_spm_clk_gating;
	bool use_qchannel_for_wfi;
	void __iomem *flush_base_addr;		/* optional "hw-flush" resource */
	void __iomem *slpreq_base_addr;		/* optional "slpreq" resource */
};
59
/* Argument bundle for the cross-CPU voltage-set call */
struct msm_spm_vdd_info {
	struct msm_spm_device *vctl_dev;	/* SPM that owns the VCTL port */
	uint32_t vlevel;			/* encoded PMIC level to program */
	int err;				/* result from the remote CPU */
};
65
static LIST_HEAD(spm_list);	/* all SPM devices registered by probe */
/* Per-CPU SPM instance used for low power mode programming */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
/* SPM controlling each CPU's voltage; ERR_PTR() if its probe failed */
static DEFINE_PER_CPU(struct msm_spm_device *, cpu_vctl_device);
69
70static void msm_spm_smp_set_vdd(void *data)
71{
72 struct msm_spm_vdd_info *info = (struct msm_spm_vdd_info *)data;
73 struct msm_spm_device *dev = info->vctl_dev;
74
75 dev->cpu_vdd = info->vlevel;
76 info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel);
77}
78
79/**
80 * msm_spm_probe_done(): Verify and return the status of the cpu(s) and l2
81 * probe.
82 * Return: 0 if all spm devices have been probed, else return -EPROBE_DEFER.
83 * if probe failed, then return the err number for that failure.
84 */
85int msm_spm_probe_done(void)
86{
87 struct msm_spm_device *dev;
88 int cpu;
89 int ret = 0;
90
91 for_each_possible_cpu(cpu) {
92 dev = per_cpu(cpu_vctl_device, cpu);
93 if (!dev)
94 return -EPROBE_DEFER;
95
96 ret = IS_ERR(dev);
97 if (ret)
98 return ret;
99 }
100
101 return 0;
102}
103EXPORT_SYMBOL(msm_spm_probe_done);
104
/* Dump the SPM register contents for @cpu through the driver layer */
void msm_spm_dump_regs(unsigned int cpu)
{
	dump_regs(&per_cpu(msm_cpu_spm_device, cpu).reg_data, cpu);
}
109
110/**
111 * msm_spm_set_vdd(): Set core voltage
112 * @cpu: core id
113 * @vlevel: Encoded PMIC data.
114 *
115 * Return: 0 on success or -(ERRNO) on failure.
116 */
117int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
118{
119 struct msm_spm_vdd_info info;
120 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
121 int ret;
122
123 if (!dev)
124 return -EPROBE_DEFER;
125
126 ret = IS_ERR(dev);
127 if (ret)
128 return ret;
129
130 info.vctl_dev = dev;
131 info.vlevel = vlevel;
132
133 ret = smp_call_function_any(&dev->mask, msm_spm_smp_set_vdd, &info,
134 true);
135 if (ret)
136 return ret;
137
138 return info.err;
139}
140EXPORT_SYMBOL(msm_spm_set_vdd);
141
142/**
143 * msm_spm_get_vdd(): Get core voltage
144 * @cpu: core id
145 * @return: Returns encoded PMIC data.
146 */
147int msm_spm_get_vdd(unsigned int cpu)
148{
149 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
150
151 if (!dev)
152 return -EPROBE_DEFER;
153
154 return msm_spm_drv_get_vdd(&dev->reg_data) ? : -EINVAL;
155}
156EXPORT_SYMBOL(msm_spm_get_vdd);
157
158static void msm_spm_config_q2s(struct msm_spm_device *dev, unsigned int mode)
159{
160 uint32_t spm_legacy_mode = 0;
161 uint32_t qchannel_ignore = 0;
162 uint32_t val = 0;
163
164 if (!dev->q2s_reg)
165 return;
166
167 switch (mode) {
168 case MSM_SPM_MODE_DISABLED:
169 case MSM_SPM_MODE_CLOCK_GATING:
170 qchannel_ignore = !dev->use_qchannel_for_wfi;
171 spm_legacy_mode = 0;
172 break;
173 case MSM_SPM_MODE_RETENTION:
174 qchannel_ignore = 0;
175 spm_legacy_mode = 0;
176 break;
177 case MSM_SPM_MODE_GDHS:
178 case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
179 case MSM_SPM_MODE_POWER_COLLAPSE:
180 qchannel_ignore = dev->qchannel_ignore;
181 spm_legacy_mode = 1;
182 break;
183 default:
184 break;
185 }
186
187 val = spm_legacy_mode << 2 | qchannel_ignore << 1;
188 __raw_writel(val, dev->q2s_reg);
189 mb(); /* Ensure flush */
190}
191
192static void msm_spm_config_hw_flush(struct msm_spm_device *dev,
193 unsigned int mode)
194{
195 uint32_t val = 0;
196
197 if (!dev->flush_base_addr)
198 return;
199
200 switch (mode) {
201 case MSM_SPM_MODE_FASTPC:
202 case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
203 case MSM_SPM_MODE_POWER_COLLAPSE:
204 val = BIT(0);
205 break;
206 default:
207 break;
208 }
209
210 __raw_writel(val, dev->flush_base_addr);
211}
212
213static void msm_spm_config_slpreq(struct msm_spm_device *dev,
214 unsigned int mode)
215{
216 uint32_t val = 0;
217
218 if (!dev->slpreq_base_addr)
219 return;
220
221 switch (mode) {
222 case MSM_SPM_MODE_FASTPC:
223 case MSM_SPM_MODE_GDHS:
224 case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
225 case MSM_SPM_MODE_POWER_COLLAPSE:
226 val = BIT(4);
227 break;
228 default:
229 break;
230 }
231
232 val = (__raw_readl(dev->slpreq_base_addr) & ~BIT(4)) | val;
233 __raw_writel(val, dev->slpreq_base_addr);
234}
235
/*
 * msm_spm_dev_set_low_power_mode() - Program @dev for low power @mode and
 * update the auxiliary Q2S / HW-flush / sleep-request registers to match.
 * @notify_rpm: caller intends the RPM to be notified for this mode; when the
 * device does not support RPM handshake, the sleep command bit is stripped.
 *
 * Returns 0 on success, -ENODEV/-ENXIO if absent or uninitialized, else the
 * result of the SPM driver calls.
 */
static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
		unsigned int mode, bool notify_rpm)
{
	uint32_t i;
	int ret = -EINVAL;
	uint32_t ctl = 0;

	if (!dev) {
		pr_err("dev is NULL\n");
		return -ENODEV;
	}

	if (!dev->initialized)
		return -ENXIO;

	/* No sequences were programmed; nothing to select */
	if (!dev->num_modes)
		return 0;

	if (mode == MSM_SPM_MODE_DISABLED) {
		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
	} else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
		for (i = 0; i < dev->num_modes; i++) {
			if (dev->modes[i].mode != mode)
				continue;

			ctl = dev->modes[i].ctl;
			/* Strip sleep command if RPM handshake unsupported */
			if (!dev->allow_rpm_hs && notify_rpm)
				ctl &= ~BIT(SLP_CMD_BIT);

			break;
		}
		/* NOTE(review): if @mode is not in modes[], ctl stays 0 and
		 * is still written - confirm this is intentional.
		 */
		ret = msm_spm_drv_set_low_power_mode(&dev->reg_data, ctl);
	}

	/* Auxiliary registers are reprogrammed regardless of ret */
	msm_spm_config_q2s(dev, mode);
	msm_spm_config_hw_flush(dev, mode);
	msm_spm_config_slpreq(dev, mode);

	return ret;
}
276
277static int msm_spm_dev_init(struct msm_spm_device *dev,
278 struct msm_spm_platform_data *data)
279{
280 int i, ret = -ENOMEM;
281 uint32_t offset = 0;
282
283 dev->cpu_vdd = VDD_DEFAULT;
284 dev->num_modes = data->num_modes;
285 dev->modes = kmalloc(
286 sizeof(struct msm_spm_power_modes) * dev->num_modes,
287 GFP_KERNEL);
288
289 if (!dev->modes)
290 goto spm_failed_malloc;
291
292 ret = msm_spm_drv_init(&dev->reg_data, data);
293
294 if (ret)
295 goto spm_failed_init;
296
297 for (i = 0; i < dev->num_modes; i++) {
298
299 /* Default offset is 0 and gets updated as we write more
300 * sequences into SPM
301 */
302 dev->modes[i].ctl = data->modes[i].ctl | ((offset & 0x1FF)
303 << 4);
304 ret = msm_spm_drv_write_seq_data(&dev->reg_data,
305 data->modes[i].cmd, &offset);
306 if (ret < 0)
307 goto spm_failed_init;
308
309 dev->modes[i].mode = data->modes[i].mode;
310 }
311
312 msm_spm_drv_reinit(&dev->reg_data, dev->num_modes ? true : false);
313
314 dev->initialized = true;
315
316 return 0;
317
318spm_failed_init:
319 kfree(dev->modes);
320spm_failed_malloc:
321 return ret;
322}
323
324/**
325 * msm_spm_turn_on_cpu_rail(): Power on cpu rail before turning on core
326 * @node: The SPM node that controls the voltage for the CPU
327 * @val: The value to be set on the rail
328 * @cpu: The cpu for this with rail is being powered on
329 */
330int msm_spm_turn_on_cpu_rail(struct device_node *vctl_node,
331 unsigned int val, int cpu, int vctl_offset)
332{
333 uint32_t timeout = 2000; /* delay for voltage to settle on the core */
334 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
335 void __iomem *base;
336
337 base = of_iomap(vctl_node, 1);
338 if (base) {
339 /*
340 * Program Q2S to disable SPM legacy mode and ignore Q2S
341 * channel requests.
342 * bit[1] = qchannel_ignore = 1
343 * bit[2] = spm_legacy_mode = 0
344 */
345 writel_relaxed(0x2, base);
346 mb(); /* Ensure flush */
347 iounmap(base);
348 }
349
350 base = of_iomap(vctl_node, 0);
351 if (!base)
352 return -ENOMEM;
353
354 if (dev && (dev->cpu_vdd != VDD_DEFAULT))
355 return 0;
356
357 /* Set the CPU supply regulator voltage */
358 val = (val & 0xFF);
359 writel_relaxed(val, base + vctl_offset);
360 mb(); /* Ensure flush */
361 udelay(timeout);
362
363 /* Enable the CPU supply regulator*/
364 val = 0x30080;
365 writel_relaxed(val, base + vctl_offset);
366 mb(); /* Ensure flush */
367 udelay(timeout);
368
369 iounmap(base);
370
371 return 0;
372}
373EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);
374
/* Re-run the SPM driver re-initialization for every possible CPU's SPM,
 * leaving SPM enabled.
 */
void msm_spm_reinit(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		msm_spm_drv_reinit(
			&per_cpu(msm_cpu_spm_device.reg_data, cpu), true);
}
EXPORT_SYMBOL(msm_spm_reinit);
384
385/*
386 * msm_spm_is_mode_avail() - Specifies if a mode is available for the cpu
387 * It should only be used to decide a mode before lpm driver is probed.
388 * @mode: SPM LPM mode to be selected
389 */
390bool msm_spm_is_mode_avail(unsigned int mode)
391{
392 struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
393 int i;
394
395 for (i = 0; i < dev->num_modes; i++) {
396 if (dev->modes[i].mode == mode)
397 return true;
398 }
399
400 return false;
401}
402
403/**
404 * msm_spm_is_avs_enabled() - Functions returns 1 if AVS is enabled and
405 * 0 if it is not.
406 * @cpu: specifies cpu's avs should be read
407 *
408 * Returns errno in case of failure or AVS enable state otherwise
409 */
410int msm_spm_is_avs_enabled(unsigned int cpu)
411{
412 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
413
414 if (!dev)
415 return -ENXIO;
416
417 return msm_spm_drv_get_avs_enable(&dev->reg_data);
418}
419EXPORT_SYMBOL(msm_spm_is_avs_enabled);
420
421/**
422 * msm_spm_avs_enable() - Enables AVS on the SAW that controls this cpu's
423 * voltage.
424 * @cpu: specifies which cpu's avs should be enabled
425 *
426 * Returns errno in case of failure or 0 if successful
427 */
428int msm_spm_avs_enable(unsigned int cpu)
429{
430 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
431
432 if (!dev)
433 return -ENXIO;
434
435 return msm_spm_drv_set_avs_enable(&dev->reg_data, true);
436}
437EXPORT_SYMBOL(msm_spm_avs_enable);
438
439/**
440 * msm_spm_avs_disable() - Disables AVS on the SAW that controls this cpu's
441 * voltage.
442 * @cpu: specifies which cpu's avs should be enabled
443 *
444 * Returns errno in case of failure or 0 if successful
445 */
446int msm_spm_avs_disable(unsigned int cpu)
447{
448 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
449
450 if (!dev)
451 return -ENXIO;
452
453 return msm_spm_drv_set_avs_enable(&dev->reg_data, false);
454}
455EXPORT_SYMBOL(msm_spm_avs_disable);
456
457/**
458 * msm_spm_avs_set_limit() - Set maximum and minimum AVS limits on the
459 * SAW that controls this cpu's voltage.
460 * @cpu: specify which cpu's avs should be configured
461 * @min_lvl: specifies the minimum PMIC output voltage control register
462 * value that may be sent to the PMIC
463 * @max_lvl: specifies the maximum PMIC output voltage control register
464 * value that may be sent to the PMIC
465 * Returns errno in case of failure or 0 if successful
466 */
467int msm_spm_avs_set_limit(unsigned int cpu,
468 uint32_t min_lvl, uint32_t max_lvl)
469{
470 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
471
472 if (!dev)
473 return -ENXIO;
474
475 return msm_spm_drv_set_avs_limit(&dev->reg_data, min_lvl, max_lvl);
476}
477EXPORT_SYMBOL(msm_spm_avs_set_limit);
478
479/**
480 * msm_spm_avs_enable_irq() - Enable an AVS interrupt
481 * @cpu: specifies which CPU's AVS should be configured
482 * @irq: specifies which interrupt to enable
483 *
484 * Returns errno in case of failure or 0 if successful.
485 */
486int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
487{
488 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
489
490 if (!dev)
491 return -ENXIO;
492
493 return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, true);
494}
495EXPORT_SYMBOL(msm_spm_avs_enable_irq);
496
497/**
498 * msm_spm_avs_disable_irq() - Disable an AVS interrupt
499 * @cpu: specifies which CPU's AVS should be configured
500 * @irq: specifies which interrupt to disable
501 *
502 * Returns errno in case of failure or 0 if successful.
503 */
504int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
505{
506 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
507
508 if (!dev)
509 return -ENXIO;
510
511 return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, false);
512}
513EXPORT_SYMBOL(msm_spm_avs_disable_irq);
514
515/**
516 * msm_spm_avs_clear_irq() - Clear a latched AVS interrupt
517 * @cpu: specifies which CPU's AVS should be configured
518 * @irq: specifies which interrupt to clear
519 *
520 * Returns errno in case of failure or 0 if successful.
521 */
522int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
523{
524 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
525
526 if (!dev)
527 return -ENXIO;
528
529 return msm_spm_drv_avs_clear_irq(&dev->reg_data, irq);
530}
531EXPORT_SYMBOL(msm_spm_avs_clear_irq);
532
533/**
534 * msm_spm_set_low_power_mode() - Configure SPM start address for low power mode
535 * @mode: SPM LPM mode to enter
536 * @notify_rpm: Notify RPM in this mode
537 */
538int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
539{
540 struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
541
542 return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
543}
544EXPORT_SYMBOL(msm_spm_set_low_power_mode);
545
546/**
547 * msm_spm_init(): Board initalization function
548 * @data: platform specific SPM register configuration data
549 * @nr_devs: Number of SPM devices being initialized
550 */
551int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
552{
553 unsigned int cpu;
554 int ret = 0;
555
556 if ((nr_devs < num_possible_cpus()) || !data)
557 return -EINVAL;
558
559 for_each_possible_cpu(cpu) {
560 struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
561
562 ret = msm_spm_dev_init(dev, &data[cpu]);
563 if (ret < 0) {
564 pr_warn("%s():failed CPU:%u ret:%d\n", __func__,
565 cpu, ret);
566 break;
567 }
568 }
569
570 return ret;
571}
572
573struct msm_spm_device *msm_spm_get_device_by_name(const char *name)
574{
575 struct list_head *list;
576
577 list_for_each(list, &spm_list) {
578 struct msm_spm_device *dev
579 = list_entry(list, typeof(*dev), list);
580 if (dev->name && !strcmp(dev->name, name))
581 return dev;
582 }
583 return ERR_PTR(-ENODEV);
584}
585
/* Configure an explicitly supplied SPM device (e.g. obtained from
 * msm_spm_get_device_by_name()) for low power @mode.
 */
int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
		unsigned int mode, bool notify_rpm)
{
	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
}
591#ifdef CONFIG_MSM_L2_SPM
592
593/**
594 * msm_spm_apcs_set_phase(): Set number of SMPS phases.
595 * @cpu: cpu which is requesting the change in number of phases.
596 * @phase_cnt: Number of phases to be set active
597 */
598int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt)
599{
600 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
601
602 if (!dev)
603 return -ENXIO;
604
605 return msm_spm_drv_set_pmic_data(&dev->reg_data,
606 MSM_SPM_PMIC_PHASE_PORT, phase_cnt);
607}
608EXPORT_SYMBOL(msm_spm_apcs_set_phase);
609
610/** msm_spm_enable_fts_lpm() : Enable FTS to switch to low power
611 * when the cores are in low power modes
612 * @cpu: cpu that is entering low power mode.
613 * @mode: The mode configuration for FTS
614 */
615int msm_spm_enable_fts_lpm(int cpu, uint32_t mode)
616{
617 struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
618
619 if (!dev)
620 return -ENXIO;
621
622 return msm_spm_drv_set_pmic_data(&dev->reg_data,
623 MSM_SPM_PMIC_PFM_PORT, mode);
624}
625EXPORT_SYMBOL(msm_spm_enable_fts_lpm);
626
627#endif
628
629static int get_cpu_id(struct device_node *node)
630{
631 struct device_node *cpu_node;
632 u32 cpu;
633 char *key = "qcom,cpu";
634
635 cpu_node = of_parse_phandle(node, key, 0);
636 if (cpu_node) {
637 for_each_possible_cpu(cpu) {
638 if (of_get_cpu_node(cpu, NULL) == cpu_node)
639 return cpu;
640 }
641 } else
642 return num_possible_cpus();
643
644 return -EINVAL;
645}
646
/*
 * msm_spm_get_device() - Resolve the SPM device for @pdev. CPU-bound nodes
 * use the per-cpu device; a non-CPU node (get_cpu_id() returns
 * num_possible_cpus()) gets a freshly allocated device. The device is named
 * from "qcom,name" and linked onto spm_list. Returns NULL when the CPU is
 * unavailable (partial goods) or the name property is missing.
 */
static struct msm_spm_device *msm_spm_get_device(struct platform_device *pdev)
{
	struct msm_spm_device *dev = NULL;
	const char *val = NULL;
	char *key = "qcom,name";
	int cpu = get_cpu_id(pdev->dev.of_node);

	if ((cpu >= 0) && cpu < num_possible_cpus())
		dev = &per_cpu(msm_cpu_spm_device, cpu);
	else if (cpu == num_possible_cpus())
		dev = devm_kzalloc(&pdev->dev, sizeof(struct msm_spm_device),
					GFP_KERNEL);

	if (!dev)
		return NULL;

	if (of_property_read_string(pdev->dev.of_node, key, &val)) {
		pr_err("%s(): Cannot find a required node key:%s\n",
				__func__, key);
		return NULL;
	}
	dev->name = val;
	list_add(&dev->list, &spm_list);

	return dev;
}
673
674static void get_cpumask(struct device_node *node, struct cpumask *mask)
675{
676 unsigned int c;
677 int idx = 0;
678 struct device_node *cpu_node;
679 char *key = "qcom,cpu-vctl-list";
680
681 cpu_node = of_parse_phandle(node, key, idx++);
682 while (cpu_node) {
683 for_each_possible_cpu(c) {
684 if (of_get_cpu_node(c, NULL) == cpu_node)
685 cpumask_set_cpu(c, mask);
686 }
687 cpu_node = of_parse_phandle(node, key, idx++);
688 };
689}
690
691static int msm_spm_dev_probe(struct platform_device *pdev)
692{
693 int ret = 0;
694 int cpu = 0;
695 int i = 0;
696 struct device_node *node = pdev->dev.of_node;
697 struct device_node *n = NULL;
698 struct msm_spm_platform_data spm_data;
699 char *key = NULL;
700 uint32_t val = 0;
701 struct msm_spm_seq_entry modes[MSM_SPM_MODE_NR];
702 int len = 0;
703 struct msm_spm_device *dev = NULL;
704 struct resource *res = NULL;
705 uint32_t mode_count = 0;
706
707 struct spm_of {
708 char *key;
709 uint32_t id;
710 };
711
712 struct spm_of spm_of_data[] = {
713 {"qcom,saw2-cfg", MSM_SPM_REG_SAW_CFG},
714 {"qcom,saw2-avs-ctl", MSM_SPM_REG_SAW_AVS_CTL},
715 {"qcom,saw2-avs-hysteresis", MSM_SPM_REG_SAW_AVS_HYSTERESIS},
716 {"qcom,saw2-avs-limit", MSM_SPM_REG_SAW_AVS_LIMIT},
717 {"qcom,saw2-avs-dly", MSM_SPM_REG_SAW_AVS_DLY},
718 {"qcom,saw2-spm-dly", MSM_SPM_REG_SAW_SPM_DLY},
719 {"qcom,saw2-spm-ctl", MSM_SPM_REG_SAW_SPM_CTL},
720 {"qcom,saw2-pmic-data0", MSM_SPM_REG_SAW_PMIC_DATA_0},
721 {"qcom,saw2-pmic-data1", MSM_SPM_REG_SAW_PMIC_DATA_1},
722 {"qcom,saw2-pmic-data2", MSM_SPM_REG_SAW_PMIC_DATA_2},
723 {"qcom,saw2-pmic-data3", MSM_SPM_REG_SAW_PMIC_DATA_3},
724 {"qcom,saw2-pmic-data4", MSM_SPM_REG_SAW_PMIC_DATA_4},
725 {"qcom,saw2-pmic-data5", MSM_SPM_REG_SAW_PMIC_DATA_5},
726 {"qcom,saw2-pmic-data6", MSM_SPM_REG_SAW_PMIC_DATA_6},
727 {"qcom,saw2-pmic-data7", MSM_SPM_REG_SAW_PMIC_DATA_7},
728 };
729
730 struct mode_of {
731 char *key;
732 uint32_t id;
733 };
734
735 struct mode_of mode_of_data[] = {
736 {"qcom,saw2-spm-cmd-wfi", MSM_SPM_MODE_CLOCK_GATING},
737 {"qcom,saw2-spm-cmd-ret", MSM_SPM_MODE_RETENTION},
738 {"qcom,saw2-spm-cmd-gdhs", MSM_SPM_MODE_GDHS},
739 {"qcom,saw2-spm-cmd-spc",
740 MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE},
741 {"qcom,saw2-spm-cmd-pc", MSM_SPM_MODE_POWER_COLLAPSE},
742 {"qcom,saw2-spm-cmd-fpc", MSM_SPM_MODE_FASTPC},
743 };
744
745 dev = msm_spm_get_device(pdev);
746 if (!dev) {
747 /*
748 * For partial goods support some CPUs might not be available
749 * in which case, shouldn't throw an error
750 */
751 return 0;
752 }
753 get_cpumask(node, &dev->mask);
754
755 memset(&spm_data, 0, sizeof(struct msm_spm_platform_data));
756 memset(&modes, 0,
757 (MSM_SPM_MODE_NR - 2) * sizeof(struct msm_spm_seq_entry));
758
759 key = "qcom,saw2-ver-reg";
760 ret = of_property_read_u32(node, key, &val);
761 if (ret)
762 goto fail;
763 spm_data.ver_reg = val;
764
765 key = "qcom,vctl-timeout-us";
766 ret = of_property_read_u32(node, key, &val);
767 if (!ret)
768 spm_data.vctl_timeout_us = val;
769
770 /* SAW start address */
771 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
772 if (!res) {
773 ret = -EFAULT;
774 goto fail;
775 }
776
777 spm_data.reg_base_addr = devm_ioremap(&pdev->dev, res->start,
778 resource_size(res));
779 if (!spm_data.reg_base_addr) {
780 ret = -ENOMEM;
781 goto fail;
782 }
783
784 spm_data.vctl_port = -1;
785 spm_data.phase_port = -1;
786 spm_data.pfm_port = -1;
787
788 key = "qcom,vctl-port";
789 of_property_read_u32(node, key, &spm_data.vctl_port);
790
791 key = "qcom,phase-port";
792 of_property_read_u32(node, key, &spm_data.phase_port);
793
794 key = "qcom,pfm-port";
795 of_property_read_u32(node, key, &spm_data.pfm_port);
796
797 /* Q2S (QChannel-2-SPM) register */
798 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "q2s");
799 if (res) {
800 dev->q2s_reg = devm_ioremap(&pdev->dev, res->start,
801 resource_size(res));
802 if (!dev->q2s_reg) {
803 pr_err("%s(): Unable to iomap Q2S register\n",
804 __func__);
805 ret = -EADDRNOTAVAIL;
806 goto fail;
807 }
808 }
809
810 key = "qcom,use-qchannel-for-pc";
811 dev->qchannel_ignore = !of_property_read_bool(node, key);
812
813 key = "qcom,use-spm-clock-gating";
814 dev->use_spm_clk_gating = of_property_read_bool(node, key);
815
816 key = "qcom,use-qchannel-for-wfi";
817 dev->use_qchannel_for_wfi = of_property_read_bool(node, key);
818
819 /* HW flush address */
820 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hw-flush");
821 if (res) {
822 dev->flush_base_addr = devm_ioremap_resource(&pdev->dev, res);
823 if (IS_ERR(dev->flush_base_addr)) {
824 ret = PTR_ERR(dev->flush_base_addr);
825 pr_err("%s(): Unable to iomap hw flush register %d\n",
826 __func__, ret);
827 goto fail;
828 }
829 }
830
831 /* Sleep req address */
832 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slpreq");
833 if (res) {
834 dev->slpreq_base_addr = devm_ioremap(&pdev->dev, res->start,
835 resource_size(res));
836 if (!dev->slpreq_base_addr) {
837 ret = -ENOMEM;
838 pr_err("%s(): Unable to iomap slpreq register\n",
839 __func__);
840 ret = -EADDRNOTAVAIL;
841 goto fail;
842 }
843 }
844
845 /*
846 * At system boot, cpus and or clusters can remain in reset. CCI SPM
847 * will not be triggered unless SPM_LEGACY_MODE bit is set for the
848 * cluster in reset. Initialize q2s registers and set the
849 * SPM_LEGACY_MODE bit.
850 */
851 msm_spm_config_q2s(dev, MSM_SPM_MODE_POWER_COLLAPSE);
852 msm_spm_drv_reg_init(&dev->reg_data, &spm_data);
853
854 for (i = 0; i < ARRAY_SIZE(spm_of_data); i++) {
855 ret = of_property_read_u32(node, spm_of_data[i].key, &val);
856 if (ret)
857 continue;
858 msm_spm_drv_upd_reg_shadow(&dev->reg_data, spm_of_data[i].id,
859 val);
860 }
861
862 for_each_child_of_node(node, n) {
863 const char *name;
864 bool bit_set;
865 int sync;
866
867 if (!n->name)
868 continue;
869
870 ret = of_property_read_string(n, "qcom,label", &name);
871 if (ret)
872 continue;
873
874 for (i = 0; i < ARRAY_SIZE(mode_of_data); i++)
875 if (!strcmp(name, mode_of_data[i].key))
876 break;
877
878 if (i == ARRAY_SIZE(mode_of_data)) {
879 pr_err("Mode name invalid %s\n", name);
880 break;
881 }
882
883 modes[mode_count].mode = mode_of_data[i].id;
884 modes[mode_count].cmd =
885 (uint8_t *)of_get_property(n, "qcom,sequence", &len);
886 if (!modes[mode_count].cmd) {
887 pr_err("cmd is empty\n");
888 continue;
889 }
890
891 bit_set = of_property_read_bool(n, "qcom,pc_mode");
892 modes[mode_count].ctl |= bit_set ? BIT(PC_MODE_BIT) : 0;
893
894 bit_set = of_property_read_bool(n, "qcom,ret_mode");
895 modes[mode_count].ctl |= bit_set ? BIT(RET_MODE_BIT) : 0;
896
897 bit_set = of_property_read_bool(n, "qcom,slp_cmd_mode");
898 modes[mode_count].ctl |= bit_set ? BIT(SLP_CMD_BIT) : 0;
899
900 bit_set = of_property_read_bool(n, "qcom,isar");
901 modes[mode_count].ctl |= bit_set ? BIT(ISAR_BIT) : 0;
902
903 bit_set = of_property_read_bool(n, "qcom,spm_en");
904 modes[mode_count].ctl |= bit_set ? BIT(SPM_EN_BIT) : 0;
905
906 ret = of_property_read_u32(n, "qcom,event_sync", &sync);
907 if (!ret)
908 modes[mode_count].ctl |= sync << EVENT_SYNC_BIT;
909
910 mode_count++;
911 }
912
913 spm_data.modes = modes;
914 spm_data.num_modes = mode_count;
915
916 key = "qcom,supports-rpm-hs";
917 dev->allow_rpm_hs = of_property_read_bool(pdev->dev.of_node, key);
918
919 ret = msm_spm_dev_init(dev, &spm_data);
920 if (ret)
921 pr_err("SPM modes programming is not available from HLOS\n");
922
923 platform_set_drvdata(pdev, dev);
924
925 for_each_cpu(cpu, &dev->mask)
926 per_cpu(cpu_vctl_device, cpu) = dev;
927
928 if (!spm_data.num_modes)
929 return 0;
930
931 cpu = get_cpu_id(pdev->dev.of_node);
932
933 /* For CPUs that are online, the SPM has to be programmed for
934 * clockgating mode to ensure that it can use SPM for entering these
935 * low power modes.
936 */
937 get_online_cpus();
938 if ((cpu >= 0) && (cpu < num_possible_cpus()) && (cpu_online(cpu)))
939 msm_spm_config_low_power_mode(dev, MSM_SPM_MODE_CLOCK_GATING,
940 false);
941 put_online_cpus();
942 return ret;
943
944fail:
945 cpu = get_cpu_id(pdev->dev.of_node);
946 if (dev && (cpu >= num_possible_cpus() || (cpu < 0))) {
947 for_each_cpu(cpu, &dev->mask)
948 per_cpu(cpu_vctl_device, cpu) = ERR_PTR(ret);
949 }
950
951 pr_err("%s: CPU%d SPM device probe failed: %d\n", __func__, cpu, ret);
952
953 return ret;
954}
955
956static int msm_spm_dev_remove(struct platform_device *pdev)
957{
958 struct msm_spm_device *dev = platform_get_drvdata(pdev);
959
960 list_del(&dev->list);
961 return 0;
962}
963
/* Device tree compatible strings handled by this driver */
static const struct of_device_id msm_spm_match_table[] = {
	{.compatible = "qcom,spm-v2"},
	{},
};
968
/* Platform driver binding probe/remove to "qcom,spm-v2" nodes */
static struct platform_driver msm_spm_device_driver = {
	.probe = msm_spm_dev_probe,
	.remove = msm_spm_dev_remove,
	.driver = {
		.name = "spm-v2",
		.owner = THIS_MODULE,
		.of_match_table = msm_spm_match_table,
	},
};
978
979/**
980 * msm_spm_device_init(): Device tree initialization function
981 */
982int __init msm_spm_device_init(void)
983{
984 static bool registered;
985
986 if (registered)
987 return 0;
988 registered = true;
989 return platform_driver_register(&msm_spm_device_driver);
990}
991arch_initcall(msm_spm_device_init);