/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
#include "lpm-levels-legacy.h"

bool use_psci;
enum lpm_type {
	IDLE = 0,
	SUSPEND,
	LPM_TYPE_NR
};

struct lpm_type_str {
	enum lpm_type type;
	char *str;
};

static const struct lpm_type_str lpm_types[] = {
	{IDLE, "idle_enabled"},
	{SUSPEND, "suspend_enabled"},
};

static DEFINE_PER_CPU(uint32_t *, max_residency);
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;

static void *get_enabled_ptr(struct kobj_attribute *attr,
				struct lpm_level_avail *avail)
{
	void *arg = NULL;

	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
		arg = (void *) &avail->idle_enabled;
	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
		arg = (void *) &avail->suspend_enabled;

	return arg;
}

static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
					struct kobj_attribute *attr)
{
	struct lpm_level_avail *avail = NULL;

	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
		avail = container_of(attr, struct lpm_level_avail,
					idle_enabled_attr);
	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
		avail = container_of(attr, struct lpm_level_avail,
					suspend_enabled_attr);

	return avail;
}

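/*
 * For each enabled low power mode of @cpu, cache the smallest non-zero
 * residency of any deeper (higher-indexed) enabled mode in the per-CPU
 * max_residency table — roughly the sleep length past which a deeper
 * mode is expected to save more power. At probe time every mode is
 * treated as allowed; afterwards the sysfs toggles are honoured.
 */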
static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
		bool probe_time)
{
	int i, j;
	bool mode_avail;
	uint32_t *residency = per_cpu(max_residency, cpu_id);

	for (i = 0; i < cpu->nlevels; i++) {
		struct power_params *pwr = &cpu->levels[i].pwr;

		mode_avail = probe_time ||
			lpm_cpu_mode_allow(cpu_id, i, true);

		if (!mode_avail) {
			residency[i] = 0;
			continue;
		}

		residency[i] = ~0;
		for (j = i + 1; j < cpu->nlevels; j++) {
			mode_avail = probe_time ||
				lpm_cpu_mode_allow(cpu_id, j, true);

			if (mode_avail &&
				(residency[i] > pwr->residencies[j]) &&
				(pwr->residencies[j] != 0))
				residency[i] = pwr->residencies[j];
		}
	}
}

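/*
 * Same computation as set_optimum_cpu_residency(), but for cluster low
 * power levels: each level's max_residency becomes the smallest non-zero
 * residency of any deeper enabled cluster level.
 */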
static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
		bool probe_time)
{
	int i, j;
	bool mode_avail;

	for (i = 0; i < cluster->nlevels; i++) {
		struct power_params *pwr = &cluster->levels[i].pwr;

		mode_avail = probe_time ||
			lpm_cluster_mode_allow(cluster, i,
					true);

		if (!mode_avail) {
			pwr->max_residency = 0;
			continue;
		}

		pwr->max_residency = ~0;
		for (j = i + 1; j < cluster->nlevels; j++) {
			mode_avail = probe_time ||
				lpm_cluster_mode_allow(cluster, j,
						true);
			if (mode_avail &&
				(pwr->max_residency > pwr->residencies[j]) &&
				(pwr->residencies[j] != 0))
				pwr->max_residency = pwr->residencies[j];
		}
	}
}

uint32_t *get_per_cpu_max_residency(int cpu)
{
	return per_cpu(max_residency, cpu);
}

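/*
 * sysfs handlers for the per-level idle_enabled/suspend_enabled nodes.
 * The attribute name selects which flag of the owning lpm_level_avail
 * is read or written; a store also refreshes the cached optimum
 * residencies so the new setting takes effect immediately.
 */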
static ssize_t lpm_enable_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int ret = 0;
	struct kernel_param kp;

	kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
	ret = param_get_bool(buf, &kp);
	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

static ssize_t lpm_enable_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t len)
{
	int ret = 0;
	struct kernel_param kp;
	struct lpm_level_avail *avail;

	avail = get_avail_ptr(kobj, attr);
	if (WARN_ON(!avail))
		return -EINVAL;

	kp.arg = get_enabled_ptr(attr, avail);
	ret = param_set_bool(buf, &kp);

	if (avail->cpu_node)
		set_optimum_cpu_residency(avail->data, avail->idx, false);
	else
		set_optimum_cluster_residency(avail->data, false);

	return ret ? ret : len;
}

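/*
 * Create a kobject named after the low power level and attach the
 * idle_enabled and suspend_enabled attributes to it, both defaulting
 * to true. @data/@index/@cpu_node record which CPU or cluster level
 * the node controls so the store handler can update it later.
 */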
static int create_lvl_avail_nodes(const char *name,
			struct kobject *parent, struct lpm_level_avail *avail,
			void *data, int index, bool cpu_node)
{
	struct attribute_group *attr_group = NULL;
	struct attribute **attr = NULL;
	struct kobject *kobj = NULL;
	int ret = 0;

	kobj = kobject_create_and_add(name, parent);
	if (!kobj)
		return -ENOMEM;

	attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
					GFP_KERNEL);
	if (!attr_group) {
		ret = -ENOMEM;
		goto failed;
	}

	attr = devm_kzalloc(&lpm_pdev->dev,
		sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto failed;
	}

	sysfs_attr_init(&avail->idle_enabled_attr.attr);
	avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
	avail->idle_enabled_attr.attr.mode = 0644;
	avail->idle_enabled_attr.show = lpm_enable_show;
	avail->idle_enabled_attr.store = lpm_enable_store;

	sysfs_attr_init(&avail->suspend_enabled_attr.attr);
	avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
	avail->suspend_enabled_attr.attr.mode = 0644;
	avail->suspend_enabled_attr.show = lpm_enable_show;
	avail->suspend_enabled_attr.store = lpm_enable_store;

	attr[0] = &avail->idle_enabled_attr.attr;
	attr[1] = &avail->suspend_enabled_attr.attr;
	attr[2] = NULL;
	attr_group->attrs = attr;

	ret = sysfs_create_group(kobj, attr_group);
	if (ret) {
		ret = -ENOMEM;
		goto failed;
	}

	avail->idle_enabled = true;
	avail->suspend_enabled = true;
	avail->kobj = kobj;
	avail->data = data;
	avail->idx = index;
	avail->cpu_node = cpu_node;

	return ret;

failed:
	kobject_put(kobj);
	return ret;
}

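/*
 * For every CPU in the cluster, create a "cpuN" kobject under @parent
 * and populate it with one availability node per CPU low power level.
 * The per-CPU level_list is remembered in cpu_level_available[] so
 * lpm_cpu_mode_allow() can consult it at idle time.
 */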
static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
{
	int cpu;
	int i, cpu_idx;
	struct kobject **cpu_kobj = NULL;
	struct lpm_level_avail *level_list = NULL;
	char cpu_name[20] = {0};
	int ret = 0;

	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
			cpumask_weight(&p->child_cpus), GFP_KERNEL);
	if (!cpu_kobj)
		return -ENOMEM;

	cpu_idx = 0;
	for_each_cpu(cpu, &p->child_cpus) {
		snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
		cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
		if (!cpu_kobj[cpu_idx]) {
			ret = -ENOMEM;
			goto release_kobj;
		}

		level_list = devm_kzalloc(&lpm_pdev->dev,
				p->cpu->nlevels * sizeof(*level_list),
				GFP_KERNEL);
		if (!level_list) {
			ret = -ENOMEM;
			goto release_kobj;
		}

		for (i = 0; i < p->cpu->nlevels; i++) {
			ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
					cpu_kobj[cpu_idx], &level_list[i],
					(void *)p->cpu, cpu, true);
			if (ret)
				goto release_kobj;
		}

		cpu_level_available[cpu] = level_list;
		cpu_idx++;
	}

	return ret;

release_kobj:
	for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
		kobject_put(cpu_kobj[i]);

	return ret;
}

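/*
 * Recursively build the sysfs hierarchy for a cluster: one kobject per
 * cluster, availability nodes for each cluster level, then the same for
 * every child cluster and, finally, for the CPUs the cluster owns.
 */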
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
{
	int ret = 0;
	struct lpm_cluster *child = NULL;
	int i;
	struct kobject *cluster_kobj = NULL;

	if (!p)
		return -ENODEV;

	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
	if (!cluster_kobj)
		return -ENOMEM;

	for (i = 0; i < p->nlevels; i++) {
		ret = create_lvl_avail_nodes(p->levels[i].level_name,
				cluster_kobj, &p->levels[i].available,
				(void *)p, 0, false);
		if (ret)
			return ret;
	}

	list_for_each_entry(child, &p->child, list) {
		ret = create_cluster_lvl_nodes(child, cluster_kobj);
		if (ret)
			return ret;
	}

	if (p->cpu) {
		ret = create_cpu_lvl_nodes(p, cluster_kobj);
		if (ret)
			return ret;
	}

	return 0;
}

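/*
 * Query whether a given CPU or cluster low power level may be entered,
 * picking the idle or suspend flag depending on the caller's path.
 * Before the driver has probed, CPU levels are allowed only from
 * suspend, while cluster levels are refused outright.
 */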
bool lpm_cpu_mode_allow(unsigned int cpu,
		unsigned int index, bool from_idle)
{
	struct lpm_level_avail *avail = cpu_level_available[cpu];

	if (!lpm_pdev || !avail)
		return !from_idle;

	return !!(from_idle ? avail[index].idle_enabled :
				avail[index].suspend_enabled);
}

bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
		unsigned int mode, bool from_idle)
{
	struct lpm_level_avail *avail = &cluster->levels[mode].available;

	if (!lpm_pdev || !avail)
		return false;

	return !!(from_idle ? avail->idle_enabled :
				avail->suspend_enabled);
}

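/*
 * Legacy (non-PSCI) cluster setup: read "qcom,spm-device-names", look up
 * each named SPM device and bind the matching set_mode callback from
 * match_tbl. A cluster without SPM devices is tolerated and simply
 * flagged via no_saw_devices.
 */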
static int parse_legacy_cluster_params(struct device_node *node,
		struct lpm_cluster *c)
{
	int i;
	char *key;
	int ret;
	struct lpm_match {
		char *devname;
		int (*set_mode)(struct low_power_ops *, int,
					struct lpm_cluster_level *);
	};
	struct lpm_match match_tbl[] = {
		{"l2", set_l2_mode},
		{"cci", set_system_mode},
		{"l3", set_l3_mode},
		{"cbf", set_system_mode},
	};

	key = "qcom,spm-device-names";
	c->ndevices = of_property_count_strings(node, key);

	if (c->ndevices < 0) {
		pr_info("%s(): Ignoring cluster params\n", __func__);
		c->no_saw_devices = true;
		c->ndevices = 0;
		return 0;
	}

	c->name = devm_kzalloc(&lpm_pdev->dev, c->ndevices * sizeof(*c->name),
				GFP_KERNEL);
	c->lpm_dev = devm_kzalloc(&lpm_pdev->dev,
				c->ndevices * sizeof(*c->lpm_dev),
				GFP_KERNEL);
	if (!c->name || !c->lpm_dev) {
		ret = -ENOMEM;
		goto failed;
	}

	for (i = 0; i < c->ndevices; i++) {
		char device_name[20];
		int j;

		ret = of_property_read_string_index(node, key, i, &c->name[i]);
		if (ret)
			goto failed;
		snprintf(device_name, sizeof(device_name), "%s-%s",
				c->cluster_name, c->name[i]);

		c->lpm_dev[i].spm = msm_spm_get_device_by_name(device_name);

		if (IS_ERR_OR_NULL(c->lpm_dev[i].spm)) {
			pr_err("Failed to get spm device by name:%s\n",
					device_name);
			ret = PTR_ERR(c->lpm_dev[i].spm);
			goto failed;
		}
		for (j = 0; j < ARRAY_SIZE(match_tbl); j++) {
			if (!strcmp(c->name[i], match_tbl[j].devname))
				c->lpm_dev[i].set_mode = match_tbl[j].set_mode;
		}

		if (!c->lpm_dev[i].set_mode) {
			ret = -ENODEV;
			goto failed;
		}
	}

	key = "qcom,default-level";
	if (of_property_read_u32(node, key, &c->default_level))
		c->default_level = 0;
	return 0;
failed:
	pr_err("%s(): Failed reading %s\n", __func__, key);
	return ret;
}

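/*
 * Read the properties common to every cluster node: its label and, on
 * PSCI targets, the mode shift/mask used to compose the PSCI state ID.
 * Non-PSCI targets fall through to the legacy SPM parsing above.
 */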
static int parse_cluster_params(struct device_node *node,
		struct lpm_cluster *c)
{
	char *key;
	int ret;

	key = "label";
	ret = of_property_read_string(node, key, &c->cluster_name);
	if (ret) {
		pr_err("%s(): Cannot read required param %s\n", __func__, key);
		return ret;
	}

	if (use_psci) {
		key = "qcom,psci-mode-shift";
		ret = of_property_read_u32(node, key,
				&c->psci_mode_shift);
		if (ret) {
			pr_err("%s(): Failed to read param: %s\n",
					__func__, key);
			return ret;
		}

		key = "qcom,psci-mode-mask";
		ret = of_property_read_u32(node, key,
				&c->psci_mode_mask);
		if (ret) {
			pr_err("%s(): Failed to read param: %s\n",
					__func__, key);
			return ret;
		}

		/* Set ndevice to 1 as default */
		c->ndevices = 1;

		return 0;
	} else
		return parse_legacy_cluster_params(node, c);
}

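/* Map an SPM mode name from the device tree to its MSM_SPM_MODE_* value. */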
static int parse_lpm_mode(const char *str)
{
	int i;
	struct lpm_lookup_table mode_lookup[] = {
		{MSM_SPM_MODE_POWER_COLLAPSE, "pc"},
		{MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, "spc"},
		{MSM_SPM_MODE_FASTPC, "fpc"},
		{MSM_SPM_MODE_GDHS, "gdhs"},
		{MSM_SPM_MODE_RETENTION, "retention"},
		{MSM_SPM_MODE_CLOCK_GATING, "wfi"},
		{MSM_SPM_MODE_DISABLED, "active"}
	};

	for (i = 0; i < ARRAY_SIZE(mode_lookup); i++)
		if (!strcmp(str, mode_lookup[i].mode_name))
			return mode_lookup[i].modes;
	return -EINVAL;
}

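/*
 * Read the four per-level power figures consumed by the governor and by
 * calculate_residency(): entry/exit latency, steady-state power, and the
 * energy and time overhead of taking the transition.
 */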
static int parse_power_params(struct device_node *node,
		struct power_params *pwr)
{
	char *key;
	int ret;

	key = "qcom,latency-us";
	ret = of_property_read_u32(node, key, &pwr->latency_us);
	if (ret)
		goto fail;

	key = "qcom,ss-power";
	ret = of_property_read_u32(node, key, &pwr->ss_power);
	if (ret)
		goto fail;

	key = "qcom,energy-overhead";
	ret = of_property_read_u32(node, key, &pwr->energy_overhead);
	if (ret)
		goto fail;

	key = "qcom,time-overhead";
	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
	if (ret)
		goto fail;

fail:
	if (ret)
		pr_err("%s(): %s Error reading %s\n", __func__, node->name,
			key);
	return ret;
}

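/*
 * Parse one qcom,pm-cluster-level node into the next free slot of
 * cluster->levels[]: the level name, the PSCI ID or per-SPM-device
 * modes, the minimum child level gating entry, and assorted flags
 * (notify-rpm, last-core-only, cache flush and reset behaviour).
 */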
static int parse_cluster_level(struct device_node *node,
		struct lpm_cluster *cluster)
{
	int i = 0;
	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
	int ret = -ENOMEM;
	char *key;

	key = "label";
	ret = of_property_read_string(node, key, &level->level_name);
	if (ret)
		goto failed;

	if (use_psci) {
		char *k = "qcom,psci-mode";

		ret = of_property_read_u32(node, k, &level->psci_id);
		if (ret)
			goto failed;

		level->is_reset = of_property_read_bool(node, "qcom,is-reset");
	} else if (!cluster->no_saw_devices) {
		key = "no saw-devices";

		level->mode = devm_kzalloc(&lpm_pdev->dev,
				cluster->ndevices * sizeof(*level->mode),
				GFP_KERNEL);
		if (!level->mode) {
			pr_err("Memory allocation failed\n");
			goto failed;
		}

		for (i = 0; i < cluster->ndevices; i++) {
			const char *spm_mode;
			char key[25] = {0};

			snprintf(key, 25, "qcom,spm-%s-mode", cluster->name[i]);
			ret = of_property_read_string(node, key, &spm_mode);
			if (ret)
				goto failed;

			level->mode[i] = parse_lpm_mode(spm_mode);

			if (level->mode[i] < 0)
				goto failed;

			if (level->mode[i] == MSM_SPM_MODE_POWER_COLLAPSE
				|| level->mode[i] ==
					MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE)
				level->is_reset |= true;
		}
	}

	key = "label";
	ret = of_property_read_string(node, key, &level->level_name);
	if (ret)
		goto failed;

	if (cluster->nlevels != cluster->default_level) {
		key = "min child idx";
		ret = of_property_read_u32(node, "qcom,min-child-idx",
				&level->min_child_level);
		if (ret)
			goto failed;

		if (cluster->min_child_level > level->min_child_level)
			cluster->min_child_level = level->min_child_level;
	}

	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
	level->disable_dynamic_routing = of_property_read_bool(node,
				"qcom,disable-dynamic-int-routing");
	level->last_core_only = of_property_read_bool(node,
				"qcom,last-core-only");
	level->no_cache_flush = of_property_read_bool(node,
				"qcom,no-cache-flush");

	key = "parse_power_params";
	ret = parse_power_params(node, &level->pwr);
	if (ret)
		goto failed;

	key = "qcom,reset-level";
	ret = of_property_read_u32(node, key, &level->reset_level);
	if (ret == -EINVAL)
		level->reset_level = LPM_RESET_LVL_NONE;
	else if (ret)
		goto failed;

	cluster->nlevels++;
	return 0;
failed:
	pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
	return ret;
}

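/* Map a CPU sleep mode name to its MSM_PM_SLEEP_MODE_* value. */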
static int parse_cpu_spm_mode(const char *mode_name)
{
	struct lpm_lookup_table pm_sm_lookup[] = {
		{MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
			"wfi"},
		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
			"standalone_pc"},
		{MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
			"pc"},
		{MSM_PM_SLEEP_MODE_RETENTION,
			"retention"},
		{MSM_PM_SLEEP_MODE_FASTPC,
			"fpc"},
	};
	int i;
	int ret = -EINVAL;

	for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
		if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
			ret = pm_sm_lookup[i].modes;
			break;
		}
	}
	return ret;
}

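/*
 * Read a single CPU level's mode: its name always, plus the PSCI state
 * ID on PSCI targets or the equivalent MSM_PM sleep mode otherwise.
 */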
static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
{
	char *key;
	int ret;

	key = "qcom,spm-cpu-mode";
	ret = of_property_read_string(n, key, &l->name);
	if (ret) {
		pr_err("Failed %s %d\n", n->name, __LINE__);
		return ret;
	}

	if (use_psci) {
		key = "qcom,psci-cpu-mode";

		ret = of_property_read_u32(n, key, &l->psci_id);
		if (ret) {
			pr_err("Failed reading %s on device %s\n", key,
					n->name);
			return ret;
		}
		key = "qcom,hyp-psci";

		l->hyp_psci = of_property_read_bool(n, key);
	} else {
		l->mode = parse_cpu_spm_mode(l->name);

		if (l->mode < 0)
			return l->mode;
	}
	return 0;
}

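/*
 * Resolve the "qcom,cpu" phandle list of a pm-cpu node into a cpumask.
 * Targets that do not describe CPUs in the device tree get every
 * possible CPU, i.e. a single implicit cluster.
 */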
static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
{
	struct device_node *cpu_node;
	int cpu;
	int idx = 0;

	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
	if (!cpu_node) {
		pr_info("%s: No CPU phandle, assuming single cluster\n",
				node->full_name);
		/*
		 * Not all targets have the cpu node populated in the device
		 * tree. If cpu node is not populated assume all possible
		 * nodes belong to this cluster
		 */
		cpumask_copy(mask, cpu_possible_mask);
		return 0;
	}

	while (cpu_node) {
		for_each_possible_cpu(cpu) {
			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
				cpumask_set_cpu(cpu, mask);
				break;
			}
		}
		of_node_put(cpu_node);
		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
	}

	return 0;
}

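/*
 * Break-even residency between two levels: the sleep time at which the
 * extra energy spent entering the deeper (next) level is paid back by
 * its lower steady-state power, computed as
 *
 *   residency = (dE_overhead - d(ss_power * time_overhead_us))
 *		  / (base ss_power - next ss_power)
 *
 * with d() meaning (next - base). The result is clamped to at least the
 * deeper level's time overhead, which is also returned as a fallback if
 * the arithmetic goes negative.
 */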
static int calculate_residency(struct power_params *base_pwr,
					struct power_params *next_pwr)
{
	int32_t residency = (int32_t)(next_pwr->energy_overhead -
			base_pwr->energy_overhead) -
		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));

	residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);

	if (residency < 0) {
		pr_err("%s: residency < 0 for LPM\n",
				__func__);
		return next_pwr->time_overhead_us;
	}

	return residency < next_pwr->time_overhead_us ?
				next_pwr->time_overhead_us : residency;
}

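/*
 * Parse the qcom,pm-cpu node: allocate the cluster's lpm_cpu, read the
 * PSCI mode shift/mask if applicable, parse every child CPU level, and
 * finally fill in the pairwise break-even residencies between levels.
 */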
static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
{
	struct device_node *n;
	int ret = -ENOMEM;
	int i, j;
	char *key;

	c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
	if (!c->cpu)
		return ret;

	c->cpu->parent = c;
	if (use_psci) {
		key = "qcom,psci-mode-shift";
		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
		if (ret) {
			pr_err("Failed reading %s on device %s\n", key,
					node->name);
			return ret;
		}

		key = "qcom,psci-mode-mask";
		ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
		if (ret) {
			pr_err("Failed reading %s on device %s\n", key,
					node->name);
			return ret;
		}
	}
	for_each_child_of_node(node, n) {
		struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];

		c->cpu->nlevels++;

		ret = parse_cpu_mode(n, l);
		if (ret < 0) {
			pr_info("Failed %s\n", l->name);
			goto failed;
		}

		ret = parse_power_params(n, &l->pwr);
		if (ret)
			goto failed;

		key = "qcom,use-broadcast-timer";
		l->use_bc_timer = of_property_read_bool(n, key);

		l->is_reset = of_property_read_bool(n, "qcom,is-reset");

		key = "qcom,jtag-save-restore";
		l->jtag_save_restore = of_property_read_bool(n, key);

		key = "qcom,reset-level";
		ret = of_property_read_u32(n, key, &l->reset_level);
		if (ret == -EINVAL)
			l->reset_level = LPM_RESET_LVL_NONE;
		else if (ret)
			goto failed;
		of_node_put(n);
	}
	for (i = 0; i < c->cpu->nlevels; i++) {
		for (j = 0; j < c->cpu->nlevels; j++) {
			if (i >= j) {
				c->cpu->levels[i].pwr.residencies[j] = 0;
				continue;
			}

			c->cpu->levels[i].pwr.residencies[j] =
				calculate_residency(&c->cpu->levels[i].pwr,
					&c->cpu->levels[j].pwr);

			pr_err("%s: idx %d %u\n", __func__, j,
					c->cpu->levels[i].pwr.residencies[j]);
		}
	}

	return 0;
failed:
	of_node_put(n);
	pr_err("%s(): Failed with error code:%d\n", __func__, ret);
	return ret;
}

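/*
 * Detach and free the subtree rooted at @cluster. The per-node memory
 * itself is devm-managed, so only the list linkage and the device
 * count need unwinding here.
 */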
void free_cluster_node(struct lpm_cluster *cluster)
{
	struct lpm_cluster *cl, *m;

	list_for_each_entry_safe(cl, m, &cluster->child, list) {
		list_del(&cl->list);
		free_cluster_node(cl);
	}

	cluster->ndevices = 0;
}

/*
 * TODO:
 * Expects a CPU or a cluster only. This ensures that affinity
 * level of a cluster is consistent with reference to its
 * child nodes.
 */
static struct lpm_cluster *parse_cluster(struct device_node *node,
		struct lpm_cluster *parent)
{
	struct lpm_cluster *c;
	struct device_node *n;
	char *key;
	int ret = 0;
	int i, j;

	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	ret = parse_cluster_params(node, c);
	if (ret)
		goto failed_parse_params;

	INIT_LIST_HEAD(&c->child);
	c->parent = parent;
	spin_lock_init(&c->sync_lock);
	c->min_child_level = NR_LPM_LEVELS;

	for_each_child_of_node(node, n) {

		if (!n->name)
			continue;
		key = "qcom,pm-cluster-level";
		if (!of_node_cmp(n->name, key)) {
			if (parse_cluster_level(n, c)) {
				of_node_put(n);
				goto failed_parse_cluster;
			}
			of_node_put(n);
			continue;
		}

		key = "qcom,pm-cluster";
		if (!of_node_cmp(n->name, key)) {
			struct lpm_cluster *child;

			if (c->no_saw_devices)
				pr_info("%s: SAW device not provided.\n",
					__func__);

			child = parse_cluster(n, c);
			if (!child) {
				of_node_put(n);
				goto failed_parse_cluster;
			}

			list_add(&child->list, &c->child);
			cpumask_or(&c->child_cpus, &c->child_cpus,
					&child->child_cpus);
			c->aff_level = child->aff_level + 1;
			of_node_put(n);
			continue;
		}

		key = "qcom,pm-cpu";
		if (!of_node_cmp(n->name, key)) {
			/*
			 * Parse the cpu node only if a pm-cpu node
			 * is available, though the mask is defined @ the
			 * cluster level
			 */
			if (get_cpumask_for_node(node, &c->child_cpus))
				goto failed_parse_cluster;

			if (parse_cpu_levels(n, c)) {
				of_node_put(n);
				goto failed_parse_cluster;
			}

			c->aff_level = 1;
			of_node_put(n);

			for_each_cpu(i, &c->child_cpus) {
				per_cpu(max_residency, i) = devm_kzalloc(
					&lpm_pdev->dev,
					sizeof(uint32_t) * c->cpu->nlevels,
					GFP_KERNEL);
				if (!per_cpu(max_residency, i))
					return ERR_PTR(-ENOMEM);
				set_optimum_cpu_residency(c->cpu, i, true);
			}
		}
	}

	if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
		c->last_level = c->default_level;
	else
		c->last_level = c->nlevels - 1;

	for (i = 0; i < c->nlevels; i++) {
		for (j = 0; j < c->nlevels; j++) {
			if (i >= j) {
				c->levels[i].pwr.residencies[j] = 0;
				continue;
			}
			c->levels[i].pwr.residencies[j] = calculate_residency(
				&c->levels[i].pwr, &c->levels[j].pwr);
		}
	}
	set_optimum_cluster_residency(c, true);
	return c;

failed_parse_cluster:
	pr_err("Failed parse cluster:%s\n", key);
	if (parent)
		list_del(&c->list);
	free_cluster_node(c);
failed_parse_params:
	pr_err("Failed parse params\n");
	return NULL;
}
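
/*
 * Entry point used at probe: detect PSCI vs legacy mode, locate the
 * root qcom,pm-cluster node and recursively parse the whole hierarchy.
 */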
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
{
	struct device_node *top = NULL;
	struct lpm_cluster *c;

	use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");

	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
	if (!top) {
		pr_err("Failed to find root node\n");
		return ERR_PTR(-ENODEV);
	}

	lpm_pdev = pdev;
	c = parse_cluster(top, NULL);
	of_node_put(top);
	return c;
}

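/*
 * Debug helper: walk the parsed hierarchy and print every cluster
 * level, SPM device and CPU mode, indenting by depth.
 */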
void cluster_dt_walkthrough(struct lpm_cluster *cluster)
{
	struct list_head *list;
	int i, j;
	static int id;
	char str[10] = {0};

	if (!cluster)
		return;

	for (i = 0; i < id; i++)
		snprintf(str + i, 10 - i, "\t");
	pr_info("%d\n", __LINE__);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *l = &cluster->levels[i];

		pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices);
		for (j = 0; j < cluster->ndevices; j++)
			pr_info("%sDevice: %pk id:%pk\n", str,
					&cluster->name[j], &l->mode[i]);
	}

	if (cluster->cpu) {
		pr_info("%d\n", __LINE__);
		for (j = 0; j < cluster->cpu->nlevels; j++)
			pr_info("%s\tCPU mode: %s id:%d\n", str,
					cluster->cpu->levels[j].name,
					cluster->cpu->levels[j].mode);
	}

	id++;

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		pr_info("%d\n", __LINE__);
		n = list_entry(list, typeof(*n), list);
		cluster_dt_walkthrough(n);
	}
	id--;
}