/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
#include "lpm-levels.h"

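/*
 * Every low power level gets a trio of sysfs attributes, created in
 * create_lvl_avail_nodes() below: idle_enabled and suspend_enabled
 * (rw) and latency_us (ro). The parent kobject is handed in by the
 * platform driver, so the exact path is target dependent; a typical
 * layout (assuming a /sys/module/lpm_levels parent) would be
 * .../<cluster>/<level>/idle_enabled and, for CPU levels,
 * .../<cluster>/cpu<N>/<level>/idle_enabled.
 */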
enum lpm_type {
	IDLE = 0,
	SUSPEND,
	LATENCY,
	LPM_TYPE_NR
};

struct lpm_type_str {
	enum lpm_type type;
	char *str;
};

static const struct lpm_type_str lpm_types[] = {
	{IDLE, "idle_enabled"},
	{SUSPEND, "suspend_enabled"},
	{LATENCY, "latency_us"},
};

static DEFINE_PER_CPU(uint32_t *, max_residency);
static DEFINE_PER_CPU(uint32_t *, min_residency);
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;

static void *get_enabled_ptr(struct kobj_attribute *attr,
					struct lpm_level_avail *avail)
{
	void *arg = NULL;

	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
		arg = (void *) &avail->idle_enabled;
	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
		arg = (void *) &avail->suspend_enabled;

	return arg;
}

static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
					struct kobj_attribute *attr)
{
	struct lpm_level_avail *avail = NULL;

	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
		avail = container_of(attr, struct lpm_level_avail,
					idle_enabled_attr);
	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
		avail = container_of(attr, struct lpm_level_avail,
					suspend_enabled_attr);
	else if (!strcmp(attr->attr.name, lpm_types[LATENCY].str))
		avail = container_of(attr, struct lpm_level_avail,
					latency_attr);

	return avail;
}

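/*
 * Recompute the usable residency window of each of @cpu's levels
 * after an enable/disable change (or once at probe). For an enabled
 * level i, max_residency[i] becomes the smallest non-zero break-even
 * residency against any deeper enabled level (past that sleep length
 * the deeper level wins), and min_residency[i] becomes one more than
 * the max_residency of the nearest shallower enabled level, falling
 * back to the level's own time_overhead_us. Disabled levels get a
 * zero window. Illustrative numbers: if level 0's break-even against
 * levels 1 and 2 is 100us and 500us, level 0 covers sleeps up to
 * 100us and level 1 starts at 101us.
 */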
static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
		bool probe_time)
{
	int i, j;
	bool mode_avail;
	uint32_t *maximum_residency = per_cpu(max_residency, cpu_id);
	uint32_t *minimum_residency = per_cpu(min_residency, cpu_id);

	for (i = 0; i < cpu->nlevels; i++) {
		struct power_params *pwr = &cpu->levels[i].pwr;

		mode_avail = probe_time ||
			lpm_cpu_mode_allow(cpu_id, i, true);

		if (!mode_avail) {
			maximum_residency[i] = 0;
			minimum_residency[i] = 0;
			continue;
		}

		maximum_residency[i] = ~0;
		for (j = i + 1; j < cpu->nlevels; j++) {
			mode_avail = probe_time ||
				lpm_cpu_mode_allow(cpu_id, j, true);

			if (mode_avail &&
				(maximum_residency[i] > pwr->residencies[j]) &&
				(pwr->residencies[j] != 0))
				maximum_residency[i] = pwr->residencies[j];
		}

		minimum_residency[i] = pwr->time_overhead_us;
		for (j = i - 1; j >= 0; j--) {
			if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) {
				minimum_residency[i] = maximum_residency[j] + 1;
				break;
			}
		}
	}
}

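/*
 * Cluster counterpart of set_optimum_cpu_residency(): the same
 * windowing, but stored in each cluster level's power_params rather
 * than in the per-CPU arrays.
 */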
static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
		bool probe_time)
{
	int i, j;
	bool mode_avail;

	for (i = 0; i < cluster->nlevels; i++) {
		struct power_params *pwr = &cluster->levels[i].pwr;

		mode_avail = probe_time ||
			lpm_cluster_mode_allow(cluster, i, true);

		if (!mode_avail) {
			pwr->max_residency = 0;
			pwr->min_residency = 0;
			continue;
		}

		pwr->max_residency = ~0;
		for (j = i + 1; j < cluster->nlevels; j++) {
			mode_avail = probe_time ||
				lpm_cluster_mode_allow(cluster, j, true);
			if (mode_avail &&
				(pwr->max_residency > pwr->residencies[j]) &&
				(pwr->residencies[j] != 0))
				pwr->max_residency = pwr->residencies[j];
		}

		pwr->min_residency = pwr->time_overhead_us;
		for (j = i - 1; j >= 0; j--) {
			if (probe_time ||
				lpm_cluster_mode_allow(cluster, j, true)) {
				pwr->min_residency =
					cluster->levels[j].pwr.max_residency + 1;
				break;
			}
		}
	}
}

uint32_t *get_per_cpu_max_residency(int cpu)
{
	return per_cpu(max_residency, cpu);
}

uint32_t *get_per_cpu_min_residency(int cpu)
{
	return per_cpu(min_residency, cpu);
}

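/* Read-only view of the latency_us value captured at node creation. */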
static ssize_t lpm_latency_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int ret = 0;
	struct kernel_param kp;
	struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);

	if (WARN_ON(!avail))
		return -EINVAL;

	kp.arg = &avail->latency_us;

	ret = param_get_uint(buf, &kp);
	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

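/*
 * idle_enabled/suspend_enabled accessors. Values go through the
 * standard bool param helpers, so the usual "Y"/"N"/"1"/"0"
 * spellings are accepted. A hypothetical session (path assumes the
 * typical /sys/module/lpm_levels parent):
 *
 *	echo N > .../<cluster>/cpu0/<level>/idle_enabled
 *	cat .../<cluster>/cpu0/<level>/idle_enabled
 */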
ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	int ret = 0;
	struct kernel_param kp;

	kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
	ret = param_get_bool(buf, &kp);
	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

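/*
 * A store not only flips the flag; it also recomputes the residency
 * windows so the remaining enabled levels absorb the range the
 * toggled level used to cover.
 */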
ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t len)
{
	int ret = 0;
	struct kernel_param kp;
	struct lpm_level_avail *avail;

	avail = get_avail_ptr(kobj, attr);
	if (WARN_ON(!avail))
		return -EINVAL;
	kp.arg = get_enabled_ptr(attr, avail);
	ret = param_set_bool(buf, &kp);

	if (avail->cpu_node)
		set_optimum_cpu_residency(avail->data, avail->idx, false);
	else
		set_optimum_cluster_residency(avail->data, false);

	return ret ? ret : len;
}

static int create_lvl_avail_nodes(const char *name,
			struct kobject *parent, struct lpm_level_avail *avail,
			void *data, int index, bool cpu_node)
{
	struct attribute_group *attr_group = NULL;
	struct attribute **attr = NULL;
	struct kobject *kobj = NULL;
	int ret = 0;

	kobj = kobject_create_and_add(name, parent);
	if (!kobj)
		return -ENOMEM;

	attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
					GFP_KERNEL);
	if (!attr_group) {
		ret = -ENOMEM;
		goto failed;
	}

	attr = devm_kzalloc(&lpm_pdev->dev,
		sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto failed;
	}

	sysfs_attr_init(&avail->idle_enabled_attr.attr);
	avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
	avail->idle_enabled_attr.attr.mode = 0644;
	avail->idle_enabled_attr.show = lpm_enable_show;
	avail->idle_enabled_attr.store = lpm_enable_store;

	sysfs_attr_init(&avail->suspend_enabled_attr.attr);
	avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
	avail->suspend_enabled_attr.attr.mode = 0644;
	avail->suspend_enabled_attr.show = lpm_enable_show;
	avail->suspend_enabled_attr.store = lpm_enable_store;

	sysfs_attr_init(&avail->latency_attr.attr);
	avail->latency_attr.attr.name = lpm_types[LATENCY].str;
	avail->latency_attr.attr.mode = 0444;
	avail->latency_attr.show = lpm_latency_show;
	avail->latency_attr.store = NULL;

	attr[0] = &avail->idle_enabled_attr.attr;
	attr[1] = &avail->suspend_enabled_attr.attr;
	attr[2] = &avail->latency_attr.attr;
	attr[3] = NULL;
	attr_group->attrs = attr;

	ret = sysfs_create_group(kobj, attr_group);
	if (ret) {
		ret = -ENOMEM;
		goto failed;
	}

	avail->idle_enabled = true;
	avail->suspend_enabled = true;
	avail->kobj = kobj;
	avail->data = data;
	avail->idx = index;
	avail->cpu_node = cpu_node;

	return ret;

failed:
	kobject_put(kobj);
	return ret;
}

static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
{
	int cpu;
	int i, cpu_idx;
	struct kobject **cpu_kobj = NULL;
	struct lpm_level_avail *level_list = NULL;
	char cpu_name[20] = {0};
	int ret = 0;
	struct list_head *pos;

	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
			cpumask_weight(&p->child_cpus), GFP_KERNEL);
	if (!cpu_kobj)
		return -ENOMEM;

	cpu_idx = 0;
	list_for_each(pos, &p->cpu) {
		struct lpm_cpu *lpm_cpu = list_entry(pos, struct lpm_cpu, list);

		for_each_cpu(cpu, &lpm_cpu->related_cpus) {
			snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
			cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name,
					parent);
			if (!cpu_kobj[cpu_idx]) {
				ret = -ENOMEM;
				goto release_kobj;
			}

			level_list = devm_kzalloc(&lpm_pdev->dev,
					lpm_cpu->nlevels * sizeof(*level_list),
					GFP_KERNEL);
			if (!level_list) {
				ret = -ENOMEM;
				goto release_kobj;
			}

			/*
			 * Skip enable/disable for WFI. cpuidle expects WFI to
			 * be available at all times.
			 */
			for (i = 1; i < lpm_cpu->nlevels; i++) {
				level_list[i].latency_us =
					p->levels[i].pwr.latency_us;
				ret = create_lvl_avail_nodes(
						lpm_cpu->levels[i].name,
						cpu_kobj[cpu_idx],
						&level_list[i],
						(void *)lpm_cpu, cpu, true);
				if (ret)
					goto release_kobj;
			}

			cpu_level_available[cpu] = level_list;
			cpu_idx++;
		}
	}

	return ret;

release_kobj:
	for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
		kobject_put(cpu_kobj[i]);

	return ret;
}

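/*
 * Recursively mirror the cluster topology under @kobj: one directory
 * per cluster, child cluster and CPU, each populated with the
 * per-level attribute groups created above.
 */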
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
{
	int ret = 0;
	struct lpm_cluster *child = NULL;
	int i;
	struct kobject *cluster_kobj = NULL;

	if (!p)
		return -ENODEV;

	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
	if (!cluster_kobj)
		return -ENOMEM;

	for (i = 0; i < p->nlevels; i++) {
		p->levels[i].available.latency_us = p->levels[i].pwr.latency_us;
		ret = create_lvl_avail_nodes(p->levels[i].level_name,
				cluster_kobj, &p->levels[i].available,
				(void *)p, 0, false);
		if (ret)
			return ret;
	}

	list_for_each_entry(child, &p->child, list) {
		ret = create_cluster_lvl_nodes(child, cluster_kobj);
		if (ret)
			return ret;
	}

	if (!list_empty(&p->cpu)) {
		ret = create_cpu_lvl_nodes(p, cluster_kobj);
		if (ret)
			return ret;
	}

	return 0;
}

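/*
 * Before the driver has probed (or when the per-CPU table is absent)
 * the fallback is deliberately asymmetric: idle modes are reported
 * unavailable, suspend modes available, so system suspend can still
 * reach its deepest state.
 */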
bool lpm_cpu_mode_allow(unsigned int cpu,
		unsigned int index, bool from_idle)
{
	struct lpm_level_avail *avail = cpu_level_available[cpu];

	if (!lpm_pdev || !avail)
		return !from_idle;

	return !!(from_idle ? avail[index].idle_enabled :
				avail[index].suspend_enabled);
}

bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
		unsigned int mode, bool from_idle)
{
	struct lpm_level_avail *avail = &cluster->levels[mode].available;

	if (!lpm_pdev || !avail)
		return false;

	return !!(from_idle ? avail->idle_enabled :
				avail->suspend_enabled);
}

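/*
 * qcom,psci-mode-shift and qcom,psci-mode-mask locate this level of
 * the hierarchy inside the composite PSCI state id; the actual
 * composition happens in lpm-levels.c. A sketch of the intended
 * encoding, assuming the usual field layout:
 *
 *	state_id |= (level->psci_id & c->psci_mode_mask)
 *				<< c->psci_mode_shift;
 */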
static int parse_cluster_params(struct device_node *node,
		struct lpm_cluster *c)
{
	char *key;
	int ret;

	key = "label";
	ret = of_property_read_string(node, key, &c->cluster_name);
	if (ret) {
		pr_err("%s(): Cannot read required param %s\n", __func__, key);
		return ret;
	}

	key = "qcom,psci-mode-shift";
	ret = of_property_read_u32(node, key, &c->psci_mode_shift);
	if (ret) {
		pr_err("%s(): Failed to read param: %s\n", __func__, key);
		return ret;
	}

	key = "qcom,psci-mode-mask";
	ret = of_property_read_u32(node, key, &c->psci_mode_mask);
	if (ret) {
		pr_err("%s(): Failed to read param: %s\n", __func__, key);
		return ret;
	}

	/* Set ndevices to 1 as default */
	c->ndevices = 1;

	return 0;
}

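/*
 * All four power parameters are mandatory. A hypothetical level node
 * (numbers illustrative only; real values are characterized per
 * target):
 *
 *	qcom,latency-us = <80>;
 *	qcom,ss-power = <300>;
 *	qcom,energy-overhead = <70000>;
 *	qcom,time-overhead = <100>;
 */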
static int parse_power_params(struct device_node *node,
		struct power_params *pwr)
{
	char *key;
	int ret;

	key = "qcom,latency-us";
	ret = of_property_read_u32(node, key, &pwr->latency_us);
	if (ret)
		goto fail;

	key = "qcom,ss-power";
	ret = of_property_read_u32(node, key, &pwr->ss_power);
	if (ret)
		goto fail;

	key = "qcom,energy-overhead";
	ret = of_property_read_u32(node, key, &pwr->energy_overhead);
	if (ret)
		goto fail;

	key = "qcom,time-overhead";
	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
	if (ret)
		goto fail;

fail:
	if (ret)
		pr_err("%s(): %s Error reading %s\n", __func__, node->name,
			key);
	return ret;
}

static int parse_cluster_level(struct device_node *node,
		struct lpm_cluster *cluster)
{
	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
	int ret = -ENOMEM;
	char *key;

	key = "label";
	ret = of_property_read_string(node, key, &level->level_name);
	if (ret)
		goto failed;

	key = "qcom,psci-mode";
	ret = of_property_read_u32(node, key, &level->psci_id);
	if (ret)
		goto failed;

	level->is_reset = of_property_read_bool(node, "qcom,is-reset");

	if (cluster->nlevels != cluster->default_level) {
		key = "qcom,min-child-idx";
		ret = of_property_read_u32(node, key,
				&level->min_child_level);
		if (ret)
			goto failed;

		if (cluster->min_child_level > level->min_child_level)
			cluster->min_child_level = level->min_child_level;
	}

	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");

	key = "parse_power_params";
	ret = parse_power_params(node, &level->pwr);
	if (ret)
		goto failed;

	key = "qcom,reset-level";
	ret = of_property_read_u32(node, key, &level->reset_level);
	if (ret == -EINVAL)
		level->reset_level = LPM_RESET_LVL_NONE;
	else if (ret)
		goto failed;

	cluster->nlevels++;
	return 0;

failed:
	pr_err("Failed %s() key = %s ret = %d\n", __func__, key, ret);
	kfree(level->mode);
	level->mode = NULL;
	return ret;
}

static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
{
	char *key;
	int ret;

	key = "qcom,spm-cpu-mode";
	ret = of_property_read_string(n, key, &l->name);
	if (ret) {
		pr_err("Failed %s %d\n", n->name, __LINE__);
		return ret;
	}

	key = "qcom,psci-cpu-mode";
	ret = of_property_read_u32(n, key, &l->psci_id);
	if (ret) {
		pr_err("Failed reading %s on device %s\n", key,
			n->name);
		return ret;
	}

	key = "qcom,hyp-psci";
	l->hyp_psci = of_property_read_bool(n, key);

	return 0;
}

static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
{
	struct device_node *cpu_node;
	int cpu;
	int idx = 0;

	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
	if (!cpu_node) {
		pr_info("%s: No CPU phandle, assuming single cluster\n",
			node->full_name);
		/*
		 * Not all targets have the cpu node populated in the device
		 * tree. If the cpu node is not populated, assume all possible
		 * CPUs belong to this cluster.
		 */
		cpumask_copy(mask, cpu_possible_mask);
		return 0;
	}

	while (cpu_node) {
		for_each_possible_cpu(cpu) {
			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
				cpumask_set_cpu(cpu, mask);
				break;
			}
		}
		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
	}

	return 0;
}

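/*
 * Break-even residency between a shallower (base) and deeper (next)
 * level: the sleep time t at which next's larger entry/exit overhead
 * is paid back by its lower steady-state power. Balancing total
 * energy,
 *
 *	(E_next - P_next * T_next) + P_next * t =
 *		(E_base - P_base * T_base) + P_base * t
 *
 * and solving for t yields the expression below (E = energy
 * overhead, P = ss_power, T = time overhead). The result is clamped
 * to at least next's time_overhead_us.
 */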
static int calculate_residency(struct power_params *base_pwr,
		struct power_params *next_pwr)
{
	int32_t residency = (int32_t)(next_pwr->energy_overhead -
			base_pwr->energy_overhead) -
		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));

	residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);

	if (residency < 0) {
		pr_err("%s: residency < 0 for LPM\n", __func__);
		return next_pwr->time_overhead_us;
	}

	return residency < next_pwr->time_overhead_us ?
				next_pwr->time_overhead_us : residency;
}

static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
{
	struct device_node *n;
	int ret, i, j;
	const char *key;

	for_each_child_of_node(node, n) {
		struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels];

		cpu->nlevels++;

		ret = parse_cpu_mode(n, l);
		if (ret < 0) {
			pr_info("Failed %s\n", l->name);
			return ret;
		}

		ret = parse_power_params(n, &l->pwr);
		if (ret)
			return ret;

		key = "qcom,use-broadcast-timer";
		l->use_bc_timer = of_property_read_bool(n, key);

		l->is_reset = of_property_read_bool(n, "qcom,is-reset");

		key = "qcom,jtag-save-restore";
		l->jtag_save_restore = of_property_read_bool(n, key);

		key = "qcom,reset-level";
		ret = of_property_read_u32(n, key, &l->reset_level);
		if (ret == -EINVAL)
			l->reset_level = LPM_RESET_LVL_NONE;
		else if (ret)
			return ret;
	}

	for (i = 0; i < cpu->nlevels; i++) {
		for (j = 0; j < cpu->nlevels; j++) {
			if (i >= j) {
				cpu->levels[i].pwr.residencies[j] = 0;
				continue;
			}

			cpu->levels[i].pwr.residencies[j] =
				calculate_residency(&cpu->levels[i].pwr,
					&cpu->levels[j].pwr);

			pr_err("%s: idx %d %u\n", __func__, j,
				cpu->levels[i].pwr.residencies[j]);
		}
	}

	for_each_cpu(i, &cpu->related_cpus) {
		per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev,
				sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL);
		if (!per_cpu(max_residency, i))
			return -ENOMEM;
		per_cpu(min_residency, i) = devm_kzalloc(&lpm_pdev->dev,
				sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL);
		if (!per_cpu(min_residency, i))
			return -ENOMEM;
		set_optimum_cpu_residency(cpu, i, true);
	}

	return 0;
}

static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
{
	int ret = -ENOMEM, i;
	char *key;
	struct lpm_cpu *cpu;

	cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL);
	if (!cpu)
		return ret;

	if (get_cpumask_for_node(node, &cpu->related_cpus))
		return -EINVAL;

	cpu->parent = c;

	key = "qcom,psci-mode-shift";
	ret = of_property_read_u32(node, key, &cpu->psci_mode_shift);
	if (ret) {
		pr_err("Failed reading %s on device %s\n", key,
			node->name);
		return ret;
	}

	key = "qcom,psci-mode-mask";
	ret = of_property_read_u32(node, key, &cpu->psci_mode_mask);
	if (ret) {
		pr_err("Failed reading %s on device %s\n", key,
			node->name);
		return ret;
	}

	ret = parse_cpu(node, cpu);
	if (ret)
		goto failed;

	cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
	list_add(&cpu->list, &c->cpu);

	return 0;

failed:
	for (i = 0; i < cpu->nlevels; i++) {
		kfree(cpu->levels[i].name);
		cpu->levels[i].name = NULL;
	}
	devm_kfree(&lpm_pdev->dev, cpu);	/* cpu was devm-allocated */
	pr_err("%s(): Failed with error code:%d\n", __func__, ret);
	return ret;
}

void free_cluster_node(struct lpm_cluster *cluster)
{
	struct lpm_cluster *cl, *m;
	struct lpm_cpu *cpu, *n;
	int i;

	list_for_each_entry_safe(cl, m, &cluster->child, list) {
		list_del(&cl->list);
		free_cluster_node(cl);
	}

	list_for_each_entry_safe(cpu, n, &cluster->cpu, list) {
		for (i = 0; i < cpu->nlevels; i++) {
			kfree(cpu->levels[i].name);
			cpu->levels[i].name = NULL;
		}
		list_del(&cpu->list);
	}

	for (i = 0; i < cluster->nlevels; i++) {
		kfree(cluster->levels[i].mode);
		cluster->levels[i].mode = NULL;
	}
	kfree(cluster->name);
	cluster->name = NULL;
	cluster->ndevices = 0;
}

/*
 * TODO:
 * Expects a CPU or a cluster only. This ensures that affinity
 * level of a cluster is consistent with reference to its
 * child nodes.
 */
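/*
 * A hypothetical sketch of the device tree shape this parser expects
 * (node names as matched below; property contents elided, see the
 * target dtsi for real nodes):
 *
 *	qcom,pm-cluster {
 *		qcom,pm-cluster-level { ... };
 *		qcom,pm-cluster {
 *			qcom,pm-cluster-level { ... };
 *			qcom,pm-cpu {
 *				<one child node per cpu level> { ... };
 *			};
 *		};
 *	};
 */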
struct lpm_cluster *parse_cluster(struct device_node *node,
		struct lpm_cluster *parent)
{
	struct lpm_cluster *c;
	struct device_node *n;
	char *key;
	int ret = 0;
	int i, j;

	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	ret = parse_cluster_params(node, c);
	if (ret)
		goto failed_parse_params;

	INIT_LIST_HEAD(&c->child);
	INIT_LIST_HEAD(&c->cpu);
	c->parent = parent;
	spin_lock_init(&c->sync_lock);
	c->min_child_level = NR_LPM_LEVELS;

	for_each_child_of_node(node, n) {
		if (!n->name)
			continue;

		key = "qcom,pm-cluster-level";
		if (!of_node_cmp(n->name, key)) {
			if (parse_cluster_level(n, c))
				goto failed_parse_cluster;
			continue;
		}

		key = "qcom,pm-cluster";
		if (!of_node_cmp(n->name, key)) {
			struct lpm_cluster *child;

			child = parse_cluster(n, c);
			if (IS_ERR_OR_NULL(child))
				goto failed_parse_cluster;

			list_add(&child->list, &c->child);
			cpumask_or(&c->child_cpus, &c->child_cpus,
					&child->child_cpus);
			c->aff_level = child->aff_level + 1;
			continue;
		}

		key = "qcom,pm-cpu";
		if (!of_node_cmp(n->name, key)) {
			if (parse_cpu_levels(n, c))
				goto failed_parse_cluster;

			c->aff_level = 1;
		}
	}

	if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
		c->last_level = c->default_level;
	else
		c->last_level = c->nlevels - 1;

	for (i = 0; i < c->nlevels; i++) {
		for (j = 0; j < c->nlevels; j++) {
			if (i >= j) {
				c->levels[i].pwr.residencies[j] = 0;
				continue;
			}
			c->levels[i].pwr.residencies[j] = calculate_residency(
				&c->levels[i].pwr, &c->levels[j].pwr);
		}
	}
	set_optimum_cluster_residency(c, true);
	return c;

failed_parse_cluster:
	pr_err("Failed parse cluster:%s\n", key);
	if (parent)
		list_del(&c->list);
	free_cluster_node(c);
failed_parse_params:
	pr_err("Failed parse params\n");
	devm_kfree(&lpm_pdev->dev, c);	/* c was devm-allocated */
	return NULL;
}

struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
{
	struct device_node *top = NULL;

	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
	if (!top) {
		pr_err("Failed to find root node\n");
		return ERR_PTR(-ENODEV);
	}

	lpm_pdev = pdev;
	return parse_cluster(top, NULL);
}

void cluster_dt_walkthrough(struct lpm_cluster *cluster)
{
	struct list_head *list;
	struct lpm_cpu *cpu;
	int i, j;
	static int id;
	char str[10] = {0};

	if (!cluster)
		return;

	for (i = 0; i < id; i++)
		snprintf(str + i, 10 - i, "\t");
	pr_info("%d\n", __LINE__);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *l = &cluster->levels[i];

		pr_info("%d ndevices:%d\n", __LINE__, cluster->ndevices);
		for (j = 0; j < cluster->ndevices; j++)
			pr_info("%sDevice: %p id:%p\n", str,
				&cluster->name[j], &l->mode[i]);
	}

	list_for_each_entry(cpu, &cluster->cpu, list) {
		pr_info("%d\n", __LINE__);
		for (j = 0; j < cpu->nlevels; j++)
			pr_info("%s\tCPU mode: %s id:%d\n", str,
				cpu->levels[j].name,
				cpu->levels[j].mode);
	}

	id++;

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		pr_info("%d\n", __LINE__);
		n = list_entry(list, typeof(*n), list);
		cluster_dt_walkthrough(n);
	}
	id--;
}