/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/moduleparam.h>
#include "lpm-levels.h"

enum lpm_type {
	IDLE = 0,
	SUSPEND,
	LATENCY,
	LPM_TYPE_NR
};

struct lpm_type_str {
	enum lpm_type type;
	char *str;
};

static const struct lpm_type_str lpm_types[] = {
	{IDLE, "idle_enabled"},
	{SUSPEND, "suspend_enabled"},
	{LATENCY, "latency_us"},
};

static DEFINE_PER_CPU(uint32_t *, max_residency);
static DEFINE_PER_CPU(uint32_t *, min_residency);
static struct lpm_level_avail *cpu_level_available[NR_CPUS];
static struct platform_device *lpm_pdev;

static void *get_enabled_ptr(struct kobj_attribute *attr,
				struct lpm_level_avail *avail)
{
	void *arg = NULL;

	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
		arg = (void *) &avail->idle_enabled;
	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
		arg = (void *) &avail->suspend_enabled;

	return arg;
}

static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
				struct kobj_attribute *attr)
{
	struct lpm_level_avail *avail = NULL;

	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
		avail = container_of(attr, struct lpm_level_avail,
					idle_enabled_attr);
	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
		avail = container_of(attr, struct lpm_level_avail,
					suspend_enabled_attr);
	else if (!strcmp(attr->attr.name, lpm_types[LATENCY].str))
		avail = container_of(attr, struct lpm_level_avail,
					latency_attr);

	return avail;
}

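/*
 * Recompute the usable residency window for every low power mode of a
 * CPU. max_residency of a mode is the smallest break-even residency of
 * any enabled deeper mode (beyond that point the deeper mode is
 * cheaper); min_residency is the mode's own time overhead, or one more
 * than the max_residency of the nearest enabled shallower mode.
 * Disabled modes get a zero window. With probe_time set, all modes are
 * treated as enabled.
 */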
static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id,
		bool probe_time)
{
	int i, j;
	bool mode_avail;
	uint32_t *maximum_residency = per_cpu(max_residency, cpu_id);
	uint32_t *minimum_residency = per_cpu(min_residency, cpu_id);

	for (i = 0; i < cpu->nlevels; i++) {
		struct power_params *pwr = &cpu->levels[i].pwr;

		mode_avail = probe_time ||
			lpm_cpu_mode_allow(cpu_id, i, true);

		if (!mode_avail) {
			maximum_residency[i] = 0;
			minimum_residency[i] = 0;
			continue;
		}

		maximum_residency[i] = ~0;
		for (j = i + 1; j < cpu->nlevels; j++) {
			mode_avail = probe_time ||
				lpm_cpu_mode_allow(cpu_id, j, true);

			if (mode_avail &&
				(maximum_residency[i] > pwr->residencies[j]) &&
				(pwr->residencies[j] != 0))
				maximum_residency[i] = pwr->residencies[j];
		}

		minimum_residency[i] = pwr->time_overhead_us;
		for (j = i - 1; j >= 0; j--) {
			if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) {
				minimum_residency[i] = maximum_residency[j] + 1;
				break;
			}
		}
	}
}

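/*
 * Cluster counterpart of set_optimum_cpu_residency(): recompute the
 * min/max residency window of every cluster level, treating levels
 * disabled through sysfs as unavailable unless probe_time is set.
 */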
static void set_optimum_cluster_residency(struct lpm_cluster *cluster,
		bool probe_time)
{
	int i, j;
	bool mode_avail;

	for (i = 0; i < cluster->nlevels; i++) {
		struct power_params *pwr = &cluster->levels[i].pwr;

		mode_avail = probe_time ||
			lpm_cluster_mode_allow(cluster, i, true);

		if (!mode_avail) {
			pwr->max_residency = 0;
			pwr->min_residency = 0;
			continue;
		}

		pwr->max_residency = ~0;
		for (j = i + 1; j < cluster->nlevels; j++) {
			mode_avail = probe_time ||
				lpm_cluster_mode_allow(cluster, j, true);
			if (mode_avail &&
				(pwr->max_residency > pwr->residencies[j]) &&
				(pwr->residencies[j] != 0))
				pwr->max_residency = pwr->residencies[j];
		}

		pwr->min_residency = pwr->time_overhead_us;
		for (j = i - 1; j >= 0; j--) {
			if (probe_time ||
				lpm_cluster_mode_allow(cluster, j, true)) {
				pwr->min_residency =
					cluster->levels[j].pwr.max_residency + 1;
				break;
			}
		}
	}
}

uint32_t *get_per_cpu_max_residency(int cpu)
{
	return per_cpu(max_residency, cpu);
}

uint32_t *get_per_cpu_min_residency(int cpu)
{
	return per_cpu(min_residency, cpu);
}

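/* Show handler for the read-only latency_us attribute of a level. */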
static ssize_t lpm_latency_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int ret = 0;
	struct kernel_param kp;
	struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);

	if (!avail)
		return -EINVAL;

	kp.arg = &avail->latency_us;

	ret = param_get_uint(buf, &kp);
	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

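/* Show handler shared by the idle_enabled and suspend_enabled attributes. */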
ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
		char *buf)
{
	int ret = 0;
	struct kernel_param kp;

	kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr));
	ret = param_get_bool(buf, &kp);
	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

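/*
 * Store handler for idle_enabled/suspend_enabled. Toggling a mode
 * invalidates the cached residency windows, so recompute them for the
 * affected CPU or cluster.
 */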
ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t len)
{
	int ret = 0;
	struct kernel_param kp;
	struct lpm_level_avail *avail;

	avail = get_avail_ptr(kobj, attr);
	if (WARN_ON(!avail))
		return -EINVAL;

	kp.arg = get_enabled_ptr(attr, avail);
	ret = param_set_bool(buf, &kp);

	if (avail->cpu_node)
		set_optimum_cpu_residency(avail->data, avail->idx, false);
	else
		set_optimum_cluster_residency(avail->data, false);

	return ret ? ret : len;
}

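/*
 * Create a sysfs directory for one low power level with idle_enabled,
 * suspend_enabled and latency_us attributes, and record the back
 * pointers (data, idx, cpu_node) the store handler needs in order to
 * re-evaluate residencies when a mode is toggled.
 */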
static int create_lvl_avail_nodes(const char *name,
		struct kobject *parent, struct lpm_level_avail *avail,
		void *data, int index, bool cpu_node)
{
	struct attribute_group *attr_group = NULL;
	struct attribute **attr = NULL;
	struct kobject *kobj = NULL;
	int ret = 0;

	kobj = kobject_create_and_add(name, parent);
	if (!kobj)
		return -ENOMEM;

	attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
					GFP_KERNEL);
	if (!attr_group) {
		ret = -ENOMEM;
		goto failed;
	}

	attr = devm_kzalloc(&lpm_pdev->dev,
		sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto failed;
	}

	sysfs_attr_init(&avail->idle_enabled_attr.attr);
	avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
	avail->idle_enabled_attr.attr.mode = 0644;
	avail->idle_enabled_attr.show = lpm_enable_show;
	avail->idle_enabled_attr.store = lpm_enable_store;

	sysfs_attr_init(&avail->suspend_enabled_attr.attr);
	avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
	avail->suspend_enabled_attr.attr.mode = 0644;
	avail->suspend_enabled_attr.show = lpm_enable_show;
	avail->suspend_enabled_attr.store = lpm_enable_store;

	sysfs_attr_init(&avail->latency_attr.attr);
	avail->latency_attr.attr.name = lpm_types[LATENCY].str;
	avail->latency_attr.attr.mode = 0444;
	avail->latency_attr.show = lpm_latency_show;
	avail->latency_attr.store = NULL;

	attr[0] = &avail->idle_enabled_attr.attr;
	attr[1] = &avail->suspend_enabled_attr.attr;
	attr[2] = &avail->latency_attr.attr;
	attr[3] = NULL;
	attr_group->attrs = attr;

	ret = sysfs_create_group(kobj, attr_group);
	if (ret) {
		ret = -ENOMEM;
		goto failed;
	}

	avail->idle_enabled = true;
	avail->suspend_enabled = true;
	avail->kobj = kobj;
	avail->data = data;
	avail->idx = index;
	avail->cpu_node = cpu_node;

	return ret;

failed:
	kobject_put(kobj);
	return ret;
}

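/*
 * Create per-CPU sysfs nodes (cpuN/<level>/...) for every CPU governed
 * by this cluster. Level 0 (WFI) is skipped: cpuidle expects it to be
 * available at all times.
 */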
static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
{
	int cpu;
	int i, cpu_idx;
	struct kobject **cpu_kobj = NULL;
	struct lpm_level_avail *level_list = NULL;
	char cpu_name[20] = {0};
	int ret = 0;
	struct list_head *pos;

	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
			cpumask_weight(&p->child_cpus), GFP_KERNEL);
	if (!cpu_kobj)
		return -ENOMEM;

	cpu_idx = 0;
	list_for_each(pos, &p->cpu) {
		struct lpm_cpu *lpm_cpu = list_entry(pos, struct lpm_cpu, list);

		for_each_cpu(cpu, &lpm_cpu->related_cpus) {
			snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
			cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name,
					parent);
			if (!cpu_kobj[cpu_idx]) {
				ret = -ENOMEM;
				goto release_kobj;
			}

			level_list = devm_kzalloc(&lpm_pdev->dev,
					lpm_cpu->nlevels * sizeof(*level_list),
					GFP_KERNEL);
			if (!level_list) {
				ret = -ENOMEM;
				goto release_kobj;
			}

			/*
			 * Skip enable/disable for WFI. cpuidle expects WFI to
			 * be available at all times.
			 */
			for (i = 1; i < lpm_cpu->nlevels; i++) {
				level_list[i].latency_us =
					p->levels[i].pwr.latency_us;
				ret = create_lvl_avail_nodes(
						lpm_cpu->levels[i].name,
						cpu_kobj[cpu_idx],
						&level_list[i],
						(void *)lpm_cpu, cpu, true);
				if (ret)
					goto release_kobj;
			}

			cpu_level_available[cpu] = level_list;
			cpu_idx++;
		}
	}

	return ret;

release_kobj:
	for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
		kobject_put(cpu_kobj[i]);

	return ret;
}

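/*
 * Recursively create sysfs nodes for a cluster, its low power levels,
 * its child clusters and, at the leaves, its CPUs.
 */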
int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
{
	int ret = 0;
	struct lpm_cluster *child = NULL;
	int i;
	struct kobject *cluster_kobj = NULL;

	if (!p)
		return -ENODEV;

	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
	if (!cluster_kobj)
		return -ENOMEM;

	for (i = 0; i < p->nlevels; i++) {
		p->levels[i].available.latency_us = p->levels[i].pwr.latency_us;
		ret = create_lvl_avail_nodes(p->levels[i].level_name,
				cluster_kobj, &p->levels[i].available,
				(void *)p, 0, false);
		if (ret)
			return ret;
	}

	list_for_each_entry(child, &p->child, list) {
		ret = create_cluster_lvl_nodes(child, cluster_kobj);
		if (ret)
			return ret;
	}

	if (!list_empty(&p->cpu)) {
		ret = create_cpu_lvl_nodes(p, cluster_kobj);
		if (ret)
			return ret;
	}

	return ret;
}

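/*
 * Return whether a CPU low power mode may be entered, consulting the
 * idle or suspend sysfs toggle. Before the driver is probed, only the
 * suspend path is allowed to use the mode.
 */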
bool lpm_cpu_mode_allow(unsigned int cpu,
		unsigned int index, bool from_idle)
{
	struct lpm_level_avail *avail = cpu_level_available[cpu];

	if (!lpm_pdev || !avail)
		return !from_idle;

	return !!(from_idle ? avail[index].idle_enabled :
				avail[index].suspend_enabled);
}

bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
		unsigned int mode, bool from_idle)
{
	struct lpm_level_avail *avail = &cluster->levels[mode].available;

	if (!lpm_pdev || !avail)
		return false;

	return !!(from_idle ? avail->idle_enabled :
				avail->suspend_enabled);
}

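/* Parse the label and PSCI mode shift/mask common to a cluster node. */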
static int parse_cluster_params(struct device_node *node,
		struct lpm_cluster *c)
{
	char *key;
	int ret;

	key = "label";
	ret = of_property_read_string(node, key, &c->cluster_name);
	if (ret)
		goto fail;

	key = "qcom,psci-mode-shift";
	ret = of_property_read_u32(node, key, &c->psci_mode_shift);
	if (ret)
		goto fail;

	key = "qcom,psci-mode-mask";
	ret = of_property_read_u32(node, key, &c->psci_mode_mask);
	if (ret)
		goto fail;

	/* Set default_level to 0 as default */
	c->default_level = 0;

	return ret;
fail:
	pr_err("Failed to read key: %s ret: %d\n", key, ret);

	return ret;
}

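/*
 * Parse the power parameters (latency, steady-state power, and
 * energy/time overheads) that feed the break-even residency
 * calculation.
 */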
static int parse_power_params(struct device_node *node,
		struct power_params *pwr)
{
	char *key;
	int ret;

	key = "qcom,latency-us";
	ret = of_property_read_u32(node, key, &pwr->latency_us);
	if (ret)
		goto fail;

	key = "qcom,ss-power";
	ret = of_property_read_u32(node, key, &pwr->ss_power);
	if (ret)
		goto fail;

	key = "qcom,energy-overhead";
	ret = of_property_read_u32(node, key, &pwr->energy_overhead);
	if (ret)
		goto fail;

	key = "qcom,time-overhead";
	ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
	if (ret)
		goto fail;

	return ret;
fail:
	pr_err("Failed to read key: %s node: %s\n", key, node->name);

	return ret;
}

static int parse_cluster_level(struct device_node *node,
		struct lpm_cluster *cluster)
{
	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
	int ret = -ENOMEM;
	char *key;

	key = "label";
	ret = of_property_read_string(node, key, &level->level_name);
	if (ret)
		goto failed;

	key = "qcom,psci-mode";
	ret = of_property_read_u32(node, key, &level->psci_id);
	if (ret)
		goto failed;

	level->is_reset = of_property_read_bool(node, "qcom,is-reset");

	if (cluster->nlevels != cluster->default_level) {
		key = "qcom,min-child-idx";
		ret = of_property_read_u32(node, key, &level->min_child_level);
		if (ret)
			goto failed;

		if (cluster->min_child_level > level->min_child_level)
			cluster->min_child_level = level->min_child_level;
	}

	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");

	key = "parse_power_params";
	ret = parse_power_params(node, &level->pwr);
	if (ret)
		goto failed;

	key = "qcom,reset-level";
	ret = of_property_read_u32(node, key, &level->reset_level);
	if (ret == -EINVAL)
		level->reset_level = LPM_RESET_LVL_NONE;
	else if (ret)
		goto failed;

	cluster->nlevels++;

	return 0;
failed:
	pr_err("Failed to read key: %s ret: %d\n", key, ret);

	return ret;
}

static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
{
	char *key;
	int ret;

	key = "label";
	ret = of_property_read_string(n, key, &l->name);
	if (ret)
		goto fail;

	key = "qcom,psci-cpu-mode";
	ret = of_property_read_u32(n, key, &l->psci_id);
	if (ret)
		goto fail;

	return ret;
fail:
	pr_err("Failed to read key: %s level: %s\n", key, l->name);

	return ret;
}

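/*
 * Build the cpumask described by a node's "qcom,cpu" phandles; without
 * phandles, fall back to all possible CPUs (single-cluster targets).
 */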
static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
{
	struct device_node *cpu_node;
	int cpu;
	int idx = 0;

	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
	if (!cpu_node) {
		pr_info("%s: No CPU phandle, assuming single cluster\n",
				node->full_name);
		/*
		 * Not all targets have the cpu node populated in the
		 * device tree. If the cpu node is not populated, assume
		 * all possible CPUs belong to this cluster.
		 */
		cpumask_copy(mask, cpu_possible_mask);
		return 0;
	}

	while (cpu_node) {
		for_each_possible_cpu(cpu) {
			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
				cpumask_set_cpu(cpu, mask);
				break;
			}
		}
		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
	}

	return 0;
}

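/*
 * Compute the break-even residency between two levels from their
 * steady-state power and energy/time overheads: the sleep duration at
 * which the deeper level's extra entry/exit energy is paid back by its
 * lower steady-state power. The result is clamped to at least the
 * deeper level's time overhead.
 */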
static int calculate_residency(struct power_params *base_pwr,
		struct power_params *next_pwr)
{
	int32_t residency = (int32_t)(next_pwr->energy_overhead -
			base_pwr->energy_overhead) -
		((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
		- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));

	residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);

	if (residency < 0) {
		pr_err("Residency < 0 for LPM\n");
		return next_pwr->time_overhead_us;
	}

	return residency < next_pwr->time_overhead_us ?
				next_pwr->time_overhead_us : residency;
}

static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
{
	struct device_node *n;
	int ret, i, j;
	const char *key;

	for_each_child_of_node(node, n) {
		struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels];

		cpu->nlevels++;

		ret = parse_cpu_mode(n, l);
		if (ret)
			return ret;

		ret = parse_power_params(n, &l->pwr);
		if (ret)
			return ret;

		key = "qcom,use-broadcast-timer";
		l->use_bc_timer = of_property_read_bool(n, key);

		key = "qcom,is-reset";
		l->is_reset = of_property_read_bool(n, key);

		key = "qcom,reset-level";
		ret = of_property_read_u32(n, key, &l->reset_level);
		if (ret == -EINVAL)
			l->reset_level = LPM_RESET_LVL_NONE;
		else if (ret)
			return ret;
	}

	for (i = 0; i < cpu->nlevels; i++) {
		for (j = 0; j < cpu->nlevels; j++) {
			if (i >= j) {
				cpu->levels[i].pwr.residencies[j] = 0;
				continue;
			}

			cpu->levels[i].pwr.residencies[j] =
				calculate_residency(&cpu->levels[i].pwr,
					&cpu->levels[j].pwr);

			pr_info("idx %d %u\n", j,
					cpu->levels[i].pwr.residencies[j]);
		}
	}

	for_each_cpu(i, &cpu->related_cpus) {
		per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev,
				sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL);
		if (!per_cpu(max_residency, i))
			return -ENOMEM;

		per_cpu(min_residency, i) = devm_kzalloc(&lpm_pdev->dev,
				sizeof(uint32_t) * cpu->nlevels, GFP_KERNEL);
		if (!per_cpu(min_residency, i))
			return -ENOMEM;

		set_optimum_cpu_residency(cpu, i, true);
	}

	return 0;
}

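/*
 * Parse a qcom,pm-cpu node: read its PSCI parameters, parse its low
 * power levels, and attach the resulting lpm_cpu to the parent cluster.
 */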
static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
{
	int ret, i;
	char *key;
	struct lpm_cpu *cpu;

	cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	if (get_cpumask_for_node(node, &cpu->related_cpus))
		return -EINVAL;

	cpu->parent = c;

	key = "qcom,psci-mode-shift";
	ret = of_property_read_u32(node, key, &cpu->psci_mode_shift);
	if (ret)
		goto failed_parse_params;

	key = "qcom,psci-mode-mask";
	ret = of_property_read_u32(node, key, &cpu->psci_mode_mask);
	if (ret)
		goto failed_parse_params;

	key = "parse_cpu";
	ret = parse_cpu(node, cpu);
	if (ret)
		goto failed_parse_cpu;

	cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
	list_add(&cpu->list, &c->cpu);

	return ret;

failed_parse_cpu:
	for (i = 0; i < cpu->nlevels; i++) {
		kfree(cpu->levels[i].name);
		cpu->levels[i].name = NULL;
	}

failed_parse_params:
	pr_err("Failed to read key: %s node: %s\n", key, node->name);
	return ret;
}

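/*
 * Release a cluster subtree: recursively free child clusters, then
 * detach each lpm_cpu and free its level names. The cluster and CPU
 * structures themselves are devm-managed and are released with the
 * platform device.
 */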
void free_cluster_node(struct lpm_cluster *cluster)
{
	struct lpm_cluster *cl, *m;
	struct lpm_cpu *cpu, *n;
	int i;

	list_for_each_entry_safe(cl, m, &cluster->child, list) {
		list_del(&cl->list);
		free_cluster_node(cl);
	}

	list_for_each_entry_safe(cpu, n, &cluster->cpu, list) {
		for (i = 0; i < cpu->nlevels; i++) {
			kfree(cpu->levels[i].name);
			cpu->levels[i].name = NULL;
		}
		list_del(&cpu->list);
	}
}

/*
 * TODO:
 * Expects a CPU or a cluster only. This ensures that affinity
 * level of a cluster is consistent with reference to its
 * child nodes.
 */
struct lpm_cluster *parse_cluster(struct device_node *node,
		struct lpm_cluster *parent)
{
	struct lpm_cluster *c;
	struct device_node *n;
	char *key;
	int ret = 0;
	int i, j;

	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	ret = parse_cluster_params(node, c);
	if (ret)
		goto failed_parse_params;

	INIT_LIST_HEAD(&c->child);
	INIT_LIST_HEAD(&c->cpu);
	c->parent = parent;
	spin_lock_init(&c->sync_lock);
	c->min_child_level = NR_LPM_LEVELS;

	for_each_child_of_node(node, n) {

		if (!n->name)
			continue;

		key = "qcom,pm-cluster-level";
		if (!of_node_cmp(n->name, key)) {
			if (parse_cluster_level(n, c))
				goto failed_parse_cluster;
			continue;
		}

		key = "qcom,pm-cluster";
		if (!of_node_cmp(n->name, key)) {
			struct lpm_cluster *child;

			child = parse_cluster(n, c);
			if (IS_ERR_OR_NULL(child))
				goto failed_parse_cluster;

			list_add(&child->list, &c->child);
			cpumask_or(&c->child_cpus, &c->child_cpus,
					&child->child_cpus);
			c->aff_level = child->aff_level + 1;
			continue;
		}

		key = "qcom,pm-cpu";
		if (!of_node_cmp(n->name, key)) {
			if (parse_cpu_levels(n, c))
				goto failed_parse_cluster;

			c->aff_level = 1;
		}
	}

	if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
		c->last_level = c->default_level;
	else
		c->last_level = c->nlevels - 1;

	for (i = 0; i < c->nlevels; i++) {
		for (j = 0; j < c->nlevels; j++) {
			if (i >= j) {
				c->levels[i].pwr.residencies[j] = 0;
				continue;
			}
			c->levels[i].pwr.residencies[j] = calculate_residency(
				&c->levels[i].pwr, &c->levels[j].pwr);
		}
	}
	set_optimum_cluster_residency(c, true);

	return c;

failed_parse_cluster:
	pr_err("Failed to parse cluster: %s\n", key);
	if (parent)
		list_del(&c->list);
	free_cluster_node(c);
failed_parse_params:
	c->parent = NULL;
	pr_err("Failed to parse cluster params\n");

	return NULL;
}

struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
{
	struct device_node *top = NULL;

	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
	if (!top) {
		pr_err("Failed to find root node\n");
		return ERR_PTR(-ENODEV);
	}

	lpm_pdev = pdev;
	return parse_cluster(top, NULL);
}

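/* Debug aid: log the parsed cluster hierarchy with its levels and CPUs. */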
void cluster_dt_walkthrough(struct lpm_cluster *cluster)
{
	struct list_head *list;
	struct lpm_cpu *cpu;
	int i, j;
	static int id;
	char str[10] = {0};

	if (!cluster)
		return;

	for (i = 0; i < id; i++)
		snprintf(str + i, 10 - i, "\t");
	pr_info("%d\n", __LINE__);

	for (i = 0; i < cluster->nlevels; i++) {
		struct lpm_cluster_level *l = &cluster->levels[i];

		pr_info("cluster: %s \t level: %s\n", cluster->cluster_name,
				l->level_name);
	}

	list_for_each_entry(cpu, &cluster->cpu, list) {
		pr_info("%d\n", __LINE__);
		for (j = 0; j < cpu->nlevels; j++)
			pr_info("%s\tCPU level name: %s\n", str,
					cpu->levels[j].name);
	}

	id++;

	list_for_each(list, &cluster->child) {
		struct lpm_cluster *n;

		pr_info("%d\n", __LINE__);
		n = list_entry(list, typeof(*n), list);
		cluster_dt_walkthrough(n);
	}
	id--;
}