/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "opp.h"

static struct opp_table *_managed_opp(const struct device_node *np)
{
	struct opp_table *opp_table;

	list_for_each_entry_rcu(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have the same node pointer, np.
			 *
			 * But the OPPs will be considered shared only if the
			 * OPP table contains an "opp-shared" property.
			 */
			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
				return opp_table;

			return NULL;
		}
	}

	return NULL;
}

void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
{
	struct device_node *np;

	/*
	 * Only required for backward compatibility with v1 bindings, but it
	 * isn't harmful for other cases, so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (np) {
		u32 val;

		if (!of_property_read_u32(np, "clock-latency", &val))
			opp_table->clock_latency_ns_max = val;
		of_property_read_u32(np, "voltage-tolerance",
				     &opp_table->voltage_tolerance_v1);
		of_node_put(np);
	}
}

static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
{
	unsigned int count = opp_table->supported_hw_count;
	u32 version;
	int ret;

	if (!opp_table->supported_hw) {
		/*
		 * If supported_hw hasn't been set by the platform but the OPP
		 * node carries an opp-supported-hw value, the OPP must not be
		 * enabled, as there is no way to tell whether the hardware
		 * supports it.
		 */
		if (of_find_property(np, "opp-supported-hw", NULL))
			return false;
		else
			return true;
	}

	while (count--) {
		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
						 &version);
		if (ret) {
			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
				 __func__, count, ret);
			return false;
		}

		/* Both of these are bitwise masks of the versions */
		if (!(version & opp_table->supported_hw[count]))
			return false;
	}

	return true;
}

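/*
 * Illustrative sketch of how the version matching above is typically wired
 * up. The "cpu0_opp_table" label, the example version values and the caller
 * are made up for the sake of the example; only the "opp-supported-hw"
 * property and dev_pm_opp_set_supported_hw() are real OPP framework names.
 *
 *	// DT: each OPP lists one bitmask per hierarchy level.
 *	cpu0_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *		opp@1000000000 {
 *			opp-hz = /bits/ 64 <1000000000>;
 *			opp-supported-hw = <0x03 0x01>;	// cuts 1-2, revision 1
 *		};
 *	};
 *
 *	// Platform code: advertise the running hardware's versions before the
 *	// OPP table is parsed; _opp_is_supported() then ANDs each level.
 *	u32 versions[] = { 0x02, 0x01 };
 *
 *	dev_pm_opp_set_supported_hw(cpu_dev, versions, ARRAY_SIZE(versions));
 */
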
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 *microvolt, *microamp = NULL;
	int supplies, vcount, icount, ret, i, j;
	struct property *prop = NULL;
	char name[NAME_MAX];

	supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;

	/* Search for "opp-microvolt-<name>" */
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microvolt-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microvolt" */
		sprintf(name, "opp-microvolt");
		prop = of_find_property(opp->np, name, NULL);

		/* Missing property isn't a problem, but an invalid entry is */
		if (!prop)
			return 0;
	}

	vcount = of_property_count_u32_elems(opp->np, name);
	if (vcount < 0) {
		dev_err(dev, "%s: Invalid %s property (%d)\n",
			__func__, name, vcount);
		return vcount;
	}

	/* There can be one or three elements per supply */
	if (vcount != supplies && vcount != supplies * 3) {
		dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
			__func__, name, vcount, supplies);
		return -EINVAL;
	}

	microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
	if (!microvolt)
		return -ENOMEM;

	ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
	if (ret) {
		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
		ret = -EINVAL;
		goto free_microvolt;
	}

	/* Search for "opp-microamp-<name>" */
	prop = NULL;
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microamp-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microamp" */
		sprintf(name, "opp-microamp");
		prop = of_find_property(opp->np, name, NULL);
	}

	if (prop) {
		icount = of_property_count_u32_elems(opp->np, name);
		if (icount < 0) {
			dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
				name, icount);
			ret = icount;
			goto free_microvolt;
		}

		if (icount != supplies) {
			dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
				__func__, name, icount, supplies);
			ret = -EINVAL;
			goto free_microvolt;
		}

		microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
		if (!microamp) {
			ret = -ENOMEM;
			goto free_microvolt;
		}

		ret = of_property_read_u32_array(opp->np, name, microamp,
						 icount);
		if (ret) {
			dev_err(dev, "%s: error parsing %s: %d\n", __func__,
				name, ret);
			ret = -EINVAL;
			goto free_microamp;
		}
	}

	for (i = 0, j = 0; i < supplies; i++) {
		opp->supplies[i].u_volt = microvolt[j++];

		if (vcount == supplies) {
			opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
			opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
		} else {
			opp->supplies[i].u_volt_min = microvolt[j++];
			opp->supplies[i].u_volt_max = microvolt[j++];
		}

		if (microamp)
			opp->supplies[i].u_amp = microamp[i];
	}

free_microamp:
	kfree(microamp);
free_microvolt:
	kfree(microvolt);

	return ret;
}

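/*
 * Illustrative sketch of the two DT forms opp_parse_supplies() accepts for a
 * single-regulator device. The node name and the values are made up; only the
 * property names and the one-vs-three element rule come from the code above.
 *
 *	opp@1100000000 {
 *		opp-hz = /bits/ 64 <1100000000>;
 *
 *		// Either a single target voltage per supply ...
 *		opp-microvolt = <1150000>;
 *
 *		// ... or a <target min max> triplet per supply:
 *		// opp-microvolt = <1150000 1100000 1200000>;
 *
 *		// Optional, exactly one value per supply:
 *		opp-microamp = <70000>;
 *	};
 *
 * If the platform has set a property name (e.g. via
 * dev_pm_opp_set_prop_name(dev, "slow"), here assumed), the
 * "opp-microvolt-slow" / "opp-microamp-slow" variants are looked up first.
 */
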
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	_dev_pm_opp_remove_table(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

/* Returns opp descriptor node for a device, caller must do of_node_put() */
static struct device_node *_of_get_opp_desc_node(struct device *dev)
{
	/*
	 * There should be only ONE phandle present in "operating-points-v2"
	 * property.
	 */

	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _opp_allocate(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, opp_table->suspend_opp->rate,
				 new_opp->rate);
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&opp_table_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate,
		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_free(new_opp, opp_table);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
	struct device_node *np;
	struct opp_table *opp_table;
	int ret = 0, count = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _managed_opp(opp_np);
	if (opp_table) {
		/* OPPs are already managed */
		if (!_add_opp_dev(dev, opp_table))
			ret = -ENOMEM;
		mutex_unlock(&opp_table_lock);
		return ret;
	}
	mutex_unlock(&opp_table_lock);

	/* We have the opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			goto free_table;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count))
		return -ENOENT;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(dev);
	if (WARN_ON(IS_ERR(opp_table))) {
		ret = PTR_ERR(opp_table);
		mutex_unlock(&opp_table_lock);
		goto free_table;
	}

	opp_table->np = opp_np;
	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	mutex_unlock(&opp_table_lock);

	return 0;

free_table:
	dev_pm_opp_of_remove_table(dev);

	return ret;
}

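/*
 * Illustrative sketch of the shape of an operating-points-v2 table consumed
 * above. Node names, labels and values are assumptions taken loosely from the
 * binding examples.
 *
 *	cpu@0 {
 *		...
 *		operating-points-v2 = <&cpu0_opp_table>;
 *	};
 *
 *	cpu0_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *		opp-shared;		// all CPUs pointing here share OPPs
 *
 *		opp@998400000 {
 *			opp-hz = /bits/ 64 <998400000>;
 *			opp-microvolt = <1075000>;
 *			clock-latency-ns = <200000>;
 *			opp-suspend;
 *		};
 *		opp@1094400000 {
 *			opp-hz = /bits/ 64 <1094400000>;
 *			opp-microvolt = <1150000>;
 *			clock-latency-ns = <200000>;
 *			turbo-mode;
 *		};
 *	};
 */
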
/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr, ret;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz volt-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP table\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		ret = _opp_add_v1(dev, freq, volt, false);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
				__func__, freq, ret);
			dev_pm_opp_of_remove_table(dev);
			return ret;
		}
		nr -= 2;
	}

	return 0;
}

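/*
 * Illustrative sketch of the legacy v1 "operating-points" property parsed
 * above, i.e. flat <kHz uV> pairs on the device node itself. The values are
 * made up.
 *
 *	cpu@0 {
 *		...
 *		operating-points = <
 *			// kHz	  uV
 *			 998400	1075000
 *			1094400	1150000
 *		>;
 *		clock-latency = <200000>;
 *		voltage-tolerance = <2>;	// percent, optional
 *	};
 */
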
/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or contains
 *		invalid data in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	int ret;

	/*
	 * OPPs have two versions of bindings now. The older one is deprecated,
	 * so try the new binding first.
	 */
	opp_np = _of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);

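/*
 * Illustrative sketch of how a driver would typically pair the add/remove
 * helpers above. The "foo" driver names are hypothetical; only
 * dev_pm_opp_of_add_table() and dev_pm_opp_of_remove_table() are the real
 * entry points.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		// Populate the OPP table from DT (v2 first, then legacy v1).
 *		ret = dev_pm_opp_of_add_table(&pdev->dev);
 *		if (ret)
 *			return ret;
 *		...
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		// Drop only the statically created (DT) OPPs again.
 *		dev_pm_opp_of_remove_table(&pdev->dev);
 *		return 0;
 *	}
 */
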
/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu, ret = 0;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
			       __func__, cpu, ret);

			/* Free all other OPPs */
			dev_pm_opp_of_cpumask_remove_table(cpumask);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);

/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 *
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
				   struct cpumask *cpumask)
{
	struct device_node *np, *tmp_np;
	struct device *tcpu_dev;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = _of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* OPPs are shared? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			ret = -ENODEV;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _of_get_opp_desc_node(tcpu_dev);
		if (!tmp_np) {
			dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
				__func__);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
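
/*
 * Illustrative sketch of how a cpufreq driver might combine the CPU helpers
 * above. The "policy" variable and the surrounding ->init() callback are
 * assumptions; the three dev_pm_opp_of_* calls are the real API.
 *
 *	struct device *cpu_dev = get_cpu_device(policy->cpu);
 *	int ret;
 *
 *	// Find all CPUs that share the OPP table of policy->cpu (v2 only).
 *	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
 *	if (ret)
 *		return ret;
 *
 *	// Parse the DT OPP table for every CPU in the shared mask.
 *	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	// On failure/exit, drop the statically created OPPs again.
 *	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
 */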