/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>

/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */

/**
 * struct opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected that only an optimal set of OPPs is
 *		added to the library by the SoC framework.
 *		RCU usage: the opp list is traversed with RCU locks. Node
 *		modification is possible at runtime, hence the modifications
 *		are protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order.
 * @available:	true/false - marks if this OPP is available or not
 * @rate:	Frequency in hertz
 * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 *
 * This structure stores the OPP information for a given device.
 */
struct opp {
	struct list_head node;

	bool available;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
};

/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock
 * @dev:	device pointer
 * @head:	notifier head to notify the OPP availability changes.
 * @opp_list:	list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for book-keeping and is private to the OPP library.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct srcu_notifier_head head;
	struct list_head opp_list;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

/**
 * find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search the list of device OPPs for the one containing the matching device.
 * Does an RCU reader operation to grab the pointer needed.
 *
 * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is an RCU protected pointer. This means that device_opp is valid as long
 * as we are under the RCU lock.
 */
static struct device_opp *find_device_opp(struct device *dev)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
		if (tmp_dev_opp->dev == dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}

	return dev_opp;
}

/**
 * opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Return voltage in microvolts corresponding to the opp, else
 * return 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that the opp which could have been fetched by
 * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under the RCU lock. The pointer returned by the opp_find_freq family must be
 * used with this function within the same RCU read-side section, prior to
 * unlocking with rcu_read_unlock(), to maintain the integrity of the pointer.
 */
unsigned long opp_get_voltage(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long v = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}

/**
 * opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Return frequency in hertz corresponding to the opp, else
 * return 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that the opp which could have been fetched by
 * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under the RCU lock. The pointer returned by the opp_find_freq family must be
 * used with this function within the same RCU read-side section, prior to
 * unlocking with rcu_read_unlock(), to maintain the integrity of the pointer.
 */
unsigned long opp_get_freq(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long f = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}

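/*
 * A minimal usage sketch of the RCU read-side pattern described above,
 * assuming a hypothetical driver with a valid 'dev' pointer and a target
 * rate in 'freq'. The opp pointer and both getters are only valid inside
 * the rcu_read_lock()/rcu_read_unlock() section; the plain volt/freq
 * values may be used after unlocking, the opp pointer must not.
 *
 *	unsigned long freq = 1000000000, volt;
 *	struct opp *opp;
 *
 *	rcu_read_lock();
 *	opp = opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp)) {
 *		rcu_read_unlock();
 *		return PTR_ERR(opp);
 *	}
 *	volt = opp_get_voltage(opp);
 *	freq = opp_get_freq(opp);
 *	rcu_read_unlock();
 */
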
/**
 * opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * This function returns the number of available opps if there are any,
 * 0 if there are none, or the corresponding error value otherwise.
 *
 * Locking: This function must be called under rcu_read_lock(). This function
 * internally references two RCU protected structures: device_opp and opp which
 * are safe as long as we are under a common RCU locked section.
 */
int opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp;
	int count = 0;

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return r;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

	return count;
}

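/*
 * A sketch (hypothetical caller, 'dev' and 'entries' belong to that
 * caller) of using the count to size a private table. Note that the
 * count can go stale once the RCU section is left if OPPs are added or
 * their availability changes in the interim.
 *
 *	int count;
 *
 *	rcu_read_lock();
 *	count = opp_get_opp_count(dev);
 *	rcu_read_unlock();
 *	if (count <= 0)
 *		return count ? count : -ENODATA;
 *	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
 */
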
/**
 * opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Searches for an exact match in the opp list and returns a pointer to the
 * matching opp if found, else returns ERR_PTR in case of error; the result
 * should be handled using IS_ERR.
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
				bool available)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}

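/*
 * A sketch of the enable-a-disabled-OPP use case mentioned above (the
 * 800 MHz rate is a made-up example value): look up the exact frequency
 * among the *unavailable* OPPs first, then flip its availability. Note
 * that opp_enable() must be called outside the RCU read-side section.
 *
 *	rcu_read_lock();
 *	opp = opp_find_freq_exact(dev, 800000000, false);
 *	rcu_read_unlock();
 *	if (!IS_ERR(opp))
 *		opp_enable(dev, 800000000);
 */
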
/**
 * opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error which should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return opp;

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}

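/*
 * Because *freq is refreshed to the matched rate, opp_find_freq_ceil()
 * can be used to walk every available OPP in ascending order: bump freq
 * past the current match before the next lookup. A sketch, assuming a
 * hypothetical caller with a valid 'dev':
 *
 *	unsigned long freq = 0;
 *	struct opp *opp;
 *
 *	rcu_read_lock();
 *	while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
 *		pr_info("OPP: %lu Hz, %lu uV\n", freq, opp_get_voltage(opp));
 *		freq++;
 *	}
 *	rcu_read_unlock();
 */
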
/**
 * opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error which should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return opp;

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}

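/*
 * A common floor idiom (hypothetical caller): passing ULONG_MAX returns
 * the highest available OPP, e.g. to discover the maximum supported
 * frequency of a device.
 *
 *	unsigned long freq = ULONG_MAX;
 *	struct opp *opp;
 *
 *	rcu_read_lock();
 *	opp = opp_find_freq_floor(dev, &freq);
 *	if (!IS_ERR(opp))
 *		pr_info("max OPP: %lu Hz\n", freq);
 *	rcu_read_unlock();
 */
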
/**
 * opp_add() - Add an OPP definition for a device
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * the opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses the RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * the mutex cannot be locked.
 */
int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct device_opp *dev_opp = NULL;
	struct opp *opp, *new_opp;
	struct list_head *head;

	/* allocate new OPP node */
	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
		return -ENOMEM;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/*
		 * Allocate a new device OPP table. In the infrequent case
		 * where a new device needs to be added, we pay this
		 * penalty.
		 */
		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
		if (!dev_opp) {
			mutex_unlock(&dev_opp_list_lock);
			kfree(new_opp);
			dev_warn(dev,
				"%s: Unable to create device OPP structure\n",
				__func__);
			return -ENOMEM;
		}

		dev_opp->dev = dev;
		srcu_init_notifier_head(&dev_opp->head);
		INIT_LIST_HEAD(&dev_opp->opp_list);

		/* Secure the device list modification */
		list_add_rcu(&dev_opp->node, &dev_opp_list);
	}

	/* populate the opp table */
	new_opp->dev_opp = dev_opp;
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;

	/* Insert new OPP in order of increasing frequency */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate < opp->rate)
			break;
		else
			head = &opp->node;
	}

	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
	return 0;
}

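/*
 * A registration sketch for SoC initialization code; the frequency and
 * voltage values below are made-up illustrative numbers, not real OPP
 * data for any device:
 *
 *	opp_add(dev,  300000000,  950000);
 *	opp_add(dev,  600000000, 1050000);
 *	opp_add(dev, 1000000000, 1200000);
 *
 * Each entry is available by default; individual entries can later be
 * gated with opp_enable()/opp_disable().
 */
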
/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -EINVAL for bad pointers, -ENOMEM if no memory is available for the
 * copy operation, and 0 if no modification was needed OR the modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses the RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
		bool availability_req)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
		if (dev == tmp_dev_opp->dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	synchronize_rcu();

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
					 new_opp);

	/* clean up old opp */
	new_opp = opp;
	goto out;

unlock:
	mutex_unlock(&dev_opp_list_lock);
out:
	kfree(new_opp);
	return r;
}

/**
 * opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it has been temporarily made unavailable with
 * opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_enable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, true);
}

/**
 * opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control for users to make this OPP unavailable until the circumstances
 * are right for it to be made available again (with a call to opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_disable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, false);
}

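/*
 * A sketch of the intended enable/disable flow (hypothetical thermal
 * handler, made-up 1 GHz rate): temporarily drop the top OPP when the
 * device runs hot and restore it once conditions improve. Neither call
 * may be made from an RCU read-side section or atomic context.
 *
 *	if (too_hot)
 *		opp_disable(dev, 1000000000);
 *	else
 *		opp_enable(dev, 1000000000);
 */
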
#ifdef CONFIG_CPU_FREQ
/**
 * opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates the required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and 0 if successful and the table is populated.
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the opp_enable/opp_disable/opp_add functions have been
 * invoked in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend we are the updater and hold the relevant
 * mutex here. Callers should ensure that this function is *NOT* called under
 * RCU protection or in contexts where mutex locking cannot be used.
 */
int opp_init_cpufreq_table(struct device *dev,
			    struct cpufreq_frequency_table **table)
{
	struct device_opp *dev_opp;
	struct opp *opp;
	struct cpufreq_frequency_table *freq_table;
	int i = 0;

	/* Pretend as if I am an updater */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
	if (!freq_table) {
		mutex_unlock(&dev_opp_list_lock);
		dev_warn(dev, "%s: Unable to allocate frequency table\n",
			__func__);
		return -ENOMEM;
	}

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->available) {
			freq_table[i].index = i;
			freq_table[i].frequency = opp->rate / 1000;
			i++;
		}
	}
	mutex_unlock(&dev_opp_list_lock);

	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

	return 0;
}

/**
 * opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by opp_init_cpufreq_table.
 */
void opp_free_cpufreq_table(struct device *dev,
				struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
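
/*
 * A sketch of how a cpufreq driver's ->init() callback might consume
 * these two helpers (error handling trimmed; 'mpu_dev' is a hypothetical
 * device pointer owned by the driver):
 *
 *	static struct cpufreq_frequency_table *freq_table;
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		int ret;
 *
 *		ret = opp_init_cpufreq_table(mpu_dev, &freq_table);
 *		if (ret)
 *			return ret;
 *		ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
 *		if (ret)
 *			opp_free_cpufreq_table(mpu_dev, &freq_table);
 *		return ret;
 *	}
 */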
#endif /* CONFIG_CPU_FREQ */

/**
 * opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 */
struct srcu_notifier_head *opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->head;
}
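
/*
 * A sketch of listening for OPP availability changes through the SRCU
 * notifier head returned above (hypothetical listener code, registered
 * from the listener's init/probe path). The OPP_EVENT_ADD, OPP_EVENT_ENABLE
 * and OPP_EVENT_DISABLE events are raised by opp_add() and
 * opp_set_availability() in this file.
 *
 *	static int my_opp_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_opp_nb = {
 *		.notifier_call = my_opp_notify,
 *	};
 *
 *	struct srcu_notifier_head *nh = opp_get_notifier(dev);
 *
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &my_opp_nb);
 */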