/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev: device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * devfreq_set_freq_limits() - Set min and max frequency from freq_table
 * @devfreq: the devfreq instance
 */
static void devfreq_set_freq_limits(struct devfreq *devfreq)
{
	int idx;
	unsigned long min = ~0, max = 0;

	if (!devfreq->profile->freq_table)
		return;

	for (idx = 0; idx < devfreq->profile->max_state; idx++) {
		if (min > devfreq->profile->freq_table[idx])
			min = devfreq->profile->freq_table[idx];
		if (max < devfreq->profile->freq_table[idx])
			max = devfreq->profile->freq_table[idx];
	}

	devfreq->min_freq = min;
	devfreq->max_freq = max;
}

/**
 * devfreq_get_freq_level() - Lookup freq_table for the frequency
 * @devfreq: the devfreq instance
 * @freq: the target frequency
 */
int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
	int lev;

	for (lev = 0; lev < devfreq->profile->max_state; lev++)
		if (freq == devfreq->profile->freq_table[lev])
			return lev;

	return -EINVAL;
}
EXPORT_SYMBOL(devfreq_get_freq_level);

/**
 * devfreq_update_status() - Update statistics of devfreq behavior
 * @devfreq: the devfreq instance
 * @freq: the update target frequency
 */
static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev;
	unsigned long cur_time;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0)
		return lev;

	cur_time = jiffies;
	devfreq->time_in_state[lev] +=
			cur_time - devfreq->last_stat_updated;
	devfreq->last_stat_updated = cur_time;

	if (freq == devfreq->previous_freq)
		return 0;

	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0)
		return 0;

	if (lev != prev_lev) {
		devfreq->trans_table[(prev_lev *
				devfreq->profile->max_state) + lev]++;
		devfreq->total_trans++;
	}

	return 0;
}

/**
 * find_devfreq_governor() - find devfreq governor from name
 * @name: name of the governor
 *
 * Search the list of devfreq governors and return the matched
 * governor's pointer. devfreq_list_lock should be held by the caller.
 */
static struct devfreq_governor *find_devfreq_governor(const char *name)
{
	struct devfreq_governor *tmp_governor;

	if (unlikely(IS_ERR_OR_NULL(name))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
			return tmp_governor;
	}

	return ERR_PTR(-ENODEV);
}

/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq: the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 * This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	if (!devfreq->governor)
		return -EINVAL;

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq, &flags);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority
	 * max_freq (probably called by thermal when it's too hot)
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);
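
/*
 * Illustrative sketch (not part of this file's logic): a governor
 * re-evaluates a device by taking devfreq->lock and calling
 * update_devfreq(). The governor callbacks below are hypothetical; only
 * the get_target_freq() signature and the locking rule come from this file.
 *
 *	static int sample_get_target_freq(struct devfreq *df,
 *					  unsigned long *freq, u32 *flag)
 *	{
 *		*freq = df->profile->initial_freq;	// trivial policy
 *		return 0;
 *	}
 *
 *	static int sample_reevaluate(struct devfreq *df)
 *	{
 *		int err;
 *
 *		mutex_lock(&df->lock);	// update_devfreq() requires the lock
 *		err = update_devfreq(df);
 *		mutex_unlock(&df->lock);
 *		return err;
 *	}
 */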

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work: the work struct used to run devfreq_monitor periodically.
 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work,
					struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err)
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);

	queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);
}

/**
 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function for starting devfreq device load monitoring. By
 * default delayed work based monitoring is supported. Function
 * to be called from governor in response to DEVFREQ_GOV_START
 * event when device is added to devfreq framework.
 */
void devfreq_monitor_start(struct devfreq *devfreq)
{
	INIT_DELAYED_WORK_DEFERRABLE(&devfreq->work, devfreq_monitor);
	if (devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
}
EXPORT_SYMBOL(devfreq_monitor_start);

/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);

/**
 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to suspend devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 * event or when polling interval is set to zero.
 *
 * Note: Though this function is the same as devfreq_monitor_stop(),
 * it is intentionally kept separate to provide hooks for collecting
 * transition statistics.
 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);

/**
 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 * @devfreq: the devfreq instance.
 *
 * Helper function to resume devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_RESUME
 * event or when polling interval is set to non-zero.
 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	if (!devfreq->stop_polling)
		goto out;

	if (!delayed_work_pending(&devfreq->work) &&
			devfreq->profile->polling_ms)
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
	devfreq->stop_polling = false;

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);

/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq: the devfreq instance.
 * @delay: new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
				msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_interval_update);
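
/*
 * Illustrative sketch (not used by this file): a simple governor event
 * handler can be wired almost one-to-one to the monitoring helpers above.
 * The function name is hypothetical; the events and helper calls are the
 * ones defined by this framework.
 *
 *	static int sample_event_handler(struct devfreq *devfreq,
 *					unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 */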

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 *			     have been changed outside of the devfreq framework.
 * @nb: the notifier_block (supposed to be devfreq->nb)
 * @type: not used
 * @devp: not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq: the devfreq struct
 * @skip: skip calling device_unregister().
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	if (devfreq->governor)
		devfreq->governor->event_handler(devfreq,
						 DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev: the devfreq device
 *
 * This calls _remove_devfreq() if it has not been called already.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	_remove_devfreq(devfreq, true);
}

/**
 * find_governor_data() - Find device-specific private data for a governor.
 * @profile: The profile to search.
 * @governor_name: The governor to search for.
 *
 * Look up the device-specific data for a governor.
 */
static void *find_governor_data(struct devfreq_dev_profile *profile,
				const char *governor_name)
{
	void *data = NULL;
	int i;

	if (profile->governor_data == NULL)
		return NULL;

	for (i = 0; i < profile->num_governor_data; i++) {
		if (strncmp(governor_name, profile->governor_data[i].name,
			    DEVFREQ_NAME_LEN) == 0) {
			data = profile->governor_data[i].data;
			break;
		}
	}
	return data;
}

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev: the device to add devfreq feature.
 * @profile: device-specific profile to run devfreq.
 * @governor_name: name of the policy to choose frequency.
 * @data: private data for the governor. The devfreq framework does not
 *	  touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;

	devfreq->data = data ? data : find_governor_data(devfreq->profile,
							 governor_name);

	devfreq->nb.notifier_call = devfreq_notifier_call;

	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;
	devfreq_set_freq_limits(devfreq);

	dev_set_name(&devfreq->dev, dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);

	governor = find_devfreq_governor(devfreq->governor_name);
	if (!IS_ERR(governor))
		devfreq->governor = governor;
	if (devfreq->governor)
		err = devfreq->governor->event_handler(devfreq,
					DEVFREQ_GOV_START, NULL);
	mutex_unlock(&devfreq_list_lock);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
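
/*
 * Illustrative sketch (not part of this file): a device driver typically
 * fills in a struct devfreq_dev_profile and registers it from its probe()
 * routine. The callback, profile values and the use of the
 * "simple_ondemand" governor below are hypothetical placeholders.
 *
 *	static int sample_target(struct device *dev, unsigned long *freq,
 *				 u32 flags)
 *	{
 *		// program the device clock/voltage for *freq here
 *		return 0;
 *	}
 *
 *	static struct devfreq_dev_profile sample_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= sample_target,
 *	};
 *
 *	// in probe():
 *	//	df = devfreq_add_device(dev, &sample_profile,
 *	//				"simple_ondemand", NULL);
 *	//	if (IS_ERR(df))
 *	//		return PTR_ERR(df);
 */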

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq: the devfreq instance to be removed
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	_remove_devfreq(devfreq, false);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);

/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq: the devfreq instance to be suspended
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);

/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq: the devfreq instance to be resumed
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor)
		return 0;

	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
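
/*
 * Illustrative sketch (hypothetical driver code): a driver that owns a
 * devfreq instance would normally forward its suspend/resume transitions
 * to the two helpers above, for example:
 *
 *	static int sample_suspend(struct device *dev)
 *	{
 *		struct sample_drvdata *d = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(d->devfreq);
 *	}
 *
 *	static int sample_resume(struct device *dev)
 *	{
 *		struct sample_drvdata *d = dev_get_drvdata(dev);
 *
 *		return devfreq_resume_device(d->devfreq);
 *	}
 */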

/**
 * devfreq_add_governor() - Add devfreq governor
 * @governor: the devfreq governor to be added
 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__,
		       g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev,
					 "%s: Governor %s already present\n",
					 __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq,
							DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					dev_warn(dev,
						 "%s: Governor %s stop = %d\n",
						 __func__,
						 devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_add_governor);

/**
 * devfreq_remove_governor() - Remove devfreq governor
 * @governor: the devfreq governor to be removed
 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__,
		       governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	list_for_each_entry(devfreq, &devfreq_list, node) {
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name,
			     DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n",
					 __func__, governor->name);
				continue;
				/* Fall through */
			}
			ret = devfreq->governor->event_handler(devfreq,
						DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s stop=%d\n",
					 __func__, devfreq->governor->name,
					 ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
EXPORT_SYMBOL(devfreq_remove_governor);
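
/*
 * Illustrative sketch (hypothetical governor module): a governor is
 * normally registered from its own init/exit code using the two helpers
 * above. The "sample" governor and its callbacks do not exist in the tree.
 *
 *	static struct devfreq_governor devfreq_sample = {
 *		.name = "sample",
 *		.get_target_freq = sample_get_target_freq,
 *		.event_handler = sample_event_handler,
 *	};
 *
 *	static int __init devfreq_sample_init(void)
 *	{
 *		return devfreq_add_governor(&devfreq_sample);
 *	}
 *	subsys_initcall(devfreq_sample_init);
 *
 *	static void __exit devfreq_sample_exit(void)
 *	{
 *		if (devfreq_remove_governor(&devfreq_sample))
 *			pr_err("%s: failed to remove governor\n", __func__);
 *	}
 *	module_exit(devfreq_sample_exit);
 */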

static ssize_t show_governor(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	if (!to_devfreq(dev)->governor)
		return -EINVAL;

	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	struct devfreq_governor *governor;

	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&devfreq_list_lock);
	governor = find_devfreq_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor)
		goto out;

	if (df->governor) {
		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
		if (ret) {
			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
				 __func__, df->governor->name, ret);
			goto out;
		}
	}
	df->data = find_governor_data(df->profile, str_governor);
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret)
		ret = count;
	return ret;
}

static ssize_t show_available_governors(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct devfreq_governor *tmp_governor;
	ssize_t count = 0;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%s ", tmp_governor->name);
	mutex_unlock(&devfreq_list_lock);

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}

static ssize_t show_freq(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long freq;
	struct devfreq *devfreq = to_devfreq(dev);

	if (devfreq->profile->get_cur_freq &&
		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
		return sprintf(buf, "%lu\n", freq);

	return sprintf(buf, "%lu\n", devfreq->previous_freq);
}

static ssize_t show_target_freq(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}

static ssize_t show_polling_interval(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t store_polling_interval(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
	ret = count;

	return ret;
}

static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}

static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
	return ret;
}

static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}

static ssize_t show_available_freqs(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	rcu_read_lock();
	do {
		opp = opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}

static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j, err;
	unsigned int max_state = devfreq->profile->max_state;

	err = devfreq_update_status(devfreq, devfreq->previous_freq);
	if (err)
		return 0;

	len = sprintf(buf, " From : To\n");
	len += sprintf(buf + len, " :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%8u",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, " time(ms)\n");

	for (i = 0; i < max_state; i++) {
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%8u:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%8u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
				jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
			devfreq->total_trans);
	return len;
}

static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
	__ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
	__ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
	__ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
	{ },
};

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		/* alloc_workqueue() returns NULL on failure, not ERR_PTR */
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_attrs = devfreq_attrs;

	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev: The devfreq user device. (parent of devfreq)
 * @freq: The frequency given to target function
 * @flags: Flags handed from devfreq framework.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for the same is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
				    u32 flags)
{
	struct opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = opp_find_freq_floor(dev, freq);
	}

	return opp;
}
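
/*
 * Illustrative sketch (hypothetical driver callback): a driver's target()
 * callback usually maps the requested frequency to an OPP with the helper
 * above, under rcu_read_lock() as documented, before programming clocks and
 * regulators. Only the OPP/devfreq calls are real APIs.
 *
 *	static int sample_target(struct device *dev, unsigned long *freq,
 *				 u32 flags)
 *	{
 *		struct opp *opp;
 *		unsigned long volt;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		volt = opp_get_voltage(opp);
 *		rcu_read_unlock();
 *
 *		// set the regulator to volt and the clock to *freq here
 *		return 0;
 *	}
 */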

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_register(nh, &devfreq->nb);

	return ret;
}

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
 *				       being notified of any further changes
 *				       in the OPP availability
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh;
	int ret = 0;

	rcu_read_lock();
	nh = opp_get_notifier(dev);
	if (IS_ERR(nh))
		ret = PTR_ERR(nh);
	rcu_read_unlock();
	if (!ret)
		ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);

	return ret;
}

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");