/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/errno.h>
16#include <linux/err.h>
17#include <linux/init.h>
MyungJoo Ham952f6d12011-11-10 10:16:23 +010018#include <linux/module.h>
MyungJoo Hama3c98b82011-10-02 00:19:15 +020019#include <linux/slab.h>
MyungJoo Ham952f6d12011-11-10 10:16:23 +010020#include <linux/stat.h>
MyungJoo Hama3c98b82011-10-02 00:19:15 +020021#include <linux/opp.h>
22#include <linux/devfreq.h>
23#include <linux/workqueue.h>
24#include <linux/platform_device.h>
25#include <linux/list.h>
26#include <linux/printk.h>
27#include <linux/hrtimer.h>
28#include "governor.h"
29
/* sysfs class under which every devfreq device node is registered */
struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
/* freezable so polling stops across system suspend/resume transitions */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq, guarded by devfreq_list_lock */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
42
43/**
44 * find_device_devfreq() - find devfreq struct using device pointer
45 * @dev: device pointer used to lookup device devfreq.
46 *
47 * Search the list of device devfreqs and return the matched device's
48 * devfreq info. devfreq_list_lock should be held by the caller.
49 */
50static struct devfreq *find_device_devfreq(struct device *dev)
51{
52 struct devfreq *tmp_devfreq;
53
54 if (unlikely(IS_ERR_OR_NULL(dev))) {
55 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
56 return ERR_PTR(-EINVAL);
57 }
58 WARN(!mutex_is_locked(&devfreq_list_lock),
59 "devfreq_list_lock must be locked.");
60
61 list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
62 if (tmp_devfreq->dev.parent == dev)
63 return tmp_devfreq;
64 }
65
66 return ERR_PTR(-ENODEV);
67}
68
/* Load monitoring helper functions for governors use */

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Asks the governor for a target frequency, clamps it against the
 * user-set min/max limits, and asks the driver to apply it.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 *	 This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority (max_freq is applied last so it
	 * wins over min_freq):
	 * max_freq (probably called by thermal when it's too hot)
	 * min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	/* driver applies the clamped frequency; may adjust freq in place */
	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	devfreq->previous_freq = freq;
	return err;
}
118
119/**
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200120 * devfreq_monitor() - Periodically poll devfreq objects.
121 * @work: the work struct used to run devfreq_monitor periodically.
122 *
123 */
124static void devfreq_monitor(struct work_struct *work)
125{
126 int err;
127 struct devfreq *devfreq = container_of(work,
128 struct devfreq, work.work);
129
130 mutex_lock(&devfreq->lock);
131 err = update_devfreq(devfreq);
132 if (err)
133 dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
134
135 queue_delayed_work(devfreq_wq, &devfreq->work,
136 msecs_to_jiffies(devfreq->profile->polling_ms));
137 mutex_unlock(&devfreq->lock);
138}
139
140/**
141 * devfreq_monitor_start() - Start load monitoring of devfreq instance
142 * @devfreq: the devfreq instance.
143 *
144 * Helper function for starting devfreq device load monitoing. By
145 * default delayed work based monitoring is supported. Function
146 * to be called from governor in response to DEVFREQ_GOV_START
147 * event when device is added to devfreq framework.
148 */
149void devfreq_monitor_start(struct devfreq *devfreq)
150{
151 INIT_DELAYED_WORK_DEFERRABLE(&devfreq->work, devfreq_monitor);
152 if (devfreq->profile->polling_ms)
153 queue_delayed_work(devfreq_wq, &devfreq->work,
154 msecs_to_jiffies(devfreq->profile->polling_ms));
155}
156
/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	/* waits for a running devfreq_monitor() to finish before returning */
	cancel_delayed_work_sync(&devfreq->work);
}
169
170/**
171 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
172 * @devfreq: the devfreq instance.
173 *
174 * Helper function to suspend devfreq device load monitoing. Function
175 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
176 * event or when polling interval is set to zero.
177 *
178 * Note: Though this function is same as devfreq_monitor_stop(),
179 * intentionally kept separate to provide hooks for collecting
180 * transition statistics.
181 */
182void devfreq_monitor_suspend(struct devfreq *devfreq)
183{
184 mutex_lock(&devfreq->lock);
185 if (devfreq->stop_polling) {
186 mutex_unlock(&devfreq->lock);
187 return;
188 }
189
190 devfreq->stop_polling = true;
191 mutex_unlock(&devfreq->lock);
192 cancel_delayed_work_sync(&devfreq->work);
193}
194
195/**
196 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
197 * @devfreq: the devfreq instance.
198 *
199 * Helper function to resume devfreq device load monitoing. Function
200 * to be called from governor in response to DEVFREQ_GOV_RESUME
201 * event or when polling interval is set to non-zero.
202 */
203void devfreq_monitor_resume(struct devfreq *devfreq)
204{
205 mutex_lock(&devfreq->lock);
206 if (!devfreq->stop_polling)
207 goto out;
208
209 if (!delayed_work_pending(&devfreq->work) &&
210 devfreq->profile->polling_ms)
211 queue_delayed_work(devfreq_wq, &devfreq->work,
212 msecs_to_jiffies(devfreq->profile->polling_ms));
213 devfreq->stop_polling = false;
214
215out:
216 mutex_unlock(&devfreq->lock);
217}
218
/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* monitoring is suspended; the new interval takes effect on resume */
	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		/*
		 * Drop the lock before the synchronous cancel:
		 * devfreq_monitor() takes devfreq->lock, so cancelling
		 * under it would deadlock.
		 */
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		/* state may have changed while unlocked; re-check stop_polling */
		mutex_lock(&devfreq->lock);
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
			      msecs_to_jiffies(devfreq->profile->polling_ms));
	}
	/* cur_delay <= new_delay: let the pending work expire naturally */
out:
	mutex_unlock(&devfreq->lock);
}
264
265/**
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200266 * devfreq_notifier_call() - Notify that the device frequency requirements
267 * has been changed out of devfreq framework.
268 * @nb the notifier_block (supposed to be devfreq->nb)
269 * @type not used
270 * @devp not used
271 *
272 * Called by a notifier that uses devfreq->nb.
273 */
274static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
275 void *devp)
276{
277 struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
278 int ret;
279
280 mutex_lock(&devfreq->lock);
281 ret = update_devfreq(devfreq);
282 mutex_unlock(&devfreq->lock);
283
284 return ret;
285}
286
/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 *
 * @skip is true when invoked from devfreq_dev_release() (the device is
 * already being torn down), false when invoked from devfreq_remove_device().
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	mutex_lock(&devfreq_list_lock);
	/*
	 * Absence from the list means another path already removed (and
	 * will free) this devfreq; bail out to avoid a double free.
	 */
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);

	/* optional driver-specific teardown */
	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
316
/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() is not called.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	/* skip=true: the device is already gone, don't unregister again */
	_remove_devfreq(devfreq, true);
}
331
332/**
333 * devfreq_add_device() - Add devfreq feature to the device
334 * @dev: the device to add devfreq feature.
335 * @profile: device-specific profile to run devfreq.
336 * @governor: the policy to choose frequency.
337 * @data: private data for the governor. The devfreq framework does not
338 * touch this value.
339 */
340struct devfreq *devfreq_add_device(struct device *dev,
341 struct devfreq_dev_profile *profile,
342 const struct devfreq_governor *governor,
343 void *data)
344{
345 struct devfreq *devfreq;
346 int err = 0;
347
348 if (!dev || !profile || !governor) {
349 dev_err(dev, "%s: Invalid parameters.\n", __func__);
350 return ERR_PTR(-EINVAL);
351 }
352
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200353 mutex_lock(&devfreq_list_lock);
354 devfreq = find_device_devfreq(dev);
355 mutex_unlock(&devfreq_list_lock);
356 if (!IS_ERR(devfreq)) {
357 dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
358 err = -EINVAL;
359 goto err_out;
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200360 }
361
362 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
363 if (!devfreq) {
364 dev_err(dev, "%s: Unable to create devfreq for the device\n",
365 __func__);
366 err = -ENOMEM;
Axel Lin3f19f082011-11-15 21:59:09 +0100367 goto err_out;
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200368 }
369
370 mutex_init(&devfreq->lock);
371 mutex_lock(&devfreq->lock);
372 devfreq->dev.parent = dev;
373 devfreq->dev.class = devfreq_class;
374 devfreq->dev.release = devfreq_dev_release;
375 devfreq->profile = profile;
376 devfreq->governor = governor;
377 devfreq->previous_freq = profile->initial_freq;
378 devfreq->data = data;
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200379 devfreq->nb.notifier_call = devfreq_notifier_call;
380
381 dev_set_name(&devfreq->dev, dev_name(dev));
382 err = device_register(&devfreq->dev);
383 if (err) {
384 put_device(&devfreq->dev);
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200385 mutex_unlock(&devfreq->lock);
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200386 goto err_dev;
387 }
388
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200389 mutex_unlock(&devfreq->lock);
390
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200391 mutex_lock(&devfreq_list_lock);
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200392 list_add(&devfreq->node, &devfreq_list);
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200393 mutex_unlock(&devfreq_list_lock);
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200394
395 err = devfreq->governor->event_handler(devfreq,
396 DEVFREQ_GOV_START, NULL);
397 if (err) {
398 dev_err(dev, "%s: Unable to start governor for the device\n",
399 __func__);
400 goto err_init;
401 }
402
Axel Lin3f19f082011-11-15 21:59:09 +0100403 return devfreq;
404
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200405err_init:
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200406 list_del(&devfreq->node);
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200407 device_unregister(&devfreq->dev);
408err_dev:
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200409 kfree(devfreq);
Axel Lin3f19f082011-11-15 21:59:09 +0100410err_out:
411 return ERR_PTR(err);
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200412}
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200413EXPORT_SYMBOL(devfreq_add_device);
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200414
/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 *
 * Return: 0 on success, -EINVAL if @devfreq is NULL.
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	/* skip=false: unregister the sysfs device as part of removal */
	_remove_devfreq(devfreq, false);

	return 0;
}
EXPORT_SYMBOL(devfreq_remove_device);
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200429
Rajagopal Venkat6efab212012-10-26 01:50:18 +0200430/**
431 * devfreq_suspend_device() - Suspend devfreq of a device.
432 * @devfreq: the devfreq instance to be suspended
433 */
434int devfreq_suspend_device(struct devfreq *devfreq)
435{
436 if (!devfreq)
437 return -EINVAL;
438
439 return devfreq->governor->event_handler(devfreq,
440 DEVFREQ_GOV_SUSPEND, NULL);
441}
442EXPORT_SYMBOL(devfreq_suspend_device);
443
444/**
445 * devfreq_resume_device() - Resume devfreq of a device.
446 * @devfreq: the devfreq instance to be resumed
447 */
448int devfreq_resume_device(struct devfreq *devfreq)
449{
450 if (!devfreq)
451 return -EINVAL;
452
453 return devfreq->governor->event_handler(devfreq,
454 DEVFREQ_GOV_RESUME, NULL);
455}
456EXPORT_SYMBOL(devfreq_resume_device);
457
MyungJoo Ham9005b652011-10-02 00:19:28 +0200458static ssize_t show_governor(struct device *dev,
459 struct device_attribute *attr, char *buf)
460{
461 return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
462}
463
464static ssize_t show_freq(struct device *dev,
465 struct device_attribute *attr, char *buf)
466{
467 return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
468}
469
470static ssize_t show_polling_interval(struct device *dev,
471 struct device_attribute *attr, char *buf)
472{
473 return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
474}
475
476static ssize_t store_polling_interval(struct device *dev,
477 struct device_attribute *attr,
478 const char *buf, size_t count)
479{
480 struct devfreq *df = to_devfreq(dev);
481 unsigned int value;
482 int ret;
483
484 ret = sscanf(buf, "%u", &value);
485 if (ret != 1)
486 goto out;
487
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200488 df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
MyungJoo Ham9005b652011-10-02 00:19:28 +0200489 ret = count;
490
MyungJoo Ham9005b652011-10-02 00:19:28 +0200491out:
492 return ret;
493}
494
MyungJoo Ham6530b9d2011-12-09 16:42:19 +0900495static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
496 const char *buf, size_t count)
497{
498 struct devfreq *df = to_devfreq(dev);
499 unsigned long value;
500 int ret;
501 unsigned long max;
502
503 ret = sscanf(buf, "%lu", &value);
504 if (ret != 1)
505 goto out;
506
507 mutex_lock(&df->lock);
508 max = df->max_freq;
509 if (value && max && value > max) {
510 ret = -EINVAL;
511 goto unlock;
512 }
513
514 df->min_freq = value;
515 update_devfreq(df);
516 ret = count;
517unlock:
518 mutex_unlock(&df->lock);
519out:
520 return ret;
521}
522
523static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
524 char *buf)
525{
526 return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
527}
528
529static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
530 const char *buf, size_t count)
531{
532 struct devfreq *df = to_devfreq(dev);
533 unsigned long value;
534 int ret;
535 unsigned long min;
536
537 ret = sscanf(buf, "%lu", &value);
538 if (ret != 1)
539 goto out;
540
541 mutex_lock(&df->lock);
542 min = df->min_freq;
543 if (value && min && value < min) {
544 ret = -EINVAL;
545 goto unlock;
546 }
547
548 df->max_freq = value;
549 update_devfreq(df);
550 ret = count;
551unlock:
552 mutex_unlock(&df->lock);
553out:
554 return ret;
555}
556
557static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
558 char *buf)
559{
560 return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
561}
562
/* default sysfs attributes installed on every devfreq class device */
static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO, show_governor, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
	{ },	/* sentinel */
};
572
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200573static int __init devfreq_init(void)
574{
575 devfreq_class = class_create(THIS_MODULE, "devfreq");
576 if (IS_ERR(devfreq_class)) {
577 pr_err("%s: couldn't create class\n", __FILE__);
578 return PTR_ERR(devfreq_class);
579 }
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200580
581 devfreq_wq = create_freezable_workqueue("devfreq_wq");
582 if (IS_ERR(devfreq_wq)) {
583 class_destroy(devfreq_class);
584 pr_err("%s: couldn't create workqueue\n", __FILE__);
585 return PTR_ERR(devfreq_wq);
586 }
MyungJoo Ham9005b652011-10-02 00:19:28 +0200587 devfreq_class->dev_attrs = devfreq_attrs;
Rajagopal Venkat97ad73e2012-10-26 01:50:09 +0200588
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200589 return 0;
590}
591subsys_initcall(devfreq_init);
592
/* devfreq_exit() - Tear down the devfreq class and monitoring workqueue. */
static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);
599
/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

605/**
606 * devfreq_recommended_opp() - Helper function to get proper OPP for the
607 * freq value given to target callback.
608 * @dev The devfreq user device. (parent of devfreq)
609 * @freq The frequency given to target function
MyungJoo Hamab5f2992012-03-16 21:54:53 +0100610 * @flags Flags handed from devfreq framework.
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200611 *
612 */
MyungJoo Hamab5f2992012-03-16 21:54:53 +0100613struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
614 u32 flags)
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200615{
MyungJoo Hamab5f2992012-03-16 21:54:53 +0100616 struct opp *opp;
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200617
MyungJoo Hamab5f2992012-03-16 21:54:53 +0100618 if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
619 /* The freq is an upper bound. opp should be lower */
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200620 opp = opp_find_freq_floor(dev, freq);
MyungJoo Hamab5f2992012-03-16 21:54:53 +0100621
622 /* If not available, use the closest opp */
623 if (opp == ERR_PTR(-ENODEV))
624 opp = opp_find_freq_ceil(dev, freq);
625 } else {
626 /* The freq is an lower bound. opp should be higher */
627 opp = opp_find_freq_ceil(dev, freq);
628
629 /* If not available, use the closest opp */
630 if (opp == ERR_PTR(-ENODEV))
631 opp = opp_find_freq_floor(dev, freq);
632 }
633
MyungJoo Hama3c98b82011-10-02 00:19:15 +0200634 return opp;
635}
636
637/**
638 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
639 * for any changes in the OPP availability
640 * changes
641 * @dev The devfreq user device. (parent of devfreq)
642 * @devfreq The devfreq object.
643 */
644int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
645{
646 struct srcu_notifier_head *nh = opp_get_notifier(dev);
647
648 if (IS_ERR(nh))
649 return PTR_ERR(nh);
650 return srcu_notifier_chain_register(nh, &devfreq->nb);
651}
652
653/**
654 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
655 * notified for any changes in the OPP
656 * availability changes anymore.
657 * @dev The devfreq user device. (parent of devfreq)
658 * @devfreq The devfreq object.
659 *
660 * At exit() callback of devfreq_dev_profile, this must be included if
661 * devfreq_recommended_opp is used.
662 */
663int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
664{
665 struct srcu_notifier_head *nh = opp_get_notifier(dev);
666
667 if (IS_ERR(nh))
668 return PTR_ERR(nh);
669 return srcu_notifier_chain_unregister(nh, &devfreq->nb);
670}
671
672MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
673MODULE_DESCRIPTION("devfreq class support");
674MODULE_LICENSE("GPL");