/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */
8
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, bool from_wq);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	/* Clamp negative deltas (e.g. timestamp ahead of 'now') to zero. */
	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	/* Do not account time while run-time PM is disabled for the device. */
	if (dev->power.disable_depth > 0)
		return;

	/* Charge the elapsed time to the state we are about to leave. */
	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}
48
/*
 * Set a new runtime PM status after folding the time spent in the previous
 * state into the accounting counters (see update_pm_runtime_accounting()).
 */
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
54
55/**
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020056 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
57 * @dev: Device to handle.
58 */
59static void pm_runtime_deactivate_timer(struct device *dev)
60{
61 if (dev->power.timer_expires > 0) {
62 del_timer(&dev->power.suspend_timer);
63 dev->power.timer_expires = 0;
64 }
65}
66
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}
80
/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * Return 0 after a successful notification, or a negative error code if the
 * device is not in a state that allows one.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it. Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	/*
	 * Look the callback up in bus, device type, then class order.  The
	 * lock is dropped around the call so the callback may sleep; the
	 * idle_notification flag keeps concurrent notifications out.
	 */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->type->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->class->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
147
148/**
149 * pm_runtime_idle - Notify device bus type if the device can be suspended.
150 * @dev: Device to notify the bus type about.
151 */
152int pm_runtime_idle(struct device *dev)
153{
154 int retval;
155
156 spin_lock_irq(&dev->power.lock);
157 retval = __pm_runtime_idle(dev);
158 spin_unlock_irq(&dev->power.lock);
159
160 return retval;
161}
162EXPORT_SYMBOL_GPL(pm_runtime_idle);
163
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type.  If another suspend has been started earlier, wait
 * for it to finish.  If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * Return 1 if the device is already suspended, 0 on success, or a negative
 * error code on failure.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		/* The workqueue must not block on a suspend in progress. */
		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);
	dev->power.deferred_resume = false;

	/*
	 * Look the callback up in bus, device type, then class order.  The
	 * lock is dropped around the call so the callback may sleep; the
	 * RPM_SUSPENDING status keeps other operations out meanwhile.
	 */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		if (retval == -EAGAIN || retval == -EBUSY) {
			/*
			 * Transient refusal: do not record it as an error,
			 * and notify for idle unless a new suspend has been
			 * scheduled in the meantime.
			 */
			if (dev->power.timer_expires == 0)
				notify = true;
			dev->power.runtime_error = 0;
		} else {
			pm_runtime_cancel_pending(dev);
		}
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		/* One fewer unsuspended child for the parent. */
		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	/* Serve a resume that was requested while we were suspending. */
	if (dev->power.deferred_resume) {
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	/* The parent may be able to suspend now that this child is. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}
310
311/**
312 * pm_runtime_suspend - Carry out run-time suspend of given device.
313 * @dev: Device to suspend.
314 */
315int pm_runtime_suspend(struct device *dev)
316{
317 int retval;
318
319 spin_lock_irq(&dev->power.lock);
320 retval = __pm_runtime_suspend(dev, false);
321 spin_unlock_irq(&dev->power.lock);
322
323 return retval;
324}
325EXPORT_SYMBOL_GPL(pm_runtime_suspend);
326
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type.  If another resume has been started earlier, wait
 * for it to finish.  If there's a suspend running in parallel with this
 * function, wait for it to finish and resume the device.  Cancel any scheduled
 * or pending requests.
 *
 * Return 1 if the device is already active, 0 on success, or a negative error
 * code on failure.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			/*
			 * The workqueue must not block here: ask a suspend in
			 * progress to resume the device once it is finished.
			 */
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	__update_runtime_status(dev, RPM_RESUMING);

	/*
	 * Look the callback up in bus, device type, then class order.  The
	 * lock is dropped around the call so the callback may sleep; the
	 * RPM_RESUMING status keeps other operations out meanwhile.
	 */
	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	/* Let the device's subsystem check for new work after the wake-up. */
	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		/* Drop the reference taken with pm_runtime_get_noresume(). */
		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}
477
478/**
479 * pm_runtime_resume - Carry out run-time resume of given device.
480 * @dev: Device to suspend.
481 */
482int pm_runtime_resume(struct device *dev)
483{
484 int retval;
485
486 spin_lock_irq(&dev->power.lock);
487 retval = __pm_runtime_resume(dev, false);
488 spin_unlock_irq(&dev->power.lock);
489
490 return retval;
491}
492EXPORT_SYMBOL_GPL(pm_runtime_resume);
493
/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	/* The request may have been canceled after this work was queued. */
	if (!dev->power.request_pending)
		goto out;

	/* Consume the request before the handlers drop the lock. */
	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, true);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, true);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}
532
/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}
573
574/**
575 * pm_request_idle - Submit an idle notification request for given device.
576 * @dev: Device to handle.
577 */
578int pm_request_idle(struct device *dev)
579{
580 unsigned long flags;
581 int retval;
582
583 spin_lock_irqsave(&dev->power.lock, flags);
584 retval = __pm_request_idle(dev);
585 spin_unlock_irqrestore(&dev->power.lock, flags);
586
587 return retval;
588}
589EXPORT_SYMBOL_GPL(pm_request_idle);
590
/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * Return 1 if the device is already suspended, 0 if a suspend request was
 * queued (or left pending), or a negative error code otherwise.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	/* retval == 1 (already suspended) falls through on purpose. */
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
						RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		/* Already suspended: nothing to queue. */
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}
639
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/*
	 * A zero timer_expires means the timer was deactivated meanwhile; an
	 * expiry still in the future means we fired too early.  In either
	 * case do nothing.
	 */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
663
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* A zero delay means "submit the suspend request right away". */
	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	/* timer_expires of zero means "inactive", so avoid it on wrap. */
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
721
/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * Return 1 if the device is already active, 0 if a resume request was queued
 * (or deferred), or a negative error code otherwise.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	/* retval == 1 (already active) falls through on purpose. */
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		/* Ask the suspend in progress to resume the device when done. */
		dev->power.deferred_resume = true;
		return retval;
	}
	if (dev->power.request_pending) {
		/* If non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	}
	if (retval)
		return retval;

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}
764
765/**
766 * pm_request_resume - Submit a resume request for given device.
767 * @dev: Device to resume.
768 */
769int pm_request_resume(struct device *dev)
770{
771 unsigned long flags;
772 int retval;
773
774 spin_lock_irqsave(&dev->power.lock, flags);
775 retval = __pm_request_resume(dev);
776 spin_unlock_irqrestore(&dev->power.lock, flags);
777
778 return retval;
779}
780EXPORT_SYMBOL_GPL(pm_request_resume);
781
782/**
783 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
784 * @dev: Device to handle.
785 * @sync: If set and the device is suspended, resume it synchronously.
786 *
Alan Stern1d531c12009-12-13 20:28:30 +0100787 * Increment the usage count of the device and resume it or submit a resume
788 * request for it, depending on the value of @sync.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200789 */
790int __pm_runtime_get(struct device *dev, bool sync)
791{
Alan Stern1d531c12009-12-13 20:28:30 +0100792 int retval;
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200793
Alan Stern1d531c12009-12-13 20:28:30 +0100794 atomic_inc(&dev->power.usage_count);
795 retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200796
797 return retval;
798}
799EXPORT_SYMBOL_GPL(__pm_runtime_get);
800
801/**
802 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
803 * @dev: Device to handle.
804 * @sync: If the device's bus type is to be notified, do that synchronously.
805 *
806 * Decrement the usage count of the device and if it reaches zero, carry out a
807 * synchronous idle notification or submit an idle notification request for it,
808 * depending on the value of @sync.
809 */
810int __pm_runtime_put(struct device *dev, bool sync)
811{
812 int retval = 0;
813
814 if (atomic_dec_and_test(&dev->power.usage_count))
815 retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
816
817 return retval;
818}
819EXPORT_SYMBOL_GPL(__pm_runtime_put);
820
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/* Overriding the status is only allowed if run-time PM cannot act. */
	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		/* The parent's lock nests inside the child's lock here. */
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
900
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		/* Drop the lock: the work function acquires it, too. */
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
946
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	/* Block suspends and idle notifications while the barrier runs. */
	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, false);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
982
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	/* Already disabled: just increase the nesting depth. */
	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1031
1032/**
1033 * pm_runtime_enable - Enable run-time PM of a device.
1034 * @dev: Device to handle.
1035 */
1036void pm_runtime_enable(struct device *dev)
1037{
1038 unsigned long flags;
1039
1040 spin_lock_irqsave(&dev->power.lock, flags);
1041
1042 if (dev->power.disable_depth > 0)
1043 dev->power.disable_depth--;
1044 else
1045 dev_warn(dev, "Unbalanced %s!\n", __func__);
1046
1047 spin_unlock_irqrestore(&dev->power.lock, flags);
1048}
1049EXPORT_SYMBOL_GPL(pm_runtime_enable);
1050
1051/**
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001052 * pm_runtime_forbid - Block run-time PM of a device.
1053 * @dev: Device to handle.
1054 *
1055 * Increase the device's usage count and clear its power.runtime_auto flag,
1056 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1057 * for it.
1058 */
1059void pm_runtime_forbid(struct device *dev)
1060{
1061 spin_lock_irq(&dev->power.lock);
1062 if (!dev->power.runtime_auto)
1063 goto out;
1064
1065 dev->power.runtime_auto = false;
1066 atomic_inc(&dev->power.usage_count);
1067 __pm_runtime_resume(dev, false);
1068
1069 out:
1070 spin_unlock_irq(&dev->power.lock);
1071}
1072EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1073
1074/**
1075 * pm_runtime_allow - Unblock run-time PM of a device.
1076 * @dev: Device to handle.
1077 *
1078 * Decrease the device's usage count and set its power.runtime_auto flag.
1079 */
1080void pm_runtime_allow(struct device *dev)
1081{
1082 spin_lock_irq(&dev->power.lock);
1083 if (dev->power.runtime_auto)
1084 goto out;
1085
1086 dev->power.runtime_auto = true;
1087 if (atomic_dec_and_test(&dev->power.usage_count))
1088 __pm_runtime_idle(dev);
1089
1090 out:
1091 spin_unlock_irq(&dev->power.lock);
1092}
1093EXPORT_SYMBOL_GPL(pm_runtime_allow);
1094
/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	/* Run-time PM starts out disabled; pm_runtime_enable() drops this. */
	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	/* Zero means "no suspend timer armed". */
	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
1125
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}
1137}