/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>

static int __pm_runtime_resume(struct device *dev, int rpmflags);
static int __pm_request_idle(struct device *dev);
static int __pm_request_resume(struct device *dev);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	int delta;

	delta = now - dev->power.accounting_timestamp;

	if (delta < 0)
		delta = 0;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * __pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification request is pending, cancel it. Any
		 * other pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->type->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_idle) {
		spin_unlock_irq(&dev->power.lock);

		dev->class->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}

/**
 * pm_runtime_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 */
int pm_runtime_idle(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_idle(dev);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_idle);
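
/*
 * Illustrative sketch (not part of the original file): a bus type's
 * ->runtime_idle() callback is the usual consumer of this notification and
 * often just tries to suspend the device right away.  The callback below is
 * hypothetical and assumes nothing beyond pm_runtime_suspend() defined later
 * in this file.
 *
 *	static int example_bus_runtime_idle(struct device *dev)
 *	{
 *		return pm_runtime_suspend(dev);
 *	}
 */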

/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device can be suspended and run the ->runtime_suspend() callback
 * provided by its bus type. If another suspend has been started earlier,
 * either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT flag. If an idle notification or suspend request is pending or
 * scheduled, cancel it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & RPM_NOWAIT) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);
	dev->power.deferred_resume = false;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_suspend) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		if (retval == -EAGAIN || retval == -EBUSY) {
			if (dev->power.timer_expires == 0)
				notify = true;
			dev->power.runtime_error = 0;
		} else {
			pm_runtime_cancel_pending(dev);
		}
	} else {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_deactivate_timer(dev);

		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		__pm_runtime_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}

/**
 * pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 */
int pm_runtime_suspend(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_suspend(dev, 0);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_suspend);
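
/*
 * Hypothetical sketch of a ->runtime_suspend() callback as invoked by
 * __pm_runtime_suspend() above; the example_* helpers are made up.  What
 * matters is the return value: 0 marks the device RPM_SUSPENDED,
 * -EAGAIN/-EBUSY leave it RPM_ACTIVE without setting power.runtime_error,
 * and any other error code is treated as fatal.
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		if (example_device_busy(dev))
 *			return -EBUSY;
 *		example_save_state(dev);
 *		example_power_off(dev);
 *		return 0;
 *	}
 */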

/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type. If another resume has been started earlier,
 * either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT flag. If there's a suspend running in parallel with this
 * function, either tell the other process to resume after suspending
 * (deferred_resume) or wait for it to finish, depending on the RPM_NOWAIT
 * flag. Cancel any scheduled or pending requests.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_runtime_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & RPM_NOWAIT) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's resume counter and resume it if
		 * necessary.
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's run-time PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->type && dev->type->pm
	    && dev->type->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->type->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else if (dev->class && dev->class->pm
	    && dev->class->pm->runtime_resume) {
		spin_unlock_irq(&dev->power.lock);

		retval = dev->class->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);

	return retval;
}

/**
 * pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 */
int pm_runtime_resume(struct device *dev)
{
	int retval;

	spin_lock_irq(&dev->power.lock);
	retval = __pm_runtime_resume(dev, 0);
	spin_unlock_irq(&dev->power.lock);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_resume);
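
/*
 * Hypothetical sketch of the matching ->runtime_resume() callback as invoked
 * by __pm_runtime_resume(); the example_* helpers are made up.  Returning 0
 * moves the device to RPM_ACTIVE, while any error leaves it RPM_SUSPENDED
 * with power.runtime_error set.
 *
 *	static int example_runtime_resume(struct device *dev)
 *	{
 *		example_power_on(dev);
 *		example_restore_state(dev);
 *		return 0;
 *	}
 */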

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		__pm_runtime_idle(dev);
		break;
	case RPM_REQ_SUSPEND:
		__pm_runtime_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_RESUME:
		__pm_runtime_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * __pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 *
 * Check if the device's run-time PM status is correct for suspending the device
 * and queue up a request to run __pm_runtime_idle() for it.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_idle(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status == RPM_SUSPENDED
	    || dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		return retval;

	if (dev->power.request_pending) {
		/* Any requests other than RPM_REQ_IDLE take precedence. */
		if (dev->power.request == RPM_REQ_NONE)
			dev->power.request = RPM_REQ_IDLE;
		else if (dev->power.request != RPM_REQ_IDLE)
			retval = -EAGAIN;
		return retval;
	}

	dev->power.request = RPM_REQ_IDLE;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_idle - Submit an idle notification request for given device.
 * @dev: Device to handle.
 */
int pm_request_idle(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_idle(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_idle);

/**
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
				RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and execute __pm_request_suspend() in that case.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies', we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		__pm_request_suspend(dev);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval = 0;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	if (!delay) {
		retval = __pm_request_suspend(dev);
		goto out;
	}

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but any
		 * other pending requests have to be canceled.
		 */
		if (dev->power.request == RPM_REQ_RESUME) {
			retval = -EAGAIN;
			goto out;
		}
		dev->power.request = RPM_REQ_NONE;
	}

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	if (!dev->power.timer_expires)
		dev->power.timer_expires = 1;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
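
/*
 * Hypothetical usage sketch: a driver that has just finished a burst of I/O
 * can queue a suspend request after an inactivity window instead of
 * suspending immediately.  The 2000 ms delay and the example_io_done() name
 * are arbitrary, and pm_runtime_put_noidle() is assumed to come from
 * <linux/pm_runtime.h>.
 *
 *	static void example_io_done(struct device *dev)
 *	{
 *		pm_runtime_put_noidle(dev);
 *		pm_schedule_suspend(dev, 2000);
 *	}
 */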

/**
 * __pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_resume(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING)
		retval = -EINPROGRESS;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		dev->power.deferred_resume = true;
		return retval;
	}
	if (dev->power.request_pending) {
		/* If non-resume request is pending, we can overtake it. */
		dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
		return retval;
	}
	if (retval)
		return retval;

	dev->power.request = RPM_REQ_RESUME;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return retval;
}

/**
 * pm_request_resume - Submit a resume request for given device.
 * @dev: Device to resume.
 */
int pm_request_resume(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = __pm_request_resume(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_request_resume);

/**
 * __pm_runtime_get - Reference count a device and wake it up, if necessary.
 * @dev: Device to handle.
 * @rpmflags: Flag bits.
 *
 * Increment the usage count of the device and resume it or submit a resume
 * request for it, depending on the RPM_ASYNC flag bit.
 */
int __pm_runtime_get(struct device *dev, int rpmflags)
{
	int retval;

	atomic_inc(&dev->power.usage_count);
	retval = (rpmflags & RPM_ASYNC) ?
		pm_request_resume(dev) : pm_runtime_resume(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_get);

/**
 * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
 * @dev: Device to handle.
 * @rpmflags: Flag bits.
 *
 * Decrement the usage count of the device and if it reaches zero, carry out a
 * synchronous idle notification or submit an idle notification request for it,
 * depending on the RPM_ASYNC flag bit.
 */
int __pm_runtime_put(struct device *dev, int rpmflags)
{
	int retval = 0;

	if (atomic_dec_and_test(&dev->power.usage_count))
		retval = (rpmflags & RPM_ASYNC) ?
			pm_request_idle(dev) : pm_runtime_idle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_put);
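
/*
 * Hypothetical usage sketch: drivers normally reach the two helpers above
 * through static inline wrappers declared in <linux/pm_runtime.h> (such as
 * pm_runtime_get_sync(), pm_runtime_get() and pm_runtime_put()), keeping the
 * get/put calls balanced around each I/O operation.  The example_transfer()
 * helper is made up.
 *
 *	static int example_do_io(struct device *dev)
 *	{
 *		int error = pm_runtime_get_sync(dev);
 *
 *		if (error < 0) {
 *			pm_runtime_put(dev);
 *			return error;
 *		}
 *		example_transfer(dev);
 *		pm_runtime_put(dev);
 *		return 0;
 *	}
 */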

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has run-time PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
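
/*
 * Hypothetical usage sketch: a driver whose hardware is left powered on by
 * firmware can tell the core about it before enabling run-time PM.  This
 * assumes the pm_runtime_set_active()/pm_runtime_set_suspended() wrappers
 * from <linux/pm_runtime.h>, which call __pm_runtime_set_status() with
 * RPM_ACTIVE or RPM_SUSPENDED respectively.
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */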

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		__pm_runtime_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete. The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling run-time PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
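
/*
 * Hypothetical usage sketch: enable/disable calls must stay balanced.  A
 * driver typically calls pm_runtime_enable() at the end of probe (see the
 * sketch after __pm_runtime_set_status() above) and pm_runtime_disable(),
 * assumed here to be the <linux/pm_runtime.h> wrapper around
 * __pm_runtime_disable(dev, true), at the start of remove so that no
 * run-time PM callbacks run while the driver is being torn down.
 *
 *	static int example_remove(struct device *dev)
 *	{
 *		pm_runtime_disable(dev);
 *		example_teardown(dev);
 *		return 0;
 *	}
 */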

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	__pm_runtime_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		__pm_runtime_idle(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
}