/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

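/*
 * Illustrative example (editor's addition, not part of the original file):
 * a driver using the autosuspend mechanism typically refreshes power.last_busy
 * from its I/O path and then drops its usage count with the autosuspend
 * variant of "put", so that the expiration time computed above keeps moving
 * forward while the device is busy.  A minimal sketch, assuming a driver that
 * holds a struct device *dev:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * Roughly autosuspend_delay milliseconds after the last such call, the timer
 * armed by rpm_suspend() fires and another suspend attempt is made.
 */
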
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_idle;

	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	retval = __rpm_callback(cb, dev);

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification to the device's parent (provided that neither
 * parent->power.ignore_children nor dev->power.irq_safe is set).
 * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_suspend;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		if (retval == -EAGAIN || retval == -EBUSY) {
			dev->power.runtime_error = 0;

			/*
			 * If the callback routine failed an autosuspend, and
			 * if the last_busy time has been updated so that there
			 * is a new autosuspend expiration time, automatically
			 * reschedule another autosuspend.
			 */
			if ((rpmflags & RPM_AUTO) &&
			    pm_runtime_autosuspend_expiration(dev) != 0)
				goto repeat;
		} else {
			pm_runtime_cancel_pending(dev);
		}
		wake_up_all(&dev->power.wait_queue);
		goto out;
	}
 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_resume;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

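/*
 * Illustrative example (editor's addition, not part of the original file):
 * a hypothetical driver that knows its hardware may stay powered for a short
 * grace period after the last request might arm the suspend timer from its
 * completion path.  A minimal sketch:
 *
 *	int err = pm_schedule_suspend(dev, 500);	// suspend in ~500 ms
 *	if (err < 0)
 *		dev_dbg(dev, "suspend not scheduled: %d\n", err);
 *
 * A nonzero usage count or an unsuspended child simply makes this return an
 * error from rpm_check_suspend_allowed() without arming the timer.
 */
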
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

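/*
 * Note (editor's addition, not part of the original file): drivers normally
 * reach this entry point through the static inline wrappers in
 * include/linux/pm_runtime.h rather than calling it directly; for instance
 * pm_runtime_idle(dev) is expected to map to __pm_runtime_idle(dev, 0) and
 * pm_request_idle(dev) to __pm_runtime_idle(dev, RPM_ASYNC).  A typical
 * caller in a hypothetical driver simply does:
 *
 *	pm_request_idle(dev);	// asynchronous idle check via the PM workqueue
 */
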
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

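/*
 * Note (editor's addition, not part of the original file): the suspend-side
 * helpers in include/linux/pm_runtime.h are thin wrappers around this entry
 * point; for instance pm_runtime_suspend(dev) is expected to be
 * __pm_runtime_suspend(dev, 0), and pm_runtime_put_autosuspend(dev) to be
 * __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO).  A minimal
 * sketch of the usual idiom at the end of an I/O operation:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);	// drop the reference, suspend later
 */
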
/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

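/*
 * Illustrative example (editor's addition, not part of the original file):
 * the canonical driver-side pattern built on this entry point is
 * pm_runtime_get_sync(), expected to be __pm_runtime_resume(dev, RPM_GET_PUT).
 * A minimal sketch of an I/O path in a hypothetical driver:
 *
 *	int ret = pm_runtime_get_sync(dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);	// balance the usage count
 *		return ret;
 *	}
 *	// ... access the now-active hardware ...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */
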
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

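/*
 * Illustrative example (editor's addition, not part of the original file):
 * a hypothetical probe routine that leaves the hardware powered up typically
 * tells the core about it before enabling runtime PM, using the
 * pm_runtime_set_active() wrapper around this function:
 *
 *	pm_runtime_set_active(dev);	// status follows the real hardware state
 *	pm_runtime_enable(dev);
 *
 * If the device is actually powered down at probe time, the matching call is
 * pm_runtime_set_suspended(dev), which is also the initial status set by
 * pm_runtime_init() below.
 */
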
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

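/*
 * Illustrative example (editor's addition, not part of the original file):
 * a hypothetical driver whose ->runtime_suspend()/->runtime_resume() callbacks
 * only poke registers and never sleep can opt into irq-safe operation at probe
 * time and may then resume the device from atomic context, e.g. from an
 * interrupt handler:
 *
 *	pm_runtime_irq_safe(dev);	// in probe
 *	...
 *	pm_runtime_get_sync(dev);	// now legal in atomic context
 *
 * The cost, as noted above, is that the parent is kept active for as long as
 * the flag is set.
 */
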
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

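/*
 * Illustrative example (editor's addition, not part of the original file):
 * a hypothetical probe routine enabling autosuspend with a two-second delay
 * would typically combine this with pm_runtime_use_autosuspend():
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// milliseconds
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * With these settings, pm_runtime_autosuspend_expiration() above yields a time
 * roughly two seconds after power.last_busy (rounded to a whole second, since
 * the delay is >= 1000 ms), and suspends requested with RPM_AUTO are deferred
 * until then.
 */
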
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}