/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

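/*
 * GENPD_DEV_CALLBACK() dispatches to an optional per-domain device operation:
 * for instance, GENPD_DEV_CALLBACK(genpd, int, stop, dev) evaluates to
 * genpd->dev_ops.stop(dev) when that callback is set and to (int)0 otherwise.
 */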
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        }                                                       \
        __ret;                                                  \
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        if (IS_ERR_OR_NULL(domain_name))
                return NULL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (!strcmp(gpd->name, domain_name)) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);
        return genpd;
}

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
                                        stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
                                        start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        bool ret = false;

        if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
                ret = !!atomic_dec_and_test(&genpd->sd_count);

        return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);
        /*
         * Wait for the domain to transition into either the active,
         * or the power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
        if (genpd->resume_count == 0)
                genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
        s64 usecs64;

        if (!genpd->cpuidle_data)
                return;

        usecs64 = genpd->power_on_latency_ns;
        do_div(usecs64, NSEC_PER_USEC);
        usecs64 += genpd->cpuidle_data->saved_exit_latency;
        genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct gpd_link *link;
        DEFINE_WAIT(wait);
        int ret = 0;

        /* If the domain's master is being waited for, we have to wait too. */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status != GPD_STATE_WAIT_MASTER)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);

        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                return 0;

        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                return 0;
        }

        if (genpd->cpuidle_data) {
                cpuidle_pause_and_lock();
                genpd->cpuidle_data->idle_state->disabled = true;
                cpuidle_resume_and_unlock();
                goto out;
        }

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the masters' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_inc(link->master);
                genpd->status = GPD_STATE_WAIT_MASTER;

                mutex_unlock(&genpd->lock);

                ret = pm_genpd_poweron(link->master);

                mutex_lock(&genpd->lock);

                /*
                 * The "wait for master" status is guaranteed not to change
                 * while the master is powering on.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                wake_up_all(&genpd->status_wait_queue);
                if (ret) {
                        genpd_sd_counter_dec(link->master);
                        goto err;
                }
        }

        if (genpd->power_on) {
                ktime_t time_start = ktime_get();
                s64 elapsed_ns;

                ret = genpd->power_on(genpd);
                if (ret)
                        goto err;

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_on_latency_ns) {
                        genpd->power_on_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        genpd_recalc_cpu_exit_latency(genpd);
                        if (genpd->name)
                                pr_warning("%s: Power-on latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

 out:
        genpd_set_active(genpd);

        return 0;

 err:
        list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
                genpd_sd_counter_dec(link->master);

        return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        int ret;

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        mutex_unlock(&genpd->lock);
        return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
        struct generic_pm_domain *genpd;

        genpd = pm_genpd_lookup_name(domain_name);
        return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
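
/*
 * Platform setup code typically powers up a domain before registering or
 * resuming the devices in it.  A minimal sketch, with "A4MP" standing in as a
 * purely illustrative domain name:
 *
 *      if (pm_genpd_name_poweron("A4MP"))
 *              pr_err("Failed to power on PM domain A4MP\n");
 */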

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
                        struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
                                        save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
                                        restore_state_latency_ns,
                                        "state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

        mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
        if (!dev) {
                mutex_unlock(&gpd_data->lock);
                return NOTIFY_DONE;
        }
        mutex_unlock(&gpd_data->lock);

        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd && pdd->dev) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        mutex_lock(&genpd->lock);
                        genpd->max_off_time_changed = true;
                        mutex_unlock(&genpd->lock);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}
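
/*
 * The notifier above runs when a device's PM QoS resume latency constraint
 * changes and marks the affected domains so that their maximum off time is
 * re-evaluated.  A rough sketch of what would trigger it from a driver (the
 * request object and the 20 ms value below are illustrative only):
 *
 *      static struct dev_pm_qos_request my_req;
 *
 *      dev_pm_qos_add_request(dev, &my_req, DEV_PM_QOS_RESUME_LATENCY, 20000);
 */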

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int ret = 0;

        if (gpd_data->need_restore > 0)
                return 0;

        /*
         * If the value of the need_restore flag is still unknown at this point,
         * we trust that pm_genpd_poweroff() has verified that the device is
         * already runtime PM suspended.
         */
        if (gpd_data->need_restore < 0) {
                gpd_data->need_restore = 1;
                return 0;
        }

        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        ret = genpd_save_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        if (!ret)
                gpd_data->need_restore = 1;

        return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int need_restore = gpd_data->need_restore;

        gpd_data->need_restore = 0;
        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);

        /*
         * Call genpd_restore_dev() for recently added devices too (need_restore
         * is negative then).
         */
        if (need_restore)
                genpd_restore_dev(genpd, dev);

        mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_WAIT_MASTER
                || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) The domain is waiting for its master to power up.
         * (3) One of the domain's devices is being resumed right now.
         * (4) System suspend is in progress.
         */
        if (genpd->status == GPD_STATE_POWER_OFF
            || genpd->status == GPD_STATE_WAIT_MASTER
            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                return 0;

        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                enum pm_qos_flags_status stat;

                stat = dev_pm_qos_flags(pdd->dev,
                                        PM_QOS_FLAG_NO_POWER_OFF
                                                | PM_QOS_FLAG_REMOTE_WAKEUP);
                if (stat > PM_QOS_FLAGS_NONE)
                        return -EBUSY;

                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
                    || pdd->dev->power.irq_safe))
                        not_suspended++;
        }

        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
                ret = atomic_read(&genpd->sd_count) == 0 ?
                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        if (genpd->cpuidle_data) {
                /*
                 * If cpuidle_data is set, cpuidle should turn the domain off
                 * when the CPU in it is idle.  In that case we don't decrement
                 * the subdomain counts of the master domains, so that power is
                 * not removed from the current domain prematurely as a result
                 * of cutting off the masters' power.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                cpuidle_pause_and_lock();
                genpd->cpuidle_data->idle_state->disabled = false;
                cpuidle_resume_and_unlock();
                goto out;
        }

        if (genpd->power_off) {
                ktime_t time_start;
                s64 elapsed_ns;

                if (atomic_read(&genpd->sd_count) > 0) {
                        ret = -EBUSY;
                        goto out;
                }

                time_start = ktime_get();

                /*
                 * If sd_count > 0 at this point, one of the subdomains hasn't
                 * managed to call pm_genpd_poweron() for the master yet after
                 * incrementing it.  In that case pm_genpd_poweron() will wait
                 * for us to drop the lock, so we can call .power_off() and let
                 * the pm_genpd_poweron() restore power for us (this shouldn't
                 * happen very often).
                 */
                ret = genpd->power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
                }

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_off_latency_ns) {
                        genpd->power_off_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        if (genpd->name)
                                pr_warning("%s: Power-off latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                genpd_queue_power_off_work(link->master);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_acquire_lock(genpd);
        pm_genpd_poweroff(genpd);
        genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        struct generic_pm_domain_data *gpd_data;
        bool (*stop_ok)(struct device *__dev);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;

        ret = genpd_stop_dev(genpd, dev);
        if (ret)
                return ret;

        /*
         * If power.irq_safe is set, this routine will be run with interrupts
         * off, so it can't use mutexes.
         */
        if (dev->power.irq_safe)
                return 0;

        mutex_lock(&genpd->lock);

        /*
         * If we have an unknown state of the need_restore flag, it means none
         * of the runtime PM callbacks has been invoked yet. Let's update the
         * flag to reflect that the current state is active.
         */
        gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        if (gpd_data->need_restore < 0)
                gpd_data->need_restore = 0;

        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
                return genpd_start_dev_no_timing(genpd, dev);

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        if (ret) {
                mutex_unlock(&genpd->lock);
                return ret;
        }
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

        return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
        pd_ignore_unused = true;
        return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
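
/*
 * With the setup handler above, booting with "pd_ignore_unused" on the kernel
 * command line keeps otherwise-unused PM domains powered instead of turning
 * them off at late initcall time, which can help when debugging a domain's
 * power-off path.  An illustrative command line (everything besides the
 * pd_ignore_unused token is just an example):
 *
 *      console=ttySC0,115200 root=/dev/mmcblk0p2 pd_ignore_unused
 */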

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
        struct generic_pm_domain *genpd;

        if (pd_ignore_unused) {
                pr_warn("genpd: Not disabling unused power domains\n");
                return;
        }

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);
}

static int __init genpd_poweroff_unused(void)
{
        pm_genpd_poweroff_unused();
        return 0;
}
late_initcall(genpd_poweroff_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *gpd;

        if (IS_ERR_OR_NULL(genpd))
                return false;

        list_for_each_entry(gpd, &gpd_list, gpd_list_node)
                if (gpd == genpd)
                        return true;

        return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                pm_genpd_sync_poweroff(link->master);
        }
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status != GPD_STATE_POWER_OFF)
                return;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                pm_genpd_sync_poweron(link->master);
                genpd_sd_counter_inc(link->master);
        }

        if (genpd->power_on)
                genpd->power_on(genpd);

        genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
        bool active_wakeup;

        if (!device_can_wakeup(dev))
                return false;

        active_wakeup = genpd_dev_active_wakeup(genpd, dev);
        return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put(dev);
        return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        genpd_stop_dev(genpd, dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into arbitrary state,
                 * so make it appear as powered off to pm_genpd_sync_poweron(),
                 * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
                        /*
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
                        if (genpd->power_off)
                                genpd->power_off(genpd);

                        return 0;
                }
        }

        if (genpd->suspend_power_off)
                return 0;

        pm_genpd_sync_poweron(genpd);

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_request_idle(dev);
        }
}
1310
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001311/**
Ulf Hanssond47e6462014-09-03 12:52:24 +02001312 * genpd_syscore_switch - Switch power during system core suspend or resume.
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001313 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether the system core is suspending (true) or resuming (false).
1314 *
1315 * This routine may only be called during the system core (syscore) suspend or
1316 * resume phase for devices whose "always on" flags are set.
1317 */
Ulf Hanssond47e6462014-09-03 12:52:24 +02001318static void genpd_syscore_switch(struct device *dev, bool suspend)
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001319{
1320 struct generic_pm_domain *genpd;
1321
1322 genpd = dev_to_genpd(dev);
1323 if (!pm_genpd_present(genpd))
1324 return;
1325
1326 if (suspend) {
1327 genpd->suspended_count++;
1328 pm_genpd_sync_poweroff(genpd);
1329 } else {
1330 pm_genpd_sync_poweron(genpd);
1331 genpd->suspended_count--;
1332 }
1333}
Ulf Hanssond47e6462014-09-03 12:52:24 +02001334
1335void pm_genpd_syscore_poweroff(struct device *dev)
1336{
1337 genpd_syscore_switch(dev, true);
1338}
1339EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
1340
1341void pm_genpd_syscore_poweron(struct device *dev)
1342{
1343 genpd_syscore_switch(dev, false);
1344}
1345EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
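
/*
 * Illustrative sketch, not part of this file: a timer driver whose device
 * lives in a domain that is switched during the syscore phase can bracket
 * its clock event suspend/resume callbacks with the two helpers above.  The
 * foo_timer structure, ced_to_foo_timer() and the start/stop helpers are
 * hypothetical.
 *
 *        static void foo_timer_clockevent_suspend(struct clock_event_device *ced)
 *        {
 *                struct foo_timer *p = ced_to_foo_timer(ced);
 *
 *                foo_timer_stop(p);
 *                pm_genpd_syscore_poweroff(&p->pdev->dev);
 *        }
 *
 *        static void foo_timer_clockevent_resume(struct clock_event_device *ced)
 *        {
 *                struct foo_timer *p = ced_to_foo_timer(ced);
 *
 *                pm_genpd_syscore_poweron(&p->pdev->dev);
 *                foo_timer_start(p);
 *        }
 */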
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001346
Rafael J. Wysockid30d8192014-11-27 22:38:05 +01001347#else /* !CONFIG_PM_SLEEP */
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001348
1349#define pm_genpd_prepare NULL
1350#define pm_genpd_suspend NULL
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001351#define pm_genpd_suspend_late NULL
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001352#define pm_genpd_suspend_noirq NULL
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001353#define pm_genpd_resume_early NULL
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001354#define pm_genpd_resume_noirq NULL
1355#define pm_genpd_resume NULL
1356#define pm_genpd_freeze NULL
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001357#define pm_genpd_freeze_late NULL
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001358#define pm_genpd_freeze_noirq NULL
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001359#define pm_genpd_thaw_early NULL
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001360#define pm_genpd_thaw_noirq NULL
1361#define pm_genpd_thaw NULL
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001362#define pm_genpd_restore_noirq NULL
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001363#define pm_genpd_complete NULL
1364
1365#endif /* CONFIG_PM_SLEEP */
1366
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001367static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
1368{
1369 struct generic_pm_domain_data *gpd_data;
1370
1371 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1372 if (!gpd_data)
1373 return NULL;
1374
1375 mutex_init(&gpd_data->lock);
1376 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1377 dev_pm_qos_add_notifier(dev, &gpd_data->nb);
1378 return gpd_data;
1379}
1380
1381static void __pm_genpd_free_dev_data(struct device *dev,
1382 struct generic_pm_domain_data *gpd_data)
1383{
1384 dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
1385 kfree(gpd_data);
1386}
1387
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001388/**
Rafael J. Wysockib02c9992011-12-01 00:02:05 +01001389 * __pm_genpd_add_device - Add a device to an I/O PM domain.
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001390 * @genpd: PM domain to add the device to.
1391 * @dev: Device to be added.
Rafael J. Wysockib02c9992011-12-01 00:02:05 +01001392 * @td: Set of PM QoS timing parameters to attach to the device.
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001393 */
Rafael J. Wysockib02c9992011-12-01 00:02:05 +01001394int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1395 struct gpd_timing_data *td)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001396{
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001397 struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
Rafael J. Wysocki4605ab62011-08-25 15:34:12 +02001398 struct pm_domain_data *pdd;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001399 int ret = 0;
1400
1401 dev_dbg(dev, "%s()\n", __func__);
1402
1403 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1404 return -EINVAL;
1405
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001406 gpd_data_new = __pm_genpd_alloc_dev_data(dev);
1407 if (!gpd_data_new)
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001408 return -ENOMEM;
1409
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001410 genpd_acquire_lock(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001411
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001412 if (genpd->prepared_count > 0) {
1413 ret = -EAGAIN;
1414 goto out;
1415 }
1416
Rafael J. Wysocki4605ab62011-08-25 15:34:12 +02001417 list_for_each_entry(pdd, &genpd->dev_list, list_node)
1418 if (pdd->dev == dev) {
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001419 ret = -EINVAL;
1420 goto out;
1421 }
1422
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001423 ret = dev_pm_get_subsys_data(dev);
1424 if (ret)
1425 goto out;
1426
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001427 genpd->device_count++;
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001428 genpd->max_off_time_changed = true;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001429
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001430 spin_lock_irq(&dev->power.lock);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001431
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001432 dev->pm_domain = &genpd->domain;
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001433 if (dev->power.subsys_data->domain_data) {
1434 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1435 } else {
1436 gpd_data = gpd_data_new;
1437 dev->power.subsys_data->domain_data = &gpd_data->base;
1438 }
1439 gpd_data->refcount++;
Rafael J. Wysockib02c9992011-12-01 00:02:05 +01001440 if (td)
1441 gpd_data->td = *td;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001442
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001443 spin_unlock_irq(&dev->power.lock);
1444
Geert Uytterhoevend79b6fe2014-09-25 18:28:28 +02001445 if (genpd->attach_dev)
Ulf Hanssonc16561e2014-11-06 00:37:08 +01001446 genpd->attach_dev(genpd, dev);
Geert Uytterhoevend79b6fe2014-09-25 18:28:28 +02001447
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001448 mutex_lock(&gpd_data->lock);
1449 gpd_data->base.dev = dev;
1450 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
Ulf Hansson67732cd2014-11-11 11:07:08 +01001451 gpd_data->need_restore = -1;
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001452 gpd_data->td.constraint_changed = true;
1453 gpd_data->td.effective_constraint_ns = -1;
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001454 mutex_unlock(&gpd_data->lock);
1455
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001456 out:
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001457 genpd_release_lock(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001458
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001459 if (gpd_data != gpd_data_new)
1460 __pm_genpd_free_dev_data(dev, gpd_data_new);
1461
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001462 return ret;
1463}
1464
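/*
 * Illustrative sketch, not part of this file: SoC or board code could hand a
 * device over to a domain together with measured latencies.  foo_pd, foo_dev
 * and the latency numbers below are hypothetical.
 *
 *        static struct gpd_timing_data foo_td = {
 *                .stop_latency_ns          = 20000,
 *                .start_latency_ns         = 50000,
 *                .save_state_latency_ns    = 100000,
 *                .restore_state_latency_ns = 200000,
 *        };
 *
 *        int error = __pm_genpd_add_device(&foo_pd, &foo_dev, &foo_td);
 *
 * Passing a NULL @td leaves the timing data zeroed; the pm_genpd_add_device()
 * helper used by genpd_dev_pm_attach() below takes that path.
 */
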
1465/**
Rafael J. Wysockib5abb082012-08-07 01:06:11 +02001466 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
1467 * @domain_name: Name of the PM domain to add the device to.
1468 * @dev: Device to be added.
1469 * @td: Set of PM QoS timing parameters to attach to the device.
1470 */
1471int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
1472 struct gpd_timing_data *td)
1473{
Rafael J. Wysocki8bc02512012-08-07 01:11:14 +02001474 return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
Rafael J. Wysockib5abb082012-08-07 01:06:11 +02001475}
1476
Thomas Abrahamc8aa1302012-01-27 15:22:07 +09001477/**
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001478 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1479 * @genpd: PM domain to remove the device from.
1480 * @dev: Device to be removed.
1481 */
1482int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1483 struct device *dev)
1484{
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001485 struct generic_pm_domain_data *gpd_data;
Rafael J. Wysocki4605ab62011-08-25 15:34:12 +02001486 struct pm_domain_data *pdd;
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001487 bool remove = false;
Rafael J. Wysockiefa69022012-05-01 21:33:53 +02001488 int ret = 0;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001489
1490 dev_dbg(dev, "%s()\n", __func__);
1491
Rafael J. Wysockiefa69022012-05-01 21:33:53 +02001492 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
1493 || IS_ERR_OR_NULL(dev->pm_domain)
1494 || pd_to_genpd(dev->pm_domain) != genpd)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001495 return -EINVAL;
1496
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001497 genpd_acquire_lock(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001498
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001499 if (genpd->prepared_count > 0) {
1500 ret = -EAGAIN;
1501 goto out;
1502 }
1503
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001504 genpd->device_count--;
1505 genpd->max_off_time_changed = true;
1506
Geert Uytterhoevend79b6fe2014-09-25 18:28:28 +02001507 if (genpd->detach_dev)
Ulf Hanssonc16561e2014-11-06 00:37:08 +01001508 genpd->detach_dev(genpd, dev);
Geert Uytterhoevend79b6fe2014-09-25 18:28:28 +02001509
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001510 spin_lock_irq(&dev->power.lock);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001511
Rafael J. Wysockiefa69022012-05-01 21:33:53 +02001512 dev->pm_domain = NULL;
1513 pdd = dev->power.subsys_data->domain_data;
1514 list_del_init(&pdd->list_node);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001515 gpd_data = to_gpd_data(pdd);
1516 if (--gpd_data->refcount == 0) {
1517 dev->power.subsys_data->domain_data = NULL;
1518 remove = true;
1519 }
1520
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001521 spin_unlock_irq(&dev->power.lock);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001522
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001523 mutex_lock(&gpd_data->lock);
1524 pdd->dev = NULL;
1525 mutex_unlock(&gpd_data->lock);
1526
1527 genpd_release_lock(genpd);
1528
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001529 dev_pm_put_subsys_data(dev);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001530 if (remove)
1531 __pm_genpd_free_dev_data(dev, gpd_data);
1532
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001533 return 0;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001534
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001535 out:
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001536 genpd_release_lock(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001537
1538 return ret;
1539}
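
/*
 * Illustrative sketch, not part of this file: removal mirrors addition and,
 * like genpd_dev_pm_detach() below, has to retry while a system suspend is
 * in progress (prepared_count > 0 makes the call return -EAGAIN).  foo_pd
 * and foo_dev are hypothetical.
 *
 *        int error;
 *
 *        do {
 *                error = pm_genpd_remove_device(&foo_pd, &foo_dev);
 *                if (error != -EAGAIN)
 *                        break;
 *                cond_resched();
 *        } while (1);
 */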
1540
1541/**
Rafael J. Wysockica1d72f2012-05-14 21:45:52 +02001542 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
1543 * @dev: Device to set/unset the flag for.
1544 * @val: The new value of the device's "need restore" flag.
1545 */
1546void pm_genpd_dev_need_restore(struct device *dev, bool val)
1547{
1548 struct pm_subsys_data *psd;
1549 unsigned long flags;
1550
1551 spin_lock_irqsave(&dev->power.lock, flags);
1552
1553 psd = dev_to_psd(dev);
1554 if (psd && psd->domain_data)
Ulf Hansson67732cd2014-11-11 11:07:08 +01001555 to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
Rafael J. Wysockica1d72f2012-05-14 21:45:52 +02001556
1557 spin_unlock_irqrestore(&dev->power.lock, flags);
1558}
1559EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
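
/*
 * Illustrative sketch, not part of this file: platform code that knows a
 * device's hardware state was lost (or preserved) while its domain was off
 * can tell genpd whether the restore_state callback must run on the next
 * runtime resume.  foo_dev is hypothetical.
 *
 *        pm_genpd_dev_need_restore(&foo_dev, true);
 */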
1560
1561/**
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001562 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1563 * @genpd: Master PM domain to add the subdomain to.
Rafael J. Wysockibc0403f2011-08-08 23:43:59 +02001564 * @subdomain: Subdomain to be added.
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001565 */
1566int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
Rafael J. Wysockibc0403f2011-08-08 23:43:59 +02001567 struct generic_pm_domain *subdomain)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001568{
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001569 struct gpd_link *link;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001570 int ret = 0;
1571
Rafael J. Wysockifb7268b2012-08-07 01:08:37 +02001572 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1573 || genpd == subdomain)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001574 return -EINVAL;
1575
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001576 start:
1577 genpd_acquire_lock(genpd);
Rafael J. Wysockibc0403f2011-08-08 23:43:59 +02001578 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001579
Rafael J. Wysockibc0403f2011-08-08 23:43:59 +02001580 if (subdomain->status != GPD_STATE_POWER_OFF
1581 && subdomain->status != GPD_STATE_ACTIVE) {
1582 mutex_unlock(&subdomain->lock);
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001583 genpd_release_lock(genpd);
1584 goto start;
1585 }
1586
1587 if (genpd->status == GPD_STATE_POWER_OFF
Rafael J. Wysockibc0403f2011-08-08 23:43:59 +02001588 && subdomain->status != GPD_STATE_POWER_OFF) {
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001589 ret = -EINVAL;
1590 goto out;
1591 }
1592
Huang Ying4fcac102012-05-07 21:35:45 +02001593 list_for_each_entry(link, &genpd->master_links, master_node) {
Rafael J. Wysockibc0403f2011-08-08 23:43:59 +02001594 if (link->slave == subdomain && link->master == genpd) {
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001595 ret = -EINVAL;
1596 goto out;
1597 }
1598 }
1599
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001600 link = kzalloc(sizeof(*link), GFP_KERNEL);
1601 if (!link) {
1602 ret = -ENOMEM;
1603 goto out;
1604 }
1605 link->master = genpd;
1606 list_add_tail(&link->master_node, &genpd->master_links);
Rafael J. Wysockibc0403f2011-08-08 23:43:59 +02001607 link->slave = subdomain;
1608 list_add_tail(&link->slave_node, &subdomain->slave_links);
1609 if (subdomain->status != GPD_STATE_POWER_OFF)
Rafael J. Wysockic4bb3162011-08-08 23:43:04 +02001610 genpd_sd_counter_inc(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001611
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001612 out:
Rafael J. Wysockibc0403f2011-08-08 23:43:59 +02001613 mutex_unlock(&subdomain->lock);
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001614 genpd_release_lock(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001615
1616 return ret;
1617}
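
/*
 * Illustrative sketch, not part of this file: nesting one domain inside
 * another so that the master cannot be powered off while the subdomain is
 * still on.  Both foo_* domains are hypothetical and assumed to have been
 * set up with pm_genpd_init() already.
 *
 *        int error = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
 *
 * pm_genpd_add_subdomain_names() below does the same thing, looking up both
 * domains by name first.
 */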
1618
1619/**
Rafael J. Wysockifb7268b2012-08-07 01:08:37 +02001620 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1621 * @master_name: Name of the master PM domain to add the subdomain to.
1622 * @subdomain_name: Name of the subdomain to be added.
1623 */
1624int pm_genpd_add_subdomain_names(const char *master_name,
1625 const char *subdomain_name)
1626{
1627 struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1628
1629 if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1630 return -EINVAL;
1631
1632 mutex_lock(&gpd_list_lock);
1633 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1634 if (!master && !strcmp(gpd->name, master_name))
1635 master = gpd;
1636
1637 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1638 subdomain = gpd;
1639
1640 if (master && subdomain)
1641 break;
1642 }
1643 mutex_unlock(&gpd_list_lock);
1644
1645 return pm_genpd_add_subdomain(master, subdomain);
1646}
1647
1648/**
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001649 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1650 * @genpd: Master PM domain to remove the subdomain from.
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001651 * @subdomain: Subdomain to be removed.
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001652 */
1653int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001654 struct generic_pm_domain *subdomain)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001655{
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001656 struct gpd_link *link;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001657 int ret = -EINVAL;
1658
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001659 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001660 return -EINVAL;
1661
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001662 start:
1663 genpd_acquire_lock(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001664
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001665 list_for_each_entry(link, &genpd->master_links, master_node) {
1666 if (link->slave != subdomain)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001667 continue;
1668
1669 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1670
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001671 if (subdomain->status != GPD_STATE_POWER_OFF
1672 && subdomain->status != GPD_STATE_ACTIVE) {
1673 mutex_unlock(&subdomain->lock);
1674 genpd_release_lock(genpd);
1675 goto start;
1676 }
1677
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001678 list_del(&link->master_node);
1679 list_del(&link->slave_node);
1680 kfree(link);
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001681 if (subdomain->status != GPD_STATE_POWER_OFF)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001682 genpd_sd_counter_dec(genpd);
1683
1684 mutex_unlock(&subdomain->lock);
1685
1686 ret = 0;
1687 break;
1688 }
1689
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001690 genpd_release_lock(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001691
1692 return ret;
1693}
1694
1695/**
Rafael J. Wysocki40114442012-08-15 20:32:43 +02001696 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1697 * @genpd: PM domain to be connected with cpuidle.
1698 * @state: cpuidle state this domain can disable/enable.
1699 *
1700 * Make a PM domain behave as though it contained a CPU core, that is, instead
1701 * of calling its power down routine it will enable the given cpuidle state so
1702 * that the cpuidle subsystem can power it down (if possible and desirable).
1703 */
1704int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001705{
1706 struct cpuidle_driver *cpuidle_drv;
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001707 struct gpd_cpuidle_data *cpuidle_data;
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001708 struct cpuidle_state *idle_state;
1709 int ret = 0;
1710
1711 if (IS_ERR_OR_NULL(genpd) || state < 0)
1712 return -EINVAL;
1713
1714 genpd_acquire_lock(genpd);
1715
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001716 if (genpd->cpuidle_data) {
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001717 ret = -EEXIST;
1718 goto out;
1719 }
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001720 cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
1721 if (!cpuidle_data) {
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001722 ret = -ENOMEM;
1723 goto out;
1724 }
1725 cpuidle_drv = cpuidle_driver_ref();
1726 if (!cpuidle_drv) {
1727 ret = -ENODEV;
jhbird.choi@samsung.comdebe0812012-10-23 00:54:38 +02001728 goto err_drv;
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001729 }
1730 if (cpuidle_drv->state_count <= state) {
1731 ret = -EINVAL;
1732 goto err;
1733 }
1734 idle_state = &cpuidle_drv->states[state];
1735 if (!idle_state->disabled) {
1736 ret = -EAGAIN;
1737 goto err;
1738 }
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001739 cpuidle_data->idle_state = idle_state;
1740 cpuidle_data->saved_exit_latency = idle_state->exit_latency;
1741 genpd->cpuidle_data = cpuidle_data;
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001742 genpd_recalc_cpu_exit_latency(genpd);
1743
1744 out:
1745 genpd_release_lock(genpd);
1746 return ret;
1747
1748 err:
1749 cpuidle_driver_unref();
jhbird.choi@samsung.comdebe0812012-10-23 00:54:38 +02001750
1751 err_drv:
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001752 kfree(cpuidle_data);
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001753 goto out;
1754}
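
/*
 * Illustrative sketch, not part of this file: instead of calling a
 * power_off() routine, let state 1 of the registered cpuidle driver gate
 * this (hypothetical) CPU domain.  Note that the target state has to be
 * disabled at this point, otherwise the call above returns -EAGAIN.
 *
 *        int error = pm_genpd_attach_cpuidle(&foo_cpu_pd, 1);
 */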
1755
Rafael J. Wysocki40114442012-08-15 20:32:43 +02001756/**
Rafael J. Wysocki74a2799a2012-08-15 20:32:59 +02001757 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1758 * @name: Name of the domain to connect to cpuidle.
1759 * @state: cpuidle state this domain can manipulate.
1760 */
1761int pm_genpd_name_attach_cpuidle(const char *name, int state)
1762{
1763 return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1764}
1765
1766/**
Rafael J. Wysocki40114442012-08-15 20:32:43 +02001767 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1768 * @genpd: PM domain to remove the cpuidle connection from.
1769 *
1770 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1771 * given PM domain.
1772 */
1773int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001774{
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001775 struct gpd_cpuidle_data *cpuidle_data;
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001776 struct cpuidle_state *idle_state;
1777 int ret = 0;
1778
1779 if (IS_ERR_OR_NULL(genpd))
1780 return -EINVAL;
1781
1782 genpd_acquire_lock(genpd);
1783
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001784 cpuidle_data = genpd->cpuidle_data;
1785 if (!cpuidle_data) {
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001786 ret = -ENODEV;
1787 goto out;
1788 }
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001789 idle_state = cpuidle_data->idle_state;
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001790 if (!idle_state->disabled) {
1791 ret = -EAGAIN;
1792 goto out;
1793 }
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001794 idle_state->exit_latency = cpuidle_data->saved_exit_latency;
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001795 cpuidle_driver_unref();
Ulf Hanssonf39cb172014-10-02 21:12:34 +02001796 genpd->cpuidle_data = NULL;
1797 kfree(cpuidle_data);
Rafael J. Wysockicbc9ef02012-07-03 19:07:42 +02001798
1799 out:
1800 genpd_release_lock(genpd);
1801 return ret;
1802}
1803
Rafael J. Wysocki74a2799a2012-08-15 20:32:59 +02001804/**
1805 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1806 * @name: Name of the domain to disconnect cpuidle from.
1807 */
1808int pm_genpd_name_detach_cpuidle(const char *name)
1809{
1810 return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1811}
1812
Rafael J. Wysockid23b9b02011-11-27 13:11:51 +01001813/* Default device callbacks for generic PM domains. */
1814
Rafael J. Wysockid5e4cbf2011-11-27 13:11:36 +01001815/**
Geert Uytterhoeven12e10bb2014-09-16 21:59:39 +02001816 * pm_genpd_default_save_state - Default "save device state" for PM domains.
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001817 * @dev: Device to handle.
1818 */
1819static int pm_genpd_default_save_state(struct device *dev)
1820{
1821 int (*cb)(struct device *__dev);
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001822
Rafael J. Wysocki0b5897412012-06-16 00:02:22 +02001823 if (dev->type && dev->type->pm)
1824 cb = dev->type->pm->runtime_suspend;
1825 else if (dev->class && dev->class->pm)
1826 cb = dev->class->pm->runtime_suspend;
1827 else if (dev->bus && dev->bus->pm)
1828 cb = dev->bus->pm->runtime_suspend;
1829 else
1830 cb = NULL;
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001831
Rafael J. Wysocki0b5897412012-06-16 00:02:22 +02001832 if (!cb && dev->driver && dev->driver->pm)
1833 cb = dev->driver->pm->runtime_suspend;
1834
1835 return cb ? cb(dev) : 0;
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001836}
1837
1838/**
Geert Uytterhoeven12e10bb2014-09-16 21:59:39 +02001839 * pm_genpd_default_restore_state - Default PM domains "restore device state".
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001840 * @dev: Device to handle.
1841 */
1842static int pm_genpd_default_restore_state(struct device *dev)
1843{
1844 int (*cb)(struct device *__dev);
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001845
Rafael J. Wysocki0b5897412012-06-16 00:02:22 +02001846 if (dev->type && dev->type->pm)
1847 cb = dev->type->pm->runtime_resume;
1848 else if (dev->class && dev->class->pm)
1849 cb = dev->class->pm->runtime_resume;
1850 else if (dev->bus && dev->bus->pm)
1851 cb = dev->bus->pm->runtime_resume;
1852 else
1853 cb = NULL;
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001854
Rafael J. Wysocki0b5897412012-06-16 00:02:22 +02001855 if (!cb && dev->driver && dev->driver->pm)
1856 cb = dev->driver->pm->runtime_resume;
1857
1858 return cb ? cb(dev) : 0;
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001859}
1860
Rafael J. Wysockid23b9b02011-11-27 13:11:51 +01001861/**
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001862 * pm_genpd_init - Initialize a generic I/O PM domain object.
1863 * @genpd: PM domain object to initialize.
1864 * @gov: PM domain governor to associate with the domain (may be NULL).
		1865 * @is_off: Whether the domain is initially powered off.
1866 */
1867void pm_genpd_init(struct generic_pm_domain *genpd,
1868 struct dev_power_governor *gov, bool is_off)
1869{
1870 if (IS_ERR_OR_NULL(genpd))
1871 return;
1872
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001873 INIT_LIST_HEAD(&genpd->master_links);
1874 INIT_LIST_HEAD(&genpd->slave_links);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001875 INIT_LIST_HEAD(&genpd->dev_list);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001876 mutex_init(&genpd->lock);
1877 genpd->gov = gov;
1878 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1879 genpd->in_progress = 0;
Rafael J. Wysockic4bb3162011-08-08 23:43:04 +02001880 atomic_set(&genpd->sd_count, 0);
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001881 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1882 init_waitqueue_head(&genpd->status_wait_queue);
Rafael J. Wysockic6d22b32011-07-12 00:39:36 +02001883 genpd->poweroff_task = NULL;
1884 genpd->resume_count = 0;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001885 genpd->device_count = 0;
Rafael J. Wysocki221e9b52011-12-01 00:02:10 +01001886 genpd->max_off_time_ns = -1;
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001887 genpd->max_off_time_changed = true;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001888 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1889 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001890 genpd->domain.ops.prepare = pm_genpd_prepare;
1891 genpd->domain.ops.suspend = pm_genpd_suspend;
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001892 genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001893 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1894 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001895 genpd->domain.ops.resume_early = pm_genpd_resume_early;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001896 genpd->domain.ops.resume = pm_genpd_resume;
1897 genpd->domain.ops.freeze = pm_genpd_freeze;
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001898 genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001899 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1900 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001901 genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001902 genpd->domain.ops.thaw = pm_genpd_thaw;
Rafael J. Wysockid23b9b02011-11-27 13:11:51 +01001903 genpd->domain.ops.poweroff = pm_genpd_suspend;
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001904 genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
Rafael J. Wysockid23b9b02011-11-27 13:11:51 +01001905 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001906 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001907 genpd->domain.ops.restore_early = pm_genpd_resume_early;
Rafael J. Wysockid23b9b02011-11-27 13:11:51 +01001908 genpd->domain.ops.restore = pm_genpd_resume;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001909 genpd->domain.ops.complete = pm_genpd_complete;
Rafael J. Wysockiecf00472011-11-27 13:11:44 +01001910 genpd->dev_ops.save_state = pm_genpd_default_save_state;
1911 genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
Rafael J. Wysocki5125bbf382011-07-13 12:31:52 +02001912 mutex_lock(&gpd_list_lock);
1913 list_add(&genpd->gpd_list_node, &gpd_list);
1914 mutex_unlock(&gpd_list_lock);
1915}
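
/*
 * Illustrative sketch, not part of this file: minimal platform code setting
 * up one domain.  The foo_* names and FOO_PD_BIT are hypothetical;
 * foo_pmu_write() stands in for whatever actually toggles the power switch.
 *
 *        static int foo_pd_power_on(struct generic_pm_domain *domain)
 *        {
 *                return foo_pmu_write(FOO_PD_BIT, true);
 *        }
 *
 *        static int foo_pd_power_off(struct generic_pm_domain *domain)
 *        {
 *                return foo_pmu_write(FOO_PD_BIT, false);
 *        }
 *
 *        static struct generic_pm_domain foo_pd = {
 *                .name      = "foo",
 *                .power_on  = foo_pd_power_on,
 *                .power_off = foo_pd_power_off,
 *        };
 *
 *        static int __init foo_pd_setup(void)
 *        {
 *                pm_genpd_init(&foo_pd, NULL, true);
 *                return 0;
 *        }
 *
 * Here the domain starts out powered off and uses no governor; passing a
 * governor such as simple_qos_governor from domain_governor.c instead of
 * NULL would enable QoS-based power-down decisions.
 */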
Tomasz Figaaa422402014-09-19 20:27:36 +02001916
1917#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1918/*
1919 * Device Tree based PM domain providers.
1920 *
1921 * The code below implements generic device tree based PM domain providers that
1922 * bind device tree nodes with generic PM domains registered in the system.
1923 *
1924 * Any driver that registers generic PM domains and needs to support binding of
1925 * devices to these domains is supposed to register a PM domain provider, which
1926 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1927 *
1928 * Two simple mapping functions have been provided for convenience:
1929 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1930 * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1931 * index.
1932 */
1933
1934/**
1935 * struct of_genpd_provider - PM domain provider registration structure
1936 * @link: Entry in global list of PM domain providers
1937 * @node: Pointer to device tree node of PM domain provider
1938 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1939 * into a PM domain.
1940 * @data: context pointer to be passed into @xlate callback
1941 */
1942struct of_genpd_provider {
1943 struct list_head link;
1944 struct device_node *node;
1945 genpd_xlate_t xlate;
1946 void *data;
1947};
1948
1949/* List of registered PM domain providers. */
1950static LIST_HEAD(of_genpd_providers);
1951/* Mutex to protect the list above. */
1952static DEFINE_MUTEX(of_genpd_mutex);
1953
1954/**
1955 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1956 * @genpdspec: OF phandle args to map into a PM domain
1957 * @data: xlate function private data - pointer to struct generic_pm_domain
1958 *
1959 * This is a generic xlate function that can be used to model PM domains that
1960 * have their own device tree nodes. The private data of xlate function needs
1961 * to be a valid pointer to struct generic_pm_domain.
1962 */
1963struct generic_pm_domain *__of_genpd_xlate_simple(
1964 struct of_phandle_args *genpdspec,
1965 void *data)
1966{
1967 if (genpdspec->args_count != 0)
1968 return ERR_PTR(-EINVAL);
1969 return data;
1970}
1971EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
1972
1973/**
1974 * __of_genpd_xlate_onecell() - Xlate function using a single index.
1975 * @genpdspec: OF phandle args to map into a PM domain
1976 * @data: xlate function private data - pointer to struct genpd_onecell_data
1977 *
1978 * This is a generic xlate function that can be used to model simple PM domain
1979 * controllers that have one device tree node and provide multiple PM domains.
1980 * A single cell is used as an index into an array of PM domains specified in
1981 * the genpd_onecell_data struct when registering the provider.
1982 */
1983struct generic_pm_domain *__of_genpd_xlate_onecell(
1984 struct of_phandle_args *genpdspec,
1985 void *data)
1986{
1987 struct genpd_onecell_data *genpd_data = data;
1988 unsigned int idx = genpdspec->args[0];
1989
1990 if (genpdspec->args_count != 1)
1991 return ERR_PTR(-EINVAL);
1992
1993 if (idx >= genpd_data->num_domains) {
1994 pr_err("%s: invalid domain index %u\n", __func__, idx);
1995 return ERR_PTR(-EINVAL);
1996 }
1997
1998 if (!genpd_data->domains[idx])
1999 return ERR_PTR(-ENOENT);
2000
2001 return genpd_data->domains[idx];
2002}
2003EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
2004
2005/**
2006 * __of_genpd_add_provider() - Register a PM domain provider for a node
2007 * @np: Device node pointer associated with the PM domain provider.
2008 * @xlate: Callback for decoding PM domain from phandle arguments.
2009 * @data: Context pointer for @xlate callback.
2010 */
2011int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2012 void *data)
2013{
2014 struct of_genpd_provider *cp;
2015
2016 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2017 if (!cp)
2018 return -ENOMEM;
2019
2020 cp->node = of_node_get(np);
2021 cp->data = data;
2022 cp->xlate = xlate;
2023
2024 mutex_lock(&of_genpd_mutex);
2025 list_add(&cp->link, &of_genpd_providers);
2026 mutex_unlock(&of_genpd_mutex);
2027 pr_debug("Added domain provider from %s\n", np->full_name);
2028
2029 return 0;
2030}
2031EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
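
/*
 * Illustrative sketch, not part of this file: a driver exposing several
 * domains from one device tree node can register them through the one-cell
 * xlate helper above.  FOO_NR_PDS and the foo_* names are hypothetical,
 * @np is the provider's own device_node, and every entry of foo_domains[]
 * is assumed to have been initialized with pm_genpd_init() first.
 *
 *        static struct generic_pm_domain *foo_domains[FOO_NR_PDS];
 *
 *        static struct genpd_onecell_data foo_pd_data = {
 *                .domains     = foo_domains,
 *                .num_domains = FOO_NR_PDS,
 *        };
 *
 *        int error = __of_genpd_add_provider(np, __of_genpd_xlate_onecell,
 *                                            &foo_pd_data);
 */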
2032
2033/**
2034 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2035 * @np: Device node pointer associated with the PM domain provider
2036 */
2037void of_genpd_del_provider(struct device_node *np)
2038{
2039 struct of_genpd_provider *cp;
2040
2041 mutex_lock(&of_genpd_mutex);
2042 list_for_each_entry(cp, &of_genpd_providers, link) {
2043 if (cp->node == np) {
2044 list_del(&cp->link);
2045 of_node_put(cp->node);
2046 kfree(cp);
2047 break;
2048 }
2049 }
2050 mutex_unlock(&of_genpd_mutex);
2051}
2052EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2053
2054/**
2055 * of_genpd_get_from_provider() - Look-up PM domain
2056 * @genpdspec: OF phandle args to use for look-up
2057 *
2058 * Looks for a PM domain provider under the node specified by @genpdspec and if
		2059 * found, uses the provider's xlate function to map phandle args to a PM
2060 * domain.
2061 *
2062 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2063 * on failure.
2064 */
2065static struct generic_pm_domain *of_genpd_get_from_provider(
2066 struct of_phandle_args *genpdspec)
2067{
2068 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2069 struct of_genpd_provider *provider;
2070
2071 mutex_lock(&of_genpd_mutex);
2072
		2073	/* Check if we have such a provider in our list */
2074 list_for_each_entry(provider, &of_genpd_providers, link) {
2075 if (provider->node == genpdspec->np)
2076 genpd = provider->xlate(genpdspec, provider->data);
2077 if (!IS_ERR(genpd))
2078 break;
2079 }
2080
2081 mutex_unlock(&of_genpd_mutex);
2082
2083 return genpd;
2084}
2085
2086/**
2087 * genpd_dev_pm_detach - Detach a device from its PM domain.
2088 * @dev: Device to attach.
		2089 * @dev: Device to detach.
2090 *
2091 * Try to locate a corresponding generic PM domain, which the device was
2092 * attached to previously. If such is found, the device is detached from it.
2093 */
2094static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2095{
2096 struct generic_pm_domain *pd = NULL, *gpd;
2097 int ret = 0;
2098
2099 if (!dev->pm_domain)
2100 return;
2101
2102 mutex_lock(&gpd_list_lock);
2103 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2104 if (&gpd->domain == dev->pm_domain) {
2105 pd = gpd;
2106 break;
2107 }
2108 }
2109 mutex_unlock(&gpd_list_lock);
2110
2111 if (!pd)
2112 return;
2113
2114 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2115
2116 while (1) {
2117 ret = pm_genpd_remove_device(pd, dev);
2118 if (ret != -EAGAIN)
2119 break;
2120 cond_resched();
2121 }
2122
2123 if (ret < 0) {
		2124		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2125 pd->name, ret);
2126 return;
2127 }
2128
2129 /* Check if PM domain can be powered off after removing this device. */
2130 genpd_queue_power_off_work(pd);
2131}
2132
2133/**
2134 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2135 * @dev: Device to attach.
2136 *
		2137 * Parse the device's OF node to find a PM domain specifier. If one is found,
		2138 * attach the device to the retrieved pm_domain ops.
2139 *
2140 * Both generic and legacy Samsung-specific DT bindings are supported to keep
2141 * backwards compatibility with existing DTBs.
2142 *
		2143 * Returns 0 if the PM domain was successfully attached, or a negative error code.
2144 */
2145int genpd_dev_pm_attach(struct device *dev)
2146{
2147 struct of_phandle_args pd_args;
2148 struct generic_pm_domain *pd;
2149 int ret;
2150
2151 if (!dev->of_node)
2152 return -ENODEV;
2153
2154 if (dev->pm_domain)
2155 return -EEXIST;
2156
2157 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2158 "#power-domain-cells", 0, &pd_args);
2159 if (ret < 0) {
2160 if (ret != -ENOENT)
2161 return ret;
2162
2163 /*
2164 * Try legacy Samsung-specific bindings
2165 * (for backwards compatibility of DT ABI)
2166 */
2167 pd_args.args_count = 0;
2168 pd_args.np = of_parse_phandle(dev->of_node,
2169 "samsung,power-domain", 0);
2170 if (!pd_args.np)
2171 return -ENOENT;
2172 }
2173
2174 pd = of_genpd_get_from_provider(&pd_args);
2175 if (IS_ERR(pd)) {
2176 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2177 __func__, PTR_ERR(pd));
2178 of_node_put(dev->of_node);
2179 return PTR_ERR(pd);
2180 }
2181
2182 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2183
2184 while (1) {
2185 ret = pm_genpd_add_device(pd, dev);
2186 if (ret != -EAGAIN)
2187 break;
2188 cond_resched();
2189 }
2190
2191 if (ret < 0) {
		2192		dev_err(dev, "failed to add to PM domain %s: %d\n",
2193 pd->name, ret);
2194 of_node_put(dev->of_node);
2195 return ret;
2196 }
2197
2198 dev->pm_domain->detach = genpd_dev_pm_detach;
2199
2200 return 0;
2201}
2202EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
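
/*
 * Illustrative device tree sketch, not part of this file, of the generic
 * binding parsed above: a provider node carrying #power-domain-cells and a
 * consumer node referencing it through power-domains.  All node names,
 * compatibles and addresses below are made up.
 *
 *        power: power-controller@12340000 {
 *                compatible = "foo,power-controller";
 *                reg = <0x12340000 0x1000>;
 *                #power-domain-cells = <1>;
 *        };
 *
 *        leds@12350000 {
 *                compatible = "foo,led-controller";
 *                reg = <0x12350000 0x1000>;
 *                power-domains = <&power 0>;
 *        };
 */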
Rafael J. Wysockid30d8192014-11-27 22:38:05 +01002203#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
Maciej Matraszek2bd53062014-09-15 13:09:10 +02002204
2205
2206/*** debugfs support ***/
2207
2208#ifdef CONFIG_PM_ADVANCED_DEBUG
2209#include <linux/pm.h>
2210#include <linux/device.h>
2211#include <linux/debugfs.h>
2212#include <linux/seq_file.h>
2213#include <linux/init.h>
2214#include <linux/kobject.h>
2215static struct dentry *pm_genpd_debugfs_dir;
2216
2217/*
2218 * TODO: This function is a slightly modified version of rtpm_status_show
Rafael J. Wysockid30d8192014-11-27 22:38:05 +01002219 * from sysfs.c, so generalize it.
Maciej Matraszek2bd53062014-09-15 13:09:10 +02002220 */
Maciej Matraszek2bd53062014-09-15 13:09:10 +02002221static void rtpm_status_str(struct seq_file *s, struct device *dev)
2222{
2223 static const char * const status_lookup[] = {
2224 [RPM_ACTIVE] = "active",
2225 [RPM_RESUMING] = "resuming",
2226 [RPM_SUSPENDED] = "suspended",
2227 [RPM_SUSPENDING] = "suspending"
2228 };
2229 const char *p = "";
2230
2231 if (dev->power.runtime_error)
2232 p = "error";
2233 else if (dev->power.disable_depth)
2234 p = "unsupported";
2235 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2236 p = status_lookup[dev->power.runtime_status];
2237 else
2238 WARN_ON(1);
2239
2240 seq_puts(s, p);
2241}
Maciej Matraszek2bd53062014-09-15 13:09:10 +02002242
2243static int pm_genpd_summary_one(struct seq_file *s,
2244 struct generic_pm_domain *gpd)
2245{
2246 static const char * const status_lookup[] = {
2247 [GPD_STATE_ACTIVE] = "on",
2248 [GPD_STATE_WAIT_MASTER] = "wait-master",
2249 [GPD_STATE_BUSY] = "busy",
2250 [GPD_STATE_REPEAT] = "off-in-progress",
2251 [GPD_STATE_POWER_OFF] = "off"
2252 };
2253 struct pm_domain_data *pm_data;
2254 const char *kobj_path;
2255 struct gpd_link *link;
2256 int ret;
2257
2258 ret = mutex_lock_interruptible(&gpd->lock);
2259 if (ret)
2260 return -ERESTARTSYS;
2261
2262 if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
2263 goto exit;
2264 seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
2265
2266 /*
2267 * Modifications on the list require holding locks on both
2268 * master and slave, so we are safe.
2269 * Also gpd->name is immutable.
2270 */
2271 list_for_each_entry(link, &gpd->master_links, master_node) {
2272 seq_printf(s, "%s", link->slave->name);
2273 if (!list_is_last(&link->master_node, &gpd->master_links))
2274 seq_puts(s, ", ");
2275 }
2276
2277 list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
2278 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2279 if (kobj_path == NULL)
2280 continue;
2281
2282 seq_printf(s, "\n %-50s ", kobj_path);
2283 rtpm_status_str(s, pm_data->dev);
2284 kfree(kobj_path);
2285 }
2286
2287 seq_puts(s, "\n");
2288exit:
2289 mutex_unlock(&gpd->lock);
2290
2291 return 0;
2292}
2293
2294static int pm_genpd_summary_show(struct seq_file *s, void *data)
2295{
2296 struct generic_pm_domain *gpd;
2297 int ret = 0;
2298
2299 seq_puts(s, " domain status slaves\n");
2300 seq_puts(s, " /device runtime status\n");
2301 seq_puts(s, "----------------------------------------------------------------------\n");
2302
2303 ret = mutex_lock_interruptible(&gpd_list_lock);
2304 if (ret)
2305 return -ERESTARTSYS;
2306
2307 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2308 ret = pm_genpd_summary_one(s, gpd);
2309 if (ret)
2310 break;
2311 }
2312 mutex_unlock(&gpd_list_lock);
2313
2314 return ret;
2315}
2316
2317static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2318{
2319 return single_open(file, pm_genpd_summary_show, NULL);
2320}
2321
2322static const struct file_operations pm_genpd_summary_fops = {
2323 .open = pm_genpd_summary_open,
2324 .read = seq_read,
2325 .llseek = seq_lseek,
2326 .release = single_release,
2327};
2328
2329static int __init pm_genpd_debug_init(void)
2330{
2331 struct dentry *d;
2332
2333 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2334
2335 if (!pm_genpd_debugfs_dir)
2336 return -ENOMEM;
2337
2338 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2339 pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2340 if (!d)
2341 return -ENOMEM;
2342
2343 return 0;
2344}
2345late_initcall(pm_genpd_debug_init);
2346
2347static void __exit pm_genpd_debug_exit(void)
2348{
2349 debugfs_remove_recursive(pm_genpd_debugfs_dir);
2350}
2351__exitcall(pm_genpd_debug_exit);
2352#endif /* CONFIG_PM_ADVANCED_DEBUG */