#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	0

#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)

struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
	DEV_PM_QOS_RESUME_LATENCY = 1,
	DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;
};

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};

/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed for
 * 32-bit quantities on all of the CPU types that Linux supports.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	s32 no_constraint_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};
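
/*
 * Constraint sets are normally instantiated by the PM QoS core rather than
 * by individual drivers.  As a rough sketch of how such an object can be
 * set up (along the lines of the global class definitions in
 * kernel/power/qos.c; the names "example_notifier" and "example_constraints"
 * are illustrative only):
 *
 *	static BLOCKING_NOTIFIER_HEAD(example_notifier);
 *	static struct pm_qos_constraints example_constraints = {
 *		.list = PLIST_HEAD_INIT(example_constraints.list),
 *		.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 *		.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 *		.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 *		.type = PM_QOS_MIN,
 *		.notifiers = &example_notifier,
 *	};
 */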

struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};

struct dev_pm_qos {
	struct pm_qos_constraints resume_latency;
	struct pm_qos_flags flags;
	struct dev_pm_qos_request *resume_latency_req;
	struct dev_pm_qos_request *flags_req;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
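
/*
 * Example usage (an illustrative sketch, not part of this interface): a
 * driver that temporarily cannot tolerate long CPU wakeup latencies might do
 * something like the following.  The request object "example_req" and the
 * 50 usec bound are hypothetical.
 *
 *	static struct pm_qos_request example_req;
 *
 *	pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 50);
 *	...	(latency-sensitive work)
 *	pm_qos_update_request(&example_req, PM_QOS_DEFAULT_VALUE);
 *	pm_qos_remove_request(&example_req);
 *
 * pm_qos_update_request_timeout() can replace the explicit update when the
 * constraint should be dropped automatically after timeout_us microseconds.
 */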

#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value);
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							   s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_add_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
						  struct dev_pm_qos_request *req,
						  s32 value)
			{ return 0; }
#endif
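
/*
 * Example usage (an illustrative sketch): bounding the resume latency of a
 * particular device.  "dev", "example_dev_req" and the 100 usec bound are
 * hypothetical.
 *
 *	static struct dev_pm_qos_request example_dev_req;
 *	int ret;
 *
 *	ret = dev_pm_qos_add_request(dev, &example_dev_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	...
 *	dev_pm_qos_remove_request(&example_dev_req);
 */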

#ifdef CONFIG_PM_RUNTIME
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
	return dev->power.qos->resume_latency_req->data.pnode.prio;
}

static inline s32 dev_pm_qos_requested_flags(struct device *dev)
{
	return dev->power.qos->flags_req->data.flr.flags;
}
#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
			{ return 0; }

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
#endif
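
/*
 * Example usage (an illustrative sketch): exposing the device PM QoS flags to
 * user space and then asking that the device not be powered off in its
 * low-power states.  "dev" is hypothetical.
 *
 *	dev_pm_qos_expose_flags(dev, 0);
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 */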

#endif