#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_DEV_LAT_DEFAULT_VALUE		0

struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
	DEV_PM_QOS_LATENCY = 1,
	DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;
};

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};

/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed on all
 * CPU types Linux supports for 32-bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};

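/*
 * How the effective constraint is derived (illustrative sketch only; the
 * authoritative logic lives in kernel/power/qos.c): for a PM_QOS_MIN class
 * target_value is the smallest value among the active requests on the plist,
 * for PM_QOS_MAX it is the largest, and with no requests queued it falls
 * back to default_value.  Roughly:
 *
 *	static s32 example_get_value(struct pm_qos_constraints *c)
 *	{
 *		if (plist_head_empty(&c->list))
 *			return c->default_value;
 *		switch (c->type) {
 *		case PM_QOS_MIN:
 *			return plist_first(&c->list)->prio;
 *		case PM_QOS_MAX:
 *			return plist_last(&c->list)->prio;
 *		default:
 *			return c->default_value;
 *		}
 *	}
 */
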
struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};

struct dev_pm_qos {
	struct pm_qos_constraints latency;
	struct pm_qos_flags flags;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);

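/*
 * Typical use of the system-wide PM QoS interface (illustrative sketch, with
 * a hypothetical request object and values): a caller adds a request against
 * one of the classes above, updates it while the constraint is needed and
 * removes it afterwards.  pm_qos_update_request_timeout() additionally lets
 * the request drop back to PM_QOS_DEFAULT_VALUE once timeout_us has expired.
 *
 *	static struct pm_qos_request my_qos_req;	// hypothetical
 *
 *	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	...
 *	pm_qos_update_request(&my_qos_req, 100);
 *	...
 *	pm_qos_remove_request(&my_qos_req);
 */
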
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value);
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							   s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_add_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
					struct dev_pm_qos_request *req,
					s32 value)
			{ return 0; }
#endif

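/*
 * Typical use of the per-device PM QoS interface (illustrative sketch, with
 * a hypothetical request object and values): a consumer constrains a device
 * it depends on and drops the constraint when it no longer applies;
 * notifiers registered with dev_pm_qos_add_notifier() fire when that
 * device's aggregate latency constraint changes.
 *
 *	static struct dev_pm_qos_request my_dev_req;	// hypothetical
 *
 *	dev_pm_qos_add_request(dev, &my_dev_req, DEV_PM_QOS_LATENCY, 100);
 *	...
 *	dev_pm_qos_update_request(&my_dev_req, 50);
 *	...
 *	dev_pm_qos_remove_request(&my_dev_req);
 */
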
#ifdef CONFIG_PM_RUNTIME
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
#endif

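/*
 * Illustrative sketch (hypothetical call site): a subsystem may expose a
 * user-space-writable latency limit for a device in sysfs, typically when
 * the device is added, and hide it again before the device goes away.
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, PM_QOS_DEV_LAT_DEFAULT_VALUE);
 *	...
 *	dev_pm_qos_hide_latency_limit(dev);
 */
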
#endif