#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* Interface for the pm_qos_power infrastructure of the Linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};
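
/*
 * Usage sketch (illustrative only, not part of this header): each class above,
 * except PM_QOS_RESERVED, is also exposed to user space as a misc device
 * (e.g. /dev/cpu_dma_latency); a process holds its constraint for as long as
 * it keeps the file descriptor open.  The 10 us value below is a made-up
 * example requirement.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdint.h>
 *
 *	int fd = open("/dev/cpu_dma_latency", O_RDWR);
 *	int32_t latency_us = 10;
 *	write(fd, &latency_us, sizeof(latency_us));	// request <= 10 us
 *	...						// latency-critical work
 *	close(fd);					// drop the constraint
 */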

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_DEV_LAT_DEFAULT_VALUE		0

struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};
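
/*
 * Illustrative sketch (an assumption about typical use, not mandated by this
 * header): a driver usually embeds a struct pm_qos_request in its private
 * data and treats it as opaque, letting pm_qos_add_request() and friends
 * manage the fields.  The "foo" names are hypothetical.
 *
 *	struct foo_device {
 *		struct pm_qos_request qos_req;
 *		// ... other driver state ...
 *	};
 *
 *	static void foo_start(struct foo_device *foo)
 *	{
 *		if (!pm_qos_request_active(&foo->qos_req))
 *			pm_qos_add_request(&foo->qos_req,
 *					   PM_QOS_CPU_DMA_LATENCY, 20);
 *	}
 */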

struct dev_pm_qos_request {
	struct plist_node node;
	struct device *dev;
};

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};

/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * atomically.  Atomic access is only guaranteed on all CPU types Linux
 * supports for 32-bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};
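
/*
 * Conceptual sketch of how a constraint set is aggregated (simplified, and
 * not necessarily identical to the kernel's internal helper): requests sit on
 * the plist keyed by their value, so the aggregate is simply the first or
 * last node's priority depending on the class type.
 *
 *	static s32 example_get_value(struct pm_qos_constraints *c)
 *	{
 *		if (plist_head_empty(&c->list))
 *			return c->default_value;
 *		switch (c->type) {
 *		case PM_QOS_MIN:
 *			return plist_first(&c->list)->prio;
 *		case PM_QOS_MAX:
 *			return plist_last(&c->list)->prio;
 *		default:
 *			return PM_QOS_DEFAULT_VALUE;
 *		}
 *	}
 */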

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};
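
/*
 * Illustrative call pattern (an assumption about typical use, not a
 * contract): the request-management helpers funnel into
 * pm_qos_update_target() with one of the actions above, e.g. when a request
 * is added to a constraint set "c":
 *
 *	pm_qos_update_target(c, &req->node, PM_QOS_ADD_REQ, value);
 *
 * and when it is dropped again:
 *
 *	pm_qos_update_target(c, &req->node, PM_QOS_REMOVE_REQ,
 *			     PM_QOS_DEFAULT_VALUE);
 */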

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}
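
/*
 * Usage sketch (hypothetical caller): dev_pm_qos_request_active() lets a
 * driver guard updates against requests that were never added or have
 * already been removed.
 *
 *	if (dev_pm_qos_request_active(&req))
 *		dev_pm_qos_update_request(&req, new_limit);
 */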

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
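
/*
 * End-to-end usage sketch (all "foo_*" names are hypothetical, values are
 * made up): a driver adds a CPU DMA latency request around a burst of
 * activity, temporarily tightens it with a timeout, and watches the
 * aggregate value through a notifier.
 *
 *	static struct pm_qos_request foo_qos;
 *
 *	static int foo_qos_notify(struct notifier_block *nb,
 *				  unsigned long new_value, void *data)
 *	{
 *		pr_info("CPU DMA latency target is now %lu us\n", new_value);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_qos_nb = {
 *		.notifier_call = foo_qos_notify,
 *	};
 *
 *	static void foo_begin_burst(void)
 *	{
 *		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &foo_qos_nb);
 *		pm_qos_add_request(&foo_qos, PM_QOS_CPU_DMA_LATENCY, 100);
 *		// tighten to 20 us, but only for the next 50 ms
 *		pm_qos_update_request_timeout(&foo_qos, 20, 50 * USEC_PER_MSEC);
 *	}
 *
 *	static void foo_end_burst(void)
 *	{
 *		pm_qos_remove_request(&foo_qos);
 *		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, &foo_qos_nb);
 *	}
 */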

#ifdef CONFIG_PM
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value);
#else
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_add_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
					struct dev_pm_qos_request *req,
					s32 value)
			{ return 0; }
#endif
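
/*
 * Usage sketch for the per-device API (all "foo_*" names are hypothetical,
 * values are made up): a driver constrains its own device and, via the
 * ancestor helper, a suitable ancestor such as its parent controller, then
 * drops both constraints when done.
 *
 *	static struct dev_pm_qos_request foo_dev_req;
 *	static struct dev_pm_qos_request foo_parent_req;
 *
 *	static int foo_constrain(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_qos_add_request(dev, &foo_dev_req, 100);
 *		if (ret < 0)
 *			return ret;
 *		return dev_pm_qos_add_ancestor_request(dev, &foo_parent_req, 100);
 *	}
 *
 *	static void foo_relax(void)
 *	{
 *		dev_pm_qos_remove_request(&foo_parent_req);
 *		dev_pm_qos_remove_request(&foo_dev_req);
 *	}
 */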

#ifdef CONFIG_PM_RUNTIME
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
#endif
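
/*
 * Usage sketch (hypothetical driver): exposing the latency limit lets user
 * space adjust the device's PM QoS resume-latency constraint through sysfs;
 * hide it again before the device goes away.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		return dev_pm_qos_expose_latency_limit(dev,
 *						PM_QOS_DEV_LAT_DEFAULT_VALUE);
 *	}
 *
 *	static void foo_remove(struct device *dev)
 *	{
 *		dev_pm_qos_hide_latency_limit(dev);
 *	}
 */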

#endif