blob: 5009fc932c0f92a4acf2b45e26f7f18ca9933528 [file] [log] [blame]
Mark Grossd82b3512008-02-04 22:30:08 -08001/*
2 * This module exposes the interface to kernel space for specifying
3 * QoS dependencies. It provides infrastructure for registration of:
4 *
Mark Grossed771342010-05-06 01:59:26 +02005 * Dependents on a QoS value : register requests
Mark Grossd82b3512008-02-04 22:30:08 -08006 * Watchers of QoS value : get notified when target QoS value changes
7 *
8 * This QoS design is best effort based. Dependents register their QoS needs.
9 * Watchers register to keep track of the current QoS needs of the system.
10 *
11 * There are 3 basic classes of QoS parameter: latency, timeout, throughput
12 * each have defined units:
13 * latency: usec
14 * timeout: usec <-- currently not used.
15 * throughput: kbs (kilo byte / sec)
16 *
Mark Grossed771342010-05-06 01:59:26 +020017 * There are lists of pm_qos_objects each one wrapping requests, notifiers
Mark Grossd82b3512008-02-04 22:30:08 -080018 *
 * User mode requests on a QOS parameter register themselves to the
 * subsystem by opening the device node /dev/... and writing their request to
 * the node. As long as the process holds a file handle open to the node the
 * client continues to be accounted for. Upon file release the usermode
 * request is removed and a new qos target is computed. This way the request
 * an application holds is cleaned up when it closes the file descriptor or
 * exits, and the pm_qos_object gets an opportunity to clean up.
26 *
Richard Hughesbf1db692008-08-05 13:01:35 -070027 * Mark Gross <mgross@linux.intel.com>
Mark Grossd82b3512008-02-04 22:30:08 -080028 */
29
Mark Grossed771342010-05-06 01:59:26 +020030/*#define DEBUG*/
31
Jean Pihete8db0be2011-08-25 15:35:03 +020032#include <linux/pm_qos.h>
Mark Grossd82b3512008-02-04 22:30:08 -080033#include <linux/sched.h>
34#include <linux/spinlock.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/fs.h>
38#include <linux/device.h>
39#include <linux/miscdevice.h>
40#include <linux/string.h>
41#include <linux/platform_device.h>
42#include <linux/init.h>
Rafael J. Wysocki0775a602011-05-27 00:05:23 +020043#include <linux/kernel.h>
Nishanth Menonf5f4eda2014-12-05 11:19:08 -060044#include <linux/debugfs.h>
45#include <linux/seq_file.h>
Praveen Chidambaramba1ec852014-05-21 16:21:31 -060046#include <linux/irq.h>
47#include <linux/irqdesc.h>
Olav Haugan4970a4a2016-05-29 19:50:47 -070048#include <linux/cpumask.h>
Mark Grossd82b3512008-02-04 22:30:08 -080049
50#include <linux/uaccess.h>
Paul Gortmaker6e5fdee2011-05-26 16:00:52 -040051#include <linux/export.h>
Sahara247e9ee2013-06-21 11:12:28 +090052#include <trace/events/power.h>
Mark Grossd82b3512008-02-04 22:30:08 -080053
54/*
Jean Pihetcc749982011-08-25 15:35:12 +020055 * locking rule: all changes to constraints or notifiers lists
Mark Grossd82b3512008-02-04 22:30:08 -080056 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
57 * held, taken with _irqsave. One lock to rule them all
58 */
Mark Grossd82b3512008-02-04 22:30:08 -080059struct pm_qos_object {
Jean Pihet4e1779b2011-08-25 15:35:27 +020060 struct pm_qos_constraints *constraints;
Mark Grossd82b3512008-02-04 22:30:08 -080061 struct miscdevice pm_qos_power_miscdev;
62 char *name;
Mark Grossd82b3512008-02-04 22:30:08 -080063};
64
James Bottomley5f279842010-07-19 02:00:18 +020065static DEFINE_SPINLOCK(pm_qos_lock);
66
Mark Grossd82b3512008-02-04 22:30:08 -080067static struct pm_qos_object null_pm_qos;
Jean Pihet4e1779b2011-08-25 15:35:27 +020068
Mark Grossd82b3512008-02-04 22:30:08 -080069static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
Jean Pihet4e1779b2011-08-25 15:35:27 +020070static struct pm_qos_constraints cpu_dma_constraints = {
71 .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
Tim Chen333c5ae2011-02-11 12:49:04 -080072 .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
Praveen Chidambaramba1ec852014-05-21 16:21:31 -060073 .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
74 PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE },
Tim Chen333c5ae2011-02-11 12:49:04 -080075 .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
Rafael J. Wysocki327adae2014-02-11 00:35:29 +010076 .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
James Bottomley5f279842010-07-19 02:00:18 +020077 .type = PM_QOS_MIN,
Jean Pihet4e1779b2011-08-25 15:35:27 +020078 .notifiers = &cpu_dma_lat_notifier,
79};
80static struct pm_qos_object cpu_dma_pm_qos = {
81 .constraints = &cpu_dma_constraints,
Dominik Brodowskia6f05b972011-11-06 21:54:12 +010082 .name = "cpu_dma_latency",
Mark Grossd82b3512008-02-04 22:30:08 -080083};
84
85static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
Jean Pihet4e1779b2011-08-25 15:35:27 +020086static struct pm_qos_constraints network_lat_constraints = {
87 .list = PLIST_HEAD_INIT(network_lat_constraints.list),
Tim Chen333c5ae2011-02-11 12:49:04 -080088 .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
Praveen Chidambaramba1ec852014-05-21 16:21:31 -060089 .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
90 PM_QOS_NETWORK_LAT_DEFAULT_VALUE },
Tim Chen333c5ae2011-02-11 12:49:04 -080091 .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
Rafael J. Wysocki327adae2014-02-11 00:35:29 +010092 .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
Jean Pihet4e1779b2011-08-25 15:35:27 +020093 .type = PM_QOS_MIN,
94 .notifiers = &network_lat_notifier,
95};
96static struct pm_qos_object network_lat_pm_qos = {
97 .constraints = &network_lat_constraints,
98 .name = "network_latency",
Mark Grossd82b3512008-02-04 22:30:08 -080099};
100
Mark Grossd82b3512008-02-04 22:30:08 -0800101static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
Jean Pihet4e1779b2011-08-25 15:35:27 +0200102static struct pm_qos_constraints network_tput_constraints = {
103 .list = PLIST_HEAD_INIT(network_tput_constraints.list),
Tim Chen333c5ae2011-02-11 12:49:04 -0800104 .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
Praveen Chidambaramba1ec852014-05-21 16:21:31 -0600105 .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
106 PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE },
Tim Chen333c5ae2011-02-11 12:49:04 -0800107 .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
Rafael J. Wysocki327adae2014-02-11 00:35:29 +0100108 .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
James Bottomley5f279842010-07-19 02:00:18 +0200109 .type = PM_QOS_MAX,
Jean Pihet4e1779b2011-08-25 15:35:27 +0200110 .notifiers = &network_throughput_notifier,
111};
112static struct pm_qos_object network_throughput_pm_qos = {
113 .constraints = &network_tput_constraints,
114 .name = "network_throughput",
Mark Grossd82b3512008-02-04 22:30:08 -0800115};
116
117
Tomeu Vizoso7990da72014-09-03 17:49:32 +0200118static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
119static struct pm_qos_constraints memory_bw_constraints = {
120 .list = PLIST_HEAD_INIT(memory_bw_constraints.list),
121 .target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
122 .default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
123 .no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
124 .type = PM_QOS_SUM,
125 .notifiers = &memory_bandwidth_notifier,
126};
127static struct pm_qos_object memory_bandwidth_pm_qos = {
128 .constraints = &memory_bw_constraints,
129 .name = "memory_bandwidth",
130};
131
132
Mark Grossd82b3512008-02-04 22:30:08 -0800133static struct pm_qos_object *pm_qos_array[] = {
134 &null_pm_qos,
135 &cpu_dma_pm_qos,
136 &network_lat_pm_qos,
Tomeu Vizoso7990da72014-09-03 17:49:32 +0200137 &network_throughput_pm_qos,
138 &memory_bandwidth_pm_qos,
Mark Grossd82b3512008-02-04 22:30:08 -0800139};
140
Mark Grossd82b3512008-02-04 22:30:08 -0800141static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
142 size_t count, loff_t *f_pos);
Thomas Renningerf9b9e802011-02-28 22:06:34 +0100143static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
144 size_t count, loff_t *f_pos);
Mark Grossd82b3512008-02-04 22:30:08 -0800145static int pm_qos_power_open(struct inode *inode, struct file *filp);
146static int pm_qos_power_release(struct inode *inode, struct file *filp);
147
148static const struct file_operations pm_qos_power_fops = {
149 .write = pm_qos_power_write,
Thomas Renningerf9b9e802011-02-28 22:06:34 +0100150 .read = pm_qos_power_read,
Mark Grossd82b3512008-02-04 22:30:08 -0800151 .open = pm_qos_power_open,
152 .release = pm_qos_power_release,
Arnd Bergmann6038f372010-08-15 18:52:59 +0200153 .llseek = noop_llseek,
Mark Grossd82b3512008-02-04 22:30:08 -0800154};
155
James Bottomley5f279842010-07-19 02:00:18 +0200156/* unlocked internal variant */
Jean Pihetabe98ec2011-08-25 15:35:34 +0200157static inline int pm_qos_get_value(struct pm_qos_constraints *c)
Mark Grossd82b3512008-02-04 22:30:08 -0800158{
Tomeu Vizoso7990da72014-09-03 17:49:32 +0200159 struct plist_node *node;
160 int total_value = 0;
161
Jean Pihetabe98ec2011-08-25 15:35:34 +0200162 if (plist_head_empty(&c->list))
Rafael J. Wysocki327adae2014-02-11 00:35:29 +0100163 return c->no_constraint_value;
James Bottomley5f279842010-07-19 02:00:18 +0200164
Jean Pihetabe98ec2011-08-25 15:35:34 +0200165 switch (c->type) {
James Bottomley5f279842010-07-19 02:00:18 +0200166 case PM_QOS_MIN:
Jean Pihetabe98ec2011-08-25 15:35:34 +0200167 return plist_first(&c->list)->prio;
James Bottomley5f279842010-07-19 02:00:18 +0200168
169 case PM_QOS_MAX:
Jean Pihetabe98ec2011-08-25 15:35:34 +0200170 return plist_last(&c->list)->prio;
James Bottomley5f279842010-07-19 02:00:18 +0200171
Tomeu Vizoso7990da72014-09-03 17:49:32 +0200172 case PM_QOS_SUM:
173 plist_for_each(node, &c->list)
174 total_value += node->prio;
175
176 return total_value;
177
James Bottomley5f279842010-07-19 02:00:18 +0200178 default:
179 /* runtime check for not using enum */
180 BUG();
Luis Gonzalez Fernandezc6a57bf2012-09-07 21:35:21 +0200181 return PM_QOS_DEFAULT_VALUE;
James Bottomley5f279842010-07-19 02:00:18 +0200182 }
Mark Grossd82b3512008-02-04 22:30:08 -0800183}
184
/*
 * Lockless read of the cached aggregate target for a constraint set.
 * target_value is kept current by pm_qos_update_target() under pm_qos_lock.
 */
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
	return c->target_value;
}
189
/* Cache the aggregate target value; caller holds pm_qos_lock. */
static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
	c->target_value = value;
}
194
Nishanth Menonf5f4eda2014-12-05 11:19:08 -0600195static inline int pm_qos_get_value(struct pm_qos_constraints *c);
/*
 * pm_qos_dbg_show_requests - debugfs seq_file callback dumping one QoS class
 * @s: seq_file; s->private carries the pm_qos_object installed at creation
 * @unused: unused
 *
 * Prints every request on the class's plist together with the aggregate
 * value. Returns 0, or -EINVAL when the qos object or its constraints
 * pointer looks invalid.
 */
static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
{
	struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
	struct pm_qos_constraints *c;
	struct pm_qos_request *req;
	char *type;
	unsigned long flags;
	int tot_reqs = 0;
	int active_reqs = 0;

	if (IS_ERR_OR_NULL(qos)) {
		pr_err("%s: bad qos param!\n", __func__);
		return -EINVAL;
	}
	c = qos->constraints;
	if (IS_ERR_OR_NULL(c)) {
		pr_err("%s: Bad constraints on qos?\n", __func__);
		return -EINVAL;
	}

	/* Lock to ensure we have a snapshot */
	spin_lock_irqsave(&pm_qos_lock, flags);
	if (plist_head_empty(&c->list)) {
		seq_puts(s, "Empty!\n");
		goto out;
	}

	switch (c->type) {
	case PM_QOS_MIN:
		type = "Minimum";
		break;
	case PM_QOS_MAX:
		type = "Maximum";
		break;
	case PM_QOS_SUM:
		type = "Sum";
		break;
	default:
		type = "Unknown";
	}

	/* A request still at the class default is reported but not "Active". */
	plist_for_each_entry(req, &c->list, node) {
		char *state = "Default";

		if ((req->node).prio != c->default_value) {
			active_reqs++;
			state = "Active";
		}
		tot_reqs++;
		seq_printf(s, "%d: %d: %s\n", tot_reqs,
			   (req->node).prio, state);
	}

	seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
		   type, pm_qos_get_value(c), active_reqs, tot_reqs);

out:
	spin_unlock_irqrestore(&pm_qos_lock, flags);
	return 0;
}
256
257static int pm_qos_dbg_open(struct inode *inode, struct file *file)
258{
259 return single_open(file, pm_qos_dbg_show_requests,
260 inode->i_private);
261}
262
/* debugfs file operations: single-shot seq_file dump of one QoS class */
static const struct file_operations pm_qos_debug_fops = {
	.open           = pm_qos_dbg_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
269
/*
 * pm_qos_set_value_for_cpus - recompute the per-CPU aggregate targets
 * @c: constraints list; must belong to the CPU_DMA_LATENCY class
 * @cpus: output mask; a bit is set for every CPU whose target changed
 *
 * Folds each request's value into a per-CPU array over the CPUs in the
 * request's cpus_affine mask, then publishes the result in
 * c->target_per_cpu[].  Caller holds pm_qos_lock.
 *
 * Returns 0 on success, or -EINVAL for any class other than
 * CPU_DMA_LATENCY (in which case neither @cpus nor target_per_cpu[] is
 * touched).
 */
static inline int pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
		struct cpumask *cpus)
{
	struct pm_qos_request *req = NULL;
	int cpu;
	s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };

	/*
	 * pm_qos_constraints can be from different classes;
	 * update the cpumask only for the CPU_DMA_LATENCY class
	 */

	if (c != pm_qos_array[PM_QOS_CPU_DMA_LATENCY]->constraints)
		return -EINVAL;

	plist_for_each_entry(req, &c->list, node) {
		for_each_cpu(cpu, &req->cpus_affine) {
			switch (c->type) {
			case PM_QOS_MIN:
				if (qos_val[cpu] > req->node.prio)
					qos_val[cpu] = req->node.prio;
				break;
			case PM_QOS_MAX:
				if (req->node.prio > qos_val[cpu])
					qos_val[cpu] = req->node.prio;
				break;
			case PM_QOS_SUM:
				qos_val[cpu] += req->node.prio;
				break;
			default:
				BUG();
				break;
			}
		}
	}

	/* Publish the new targets and report which CPUs actually changed. */
	for_each_possible_cpu(cpu) {
		if (c->target_per_cpu[cpu] != qos_val[cpu])
			cpumask_set_cpu(cpu, cpus);
		c->target_per_cpu[cpu] = qos_val[cpu];
	}

	return 0;
}
314
/**
 * pm_qos_update_target - manages the constraints list and calls the notifiers
 *  if needed
 * @c: constraints data struct
 * @req: request to add to the list, to update or to remove
 * @action: action to take on the constraints list
 * @value: value of the request to add or update
 *
 * This function returns 1 if the aggregated constraint value has changed, 0
 *  otherwise.
 *
 * In this per-CPU variant the notifiers run when any CPU's target changed
 * (pm_qos_set_value_for_cpus() filled @cpus), or when the aggregate changed
 * for a class that does not track per-CPU targets (the helper returned
 * non-zero).  The changed-CPU mask is passed to the notifier chain.
 */
int pm_qos_update_target(struct pm_qos_constraints *c,
			 struct pm_qos_request *req,
			 enum pm_qos_req_action action, int value)
{
	unsigned long flags;
	int prev_value, curr_value, new_value;
	struct plist_node *node = &req->node;
	struct cpumask cpus;
	int ret;

	spin_lock_irqsave(&pm_qos_lock, flags);
	prev_value = pm_qos_get_value(c);
	if (value == PM_QOS_DEFAULT_VALUE)
		new_value = c->default_value;
	else
		new_value = value;

	switch (action) {
	case PM_QOS_REMOVE_REQ:
		plist_del(node, &c->list);
		break;
	case PM_QOS_UPDATE_REQ:
		/*
		 * to change the list, we atomically remove, reinit
		 * with new value and add, then see if the extremal
		 * changed
		 */
		plist_del(node, &c->list);
		/* fall through: reinsert the node with its new value */
	case PM_QOS_ADD_REQ:
		plist_node_init(node, new_value);
		plist_add(node, &c->list);
		break;
	default:
		/* no action */
		;
	}

	curr_value = pm_qos_get_value(c);
	cpumask_clear(&cpus);
	pm_qos_set_value(c, curr_value);
	ret = pm_qos_set_value_for_cpus(c, &cpus);

	spin_unlock_irqrestore(&pm_qos_lock, flags);

	trace_pm_qos_update_target(action, prev_value, curr_value);

	/*
	 * if cpu mask bits are set, call the notifier call chain
	 * to update the new qos restriction for the cores
	 */

	if (!cpumask_empty(&cpus) ||
	    (ret && prev_value != curr_value)) {
		ret = 1;
		if (c->notifiers)
			blocking_notifier_call_chain(c->notifiers,
				(unsigned long)curr_value, &cpus);
	} else {
		ret = 0;
	}
	return ret;
}
388
Mark Grossd82b3512008-02-04 22:30:08 -0800389/**
Rafael J. Wysocki5efbe422012-10-23 01:07:46 +0200390 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
391 * @pqf: Device PM QoS flags set to remove the request from.
392 * @req: Request to remove from the set.
393 */
394static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
395 struct pm_qos_flags_request *req)
396{
397 s32 val = 0;
398
399 list_del(&req->node);
400 list_for_each_entry(req, &pqf->list, node)
401 val |= req->flags;
402
403 pqf->effective_flags = val;
404}
405
/**
 * pm_qos_update_flags - Update a set of PM QoS flags.
 * @pqf: Set of flags to update.
 * @req: Request to add to the set, to modify, or to remove from the set.
 * @action: Action to take on the set.
 * @val: Value of the request to add or modify.
 *
 * Update the given set of PM QoS flags and call notifiers if the aggregate
 * value has changed.  Returns 1 if the aggregate constraint value has changed,
 * 0 otherwise.
 */
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val)
{
	unsigned long irqflags;
	s32 prev_value, curr_value;

	spin_lock_irqsave(&pm_qos_lock, irqflags);

	prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

	switch (action) {
	case PM_QOS_REMOVE_REQ:
		pm_qos_flags_remove_req(pqf, req);
		break;
	case PM_QOS_UPDATE_REQ:
		pm_qos_flags_remove_req(pqf, req);
		/* fall through: re-add the request with its new flags */
	case PM_QOS_ADD_REQ:
		req->flags = val;
		INIT_LIST_HEAD(&req->node);
		list_add_tail(&req->node, &pqf->list);
		pqf->effective_flags |= val;
		break;
	default:
		/* no action */
		;
	}

	curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

	spin_unlock_irqrestore(&pm_qos_lock, irqflags);

	trace_pm_qos_update_flags(action, prev_value, curr_value);
	return prev_value != curr_value;
}
452
/**
 * pm_qos_request - returns current system wide qos expectation
 * @pm_qos_class: identification of which qos value is requested
 *
 * This function returns the current target value.  Lockless: it reads the
 * cached aggregate via pm_qos_read_value().
 */
int pm_qos_request(int pm_qos_class)
{
	return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
}
EXPORT_SYMBOL_GPL(pm_qos_request);
Mark Grossd82b3512008-02-04 22:30:08 -0800464
Praveen Chidambaramba1ec852014-05-21 16:21:31 -0600465int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
466{
Olav Haugan4970a4a2016-05-29 19:50:47 -0700467 if (cpu_isolated(cpu))
468 return INT_MAX;
469
Praveen Chidambaramba1ec852014-05-21 16:21:31 -0600470 return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
471}
472EXPORT_SYMBOL(pm_qos_request_for_cpu);
473
/*
 * pm_qos_request_active - report whether @req has been added and not removed.
 * Works because pm_qos_remove_request() zeroes the struct and class 0 is
 * the null class (null_pm_qos).
 */
int pm_qos_request_active(struct pm_qos_request *req)
{
	return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
479
/*
 * pm_qos_request_for_cpumask - aggregate the per-CPU targets over @mask
 * @pm_qos_class: QoS class whose per-CPU targets are combined
 * @mask: CPUs to fold together
 *
 * Takes pm_qos_lock for a consistent snapshot of target_per_cpu[].
 * NOTE(review): only PM_QOS_MIN/PM_QOS_MAX are handled here; a PM_QOS_SUM
 * class (memory_bandwidth) would hit BUG() — confirm callers never pass it.
 */
int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
{
	unsigned long irqflags;
	int cpu;
	struct pm_qos_constraints *c = NULL;
	int val;

	spin_lock_irqsave(&pm_qos_lock, irqflags);
	c = pm_qos_array[pm_qos_class]->constraints;
	val = c->default_value;

	for_each_cpu(cpu, mask) {

		switch (c->type) {
		case PM_QOS_MIN:
			if (c->target_per_cpu[cpu] < val)
				val = c->target_per_cpu[cpu];
			break;
		case PM_QOS_MAX:
			if (c->target_per_cpu[cpu] > val)
				val = c->target_per_cpu[cpu];
			break;
		default:
			BUG();
			break;
		}
	}
	spin_unlock_irqrestore(&pm_qos_lock, irqflags);

	return val;
}
EXPORT_SYMBOL(pm_qos_request_for_cpumask);
512
Stephen Boyd40fea92f2013-08-13 14:12:40 -0700513static void __pm_qos_update_request(struct pm_qos_request *req,
514 s32 new_value)
515{
516 trace_pm_qos_update_request(req->pm_qos_class, new_value);
517
518 if (new_value != req->node.prio)
519 pm_qos_update_target(
520 pm_qos_array[req->pm_qos_class]->constraints,
Praveen Chidambaram46710102014-05-20 12:57:14 -0600521 req, PM_QOS_UPDATE_REQ, new_value);
Stephen Boyd40fea92f2013-08-13 14:12:40 -0700522}
523
Mark Grossd82b3512008-02-04 22:30:08 -0800524/**
MyungJoo Hamc4772d12012-03-28 23:31:24 +0200525 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
526 * @work: work struct for the delayed work (timeout)
527 *
528 * This cancels the timeout request by falling back to the default at timeout.
529 */
530static void pm_qos_work_fn(struct work_struct *work)
531{
532 struct pm_qos_request *req = container_of(to_delayed_work(work),
533 struct pm_qos_request,
534 work);
535
Stephen Boyd40fea92f2013-08-13 14:12:40 -0700536 __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
MyungJoo Hamc4772d12012-03-28 23:31:24 +0200537}
538
Praveen Chidambaramba1ec852014-05-21 16:21:31 -0600539#ifdef CONFIG_SMP
/*
 * pm_qos_irq_release - IRQ affinity-notifier release callback
 *
 * When the affinity notifier is torn down we can no longer track the IRQ,
 * so widen the request's affinity to all CPUs (under pm_qos_lock) and
 * re-aggregate with the class default value.
 */
static void pm_qos_irq_release(struct kref *ref)
{
	unsigned long flags;
	struct irq_affinity_notify *notify = container_of(ref,
			struct irq_affinity_notify, kref);
	struct pm_qos_request *req = container_of(notify,
			struct pm_qos_request, irq_notify);
	struct pm_qos_constraints *c =
		pm_qos_array[req->pm_qos_class]->constraints;

	spin_lock_irqsave(&pm_qos_lock, flags);
	cpumask_setall(&req->cpus_affine);
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, c->default_value);
}
556
/*
 * pm_qos_irq_notify - IRQ affinity-change callback
 *
 * Mirror the IRQ's new affinity mask into the request (under pm_qos_lock)
 * and re-run the aggregation so the per-CPU targets follow the IRQ.
 */
static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
		const cpumask_t *mask)
{
	unsigned long flags;
	struct pm_qos_request *req = container_of(notify,
			struct pm_qos_request, irq_notify);
	struct pm_qos_constraints *c =
		pm_qos_array[req->pm_qos_class]->constraints;

	spin_lock_irqsave(&pm_qos_lock, flags);
	cpumask_copy(&req->cpus_affine, mask);
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, req->node.prio);
}
573
/**
 * pm_qos_add_request - inserts new qos request into the list
 * @req: pointer to a preallocated handle
 * @pm_qos_class: identifies which list of qos request to use
 * @value: defines the qos request
 *
 * This function inserts a new entry in the pm_qos_class list of requested qos
 * performance characteristics. It recomputes the aggregate QoS expectations
 * for the pm_qos_class of parameters and initializes the pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 */

void pm_qos_add_request(struct pm_qos_request *req,
			int pm_qos_class, s32 value)
{
	if (!req) /*guard against callers passing in null */
		return;

	if (pm_qos_request_active(req)) {
		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
		return;
	}

	/* Resolve the request's CPU affinity before it is queued. */
	switch (req->type) {
	case PM_QOS_REQ_AFFINE_CORES:
		if (cpumask_empty(&req->cpus_affine)) {
			req->type = PM_QOS_REQ_ALL_CORES;
			cpumask_setall(&req->cpus_affine);
			WARN(1, KERN_ERR "Affine cores not set for request with affinity flag\n");
		}
		break;
#ifdef CONFIG_SMP
	case PM_QOS_REQ_AFFINE_IRQ:
		if (irq_can_set_affinity(req->irq)) {
			struct irq_desc *desc = irq_to_desc(req->irq);
			struct cpumask *mask;

			/*
			 * NOTE(review): bails out without adding the request
			 * when the irq has no descriptor — the request stays
			 * inactive; confirm callers tolerate this.
			 */
			if (!desc)
				return;

			mask = desc->irq_data.common->affinity;

			/* Get the current affinity */
			cpumask_copy(&req->cpus_affine, mask);
			req->irq_notify.irq = req->irq;
			req->irq_notify.notify = pm_qos_irq_notify;
			req->irq_notify.release = pm_qos_irq_release;

		} else {
			req->type = PM_QOS_REQ_ALL_CORES;
			cpumask_setall(&req->cpus_affine);
			WARN(1, KERN_ERR "IRQ-%d not set for request with affinity flag\n",
					req->irq);
		}
		break;
#endif
	default:
		WARN(1, KERN_ERR "Unknown request type %d\n", req->type);
		/* fall through */
	case PM_QOS_REQ_ALL_CORES:
		cpumask_setall(&req->cpus_affine);
		break;
	}

	req->pm_qos_class = pm_qos_class;
	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
	trace_pm_qos_add_request(pm_qos_class, value);
	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
			     req, PM_QOS_ADD_REQ, value);

#ifdef CONFIG_SMP
	/*
	 * Register for affinity updates after the request is on the list;
	 * on failure, fall back to an all-cores request and re-aggregate.
	 */
	if (req->type == PM_QOS_REQ_AFFINE_IRQ &&
			irq_can_set_affinity(req->irq)) {
		int ret = 0;

		ret = irq_set_affinity_notifier(req->irq,
					&req->irq_notify);
		if (ret) {
			WARN(1, "IRQ affinity notify set failed\n");
			req->type = PM_QOS_REQ_ALL_CORES;
			cpumask_setall(&req->cpus_affine);
			pm_qos_update_target(
				pm_qos_array[pm_qos_class]->constraints,
				req, PM_QOS_UPDATE_REQ, value);
		}
	}
#endif
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);
Mark Grossd82b3512008-02-04 22:30:08 -0800664
/**
 * pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing qos request for the pm_qos_class of parameters along
 * with updating the target pm_qos_class value.
 *
 * Attempts are made to make this code callable on hot code paths.
 */
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value)
{
	if (!req) /*guard against callers passing in null */
		return;

	if (!pm_qos_request_active(req)) {
		WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
		return;
	}

	/*
	 * This function may be called very early during boot, for example,
	 * from of_clk_init(), where irq needs to stay disabled.
	 * cancel_delayed_work_sync() assumes that irq is enabled on
	 * invocation and re-enables it on return.  Avoid calling it until
	 * workqueue is initialized.
	 */
	if (keventd_up())
		cancel_delayed_work_sync(&req->work);

	__pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
Mark Grossd82b3512008-02-04 22:30:08 -0800699
700/**
MyungJoo Hamc4772d12012-03-28 23:31:24 +0200701 * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
702 * @req : handle to list element holding a pm_qos request to use
703 * @new_value: defines the temporal qos request
704 * @timeout_us: the effective duration of this qos request in usecs.
705 *
706 * After timeout_us, this qos request is cancelled automatically.
707 */
708void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
709 unsigned long timeout_us)
710{
711 if (!req)
712 return;
713 if (WARN(!pm_qos_request_active(req),
714 "%s called for unknown object.", __func__))
715 return;
716
Tejun Heoed1ac6e2013-01-11 13:37:33 +0100717 cancel_delayed_work_sync(&req->work);
MyungJoo Hamc4772d12012-03-28 23:31:24 +0200718
Saharaae8822b2013-06-21 11:12:29 +0900719 trace_pm_qos_update_request_timeout(req->pm_qos_class,
720 new_value, timeout_us);
MyungJoo Hamc4772d12012-03-28 23:31:24 +0200721 if (new_value != req->node.prio)
722 pm_qos_update_target(
723 pm_qos_array[req->pm_qos_class]->constraints,
Praveen Chidambaram46710102014-05-20 12:57:14 -0600724 req, PM_QOS_UPDATE_REQ, new_value);
MyungJoo Hamc4772d12012-03-28 23:31:24 +0200725
726 schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
727}
728
729/**
Mark Grossed771342010-05-06 01:59:26 +0200730 * pm_qos_remove_request - modifies an existing qos request
Jean Pihetcc749982011-08-25 15:35:12 +0200731 * @req: handle to request list element
Mark Grossd82b3512008-02-04 22:30:08 -0800732 *
Jean Pihetcc749982011-08-25 15:35:12 +0200733 * Will remove pm qos request from the list of constraints and
Mark Grossed771342010-05-06 01:59:26 +0200734 * recompute the current target value for the pm_qos_class. Call this
735 * on slow code paths.
Mark Grossd82b3512008-02-04 22:30:08 -0800736 */
Jean Pihetcc749982011-08-25 15:35:12 +0200737void pm_qos_remove_request(struct pm_qos_request *req)
Mark Grossd82b3512008-02-04 22:30:08 -0800738{
Jean Pihetabe98ec2011-08-25 15:35:34 +0200739 if (!req) /*guard against callers passing in null */
Mark Grossed771342010-05-06 01:59:26 +0200740 return;
741 /* silent return to keep pcm code cleaner */
742
Jean Pihetcc749982011-08-25 15:35:12 +0200743 if (!pm_qos_request_active(req)) {
Srinivas Rao L8bcb8ea2016-01-12 15:40:25 +0530744 WARN(1, "pm_qos_remove_request() called for unknown object\n");
James Bottomley82f68252010-07-05 22:53:06 +0200745 return;
746 }
747
Tejun Heoed1ac6e2013-01-11 13:37:33 +0100748 cancel_delayed_work_sync(&req->work);
MyungJoo Hamc4772d12012-03-28 23:31:24 +0200749
Srinivas Rao L8bcb8ea2016-01-12 15:40:25 +0530750#ifdef CONFIG_SMP
751 if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
752 int ret = 0;
753 /* Get the current affinity */
754 ret = irq_set_affinity_notifier(req->irq, NULL);
755 if (ret)
756 WARN(1, "IRQ affinity notify set failed\n");
757 }
758#endif
759
Saharaae8822b2013-06-21 11:12:29 +0900760 trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
Jean Pihetabe98ec2011-08-25 15:35:34 +0200761 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
Praveen Chidambaram46710102014-05-20 12:57:14 -0600762 req, PM_QOS_REMOVE_REQ,
Jean Pihetabe98ec2011-08-25 15:35:34 +0200763 PM_QOS_DEFAULT_VALUE);
Jean Pihetcc749982011-08-25 15:35:12 +0200764 memset(req, 0, sizeof(*req));
Mark Grossd82b3512008-02-04 22:30:08 -0800765}
Mark Grossed771342010-05-06 01:59:26 +0200766EXPORT_SYMBOL_GPL(pm_qos_remove_request);
Mark Grossd82b3512008-02-04 22:30:08 -0800767
768/**
769 * pm_qos_add_notifier - sets notification entry for changes to target value
770 * @pm_qos_class: identifies which qos target changes should be notified.
771 * @notifier: notifier block managed by caller.
772 *
773 * will register the notifier into a notification chain that gets called
Richard Hughesbf1db692008-08-05 13:01:35 -0700774 * upon changes to the pm_qos_class target value.
Mark Grossd82b3512008-02-04 22:30:08 -0800775 */
Mark Grossed771342010-05-06 01:59:26 +0200776int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
Mark Grossd82b3512008-02-04 22:30:08 -0800777{
778 int retval;
779
780 retval = blocking_notifier_chain_register(
Jean Pihet4e1779b2011-08-25 15:35:27 +0200781 pm_qos_array[pm_qos_class]->constraints->notifiers,
782 notifier);
Mark Grossd82b3512008-02-04 22:30:08 -0800783
784 return retval;
785}
786EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
787
788/**
789 * pm_qos_remove_notifier - deletes notification entry from chain.
790 * @pm_qos_class: identifies which qos target changes are notified.
791 * @notifier: notifier block to be removed.
792 *
793 * will remove the notifier from the notification chain that gets called
Richard Hughesbf1db692008-08-05 13:01:35 -0700794 * upon changes to the pm_qos_class target value.
Mark Grossd82b3512008-02-04 22:30:08 -0800795 */
796int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
797{
798 int retval;
799
800 retval = blocking_notifier_chain_unregister(
Jean Pihet4e1779b2011-08-25 15:35:27 +0200801 pm_qos_array[pm_qos_class]->constraints->notifiers,
802 notifier);
Mark Grossd82b3512008-02-04 22:30:08 -0800803
804 return retval;
805}
806EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
807
Jean Pihet4a31a332011-08-25 15:35:20 +0200808/* User space interface to PM QoS classes via misc devices */
Nishanth Menonf5f4eda2014-12-05 11:19:08 -0600809static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
Jean Pihet4a31a332011-08-25 15:35:20 +0200810{
811 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
812 qos->pm_qos_power_miscdev.name = qos->name;
813 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
814
Nishanth Menonf5f4eda2014-12-05 11:19:08 -0600815 if (d) {
816 (void)debugfs_create_file(qos->name, S_IRUGO, d,
817 (void *)qos, &pm_qos_debug_fops);
818 }
819
Jean Pihet4a31a332011-08-25 15:35:20 +0200820 return misc_register(&qos->pm_qos_power_miscdev);
821}
822
823static int find_pm_qos_object_by_minor(int minor)
824{
825 int pm_qos_class;
826
Saharad24c2a42013-06-20 11:33:57 +0900827 for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
Jean Pihet4a31a332011-08-25 15:35:20 +0200828 pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
829 if (minor ==
830 pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
831 return pm_qos_class;
832 }
833 return -1;
834}
835
Mark Grossd82b3512008-02-04 22:30:08 -0800836static int pm_qos_power_open(struct inode *inode, struct file *filp)
837{
Mark Grossd82b3512008-02-04 22:30:08 -0800838 long pm_qos_class;
839
840 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
Saharad24c2a42013-06-20 11:33:57 +0900841 if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
Jean Pihetcc749982011-08-25 15:35:12 +0200842 struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
James Bottomley82f68252010-07-05 22:53:06 +0200843 if (!req)
844 return -ENOMEM;
845
846 pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
847 filp->private_data = req;
Mark Grossed771342010-05-06 01:59:26 +0200848
Guennadi Liakhovetski6513fd62011-11-03 10:12:36 +0100849 return 0;
Mark Grossd82b3512008-02-04 22:30:08 -0800850 }
Mark Grossd82b3512008-02-04 22:30:08 -0800851 return -EPERM;
852}
853
854static int pm_qos_power_release(struct inode *inode, struct file *filp)
855{
Jean Pihetcc749982011-08-25 15:35:12 +0200856 struct pm_qos_request *req;
Mark Grossd82b3512008-02-04 22:30:08 -0800857
James Bottomley82f68252010-07-05 22:53:06 +0200858 req = filp->private_data;
Mark Grossed771342010-05-06 01:59:26 +0200859 pm_qos_remove_request(req);
James Bottomley82f68252010-07-05 22:53:06 +0200860 kfree(req);
Mark Grossd82b3512008-02-04 22:30:08 -0800861
862 return 0;
863}
864
Mark Grossed771342010-05-06 01:59:26 +0200865
Thomas Renningerf9b9e802011-02-28 22:06:34 +0100866static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
867 size_t count, loff_t *f_pos)
868{
869 s32 value;
870 unsigned long flags;
Jean Pihetcc749982011-08-25 15:35:12 +0200871 struct pm_qos_request *req = filp->private_data;
Thomas Renningerf9b9e802011-02-28 22:06:34 +0100872
Jean Pihetcc749982011-08-25 15:35:12 +0200873 if (!req)
Thomas Renningerf9b9e802011-02-28 22:06:34 +0100874 return -EINVAL;
Jean Pihetcc749982011-08-25 15:35:12 +0200875 if (!pm_qos_request_active(req))
Thomas Renningerf9b9e802011-02-28 22:06:34 +0100876 return -EINVAL;
877
Thomas Renningerf9b9e802011-02-28 22:06:34 +0100878 spin_lock_irqsave(&pm_qos_lock, flags);
Jean Pihetabe98ec2011-08-25 15:35:34 +0200879 value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
Thomas Renningerf9b9e802011-02-28 22:06:34 +0100880 spin_unlock_irqrestore(&pm_qos_lock, flags);
881
882 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
883}
884
Mark Grossd82b3512008-02-04 22:30:08 -0800885static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
886 size_t count, loff_t *f_pos)
887{
888 s32 value;
Jean Pihetcc749982011-08-25 15:35:12 +0200889 struct pm_qos_request *req;
Mark Grossd82b3512008-02-04 22:30:08 -0800890
Mark Grossed771342010-05-06 01:59:26 +0200891 if (count == sizeof(s32)) {
892 if (copy_from_user(&value, buf, sizeof(s32)))
893 return -EFAULT;
Andy Shevchenkod4f7ecf2013-09-11 17:02:38 +0300894 } else {
Rafael J. Wysocki0775a602011-05-27 00:05:23 +0200895 int ret;
896
Andy Shevchenkod4f7ecf2013-09-11 17:02:38 +0300897 ret = kstrtos32_from_user(buf, count, 16, &value);
898 if (ret)
899 return ret;
Rafael J. Wysocki0775a602011-05-27 00:05:23 +0200900 }
Mark Grossd82b3512008-02-04 22:30:08 -0800901
Jean Pihetcc749982011-08-25 15:35:12 +0200902 req = filp->private_data;
903 pm_qos_update_request(req, value);
Mark Grossed771342010-05-06 01:59:26 +0200904
905 return count;
Mark Grossd82b3512008-02-04 22:30:08 -0800906}
907
908
909static int __init pm_qos_power_init(void)
910{
911 int ret = 0;
Alex Fridd031e1d2012-01-29 20:39:25 +0100912 int i;
Nishanth Menonf5f4eda2014-12-05 11:19:08 -0600913 struct dentry *d;
Mark Grossd82b3512008-02-04 22:30:08 -0800914
Alex Fridd031e1d2012-01-29 20:39:25 +0100915 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
916
Nishanth Menonf5f4eda2014-12-05 11:19:08 -0600917 d = debugfs_create_dir("pm_qos", NULL);
918 if (IS_ERR_OR_NULL(d))
919 d = NULL;
920
Saharad24c2a42013-06-20 11:33:57 +0900921 for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
Nishanth Menonf5f4eda2014-12-05 11:19:08 -0600922 ret = register_pm_qos_misc(pm_qos_array[i], d);
Alex Fridd031e1d2012-01-29 20:39:25 +0100923 if (ret < 0) {
924 printk(KERN_ERR "pm_qos_param: %s setup failed\n",
925 pm_qos_array[i]->name);
926 return ret;
927 }
Mark Grossd82b3512008-02-04 22:30:08 -0800928 }
Mark Grossd82b3512008-02-04 22:30:08 -0800929
930 return ret;
931}
932
933late_initcall(pm_qos_power_init);