/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd.
 */
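
/*
 * Illustrative sketch, not part of this file's logic: a "dependent" as
 * described above typically embeds a struct dev_pm_qos_request in its
 * driver data and drives it through the add/update/remove calls defined
 * below.  The device pointer and the latency values are hypothetical.
 *
 *	static struct dev_pm_qos_request foo_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &foo_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	...
 *	ret = dev_pm_qos_update_request(&foo_req, 50);
 *	...
 *	ret = dev_pm_qos_remove_request(&foo_req);
 */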

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
			!dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /*guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /*guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
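
/*
 * Illustrative sketch, not part of this file's logic: a "watcher" as
 * described in the header comment registers a notifier_block whose
 * callback is passed the new aggregate DEV_PM_QOS_RESUME_LATENCY target
 * as its "value" argument.  The callback, block and device names below
 * are hypothetical.
 *
 *	static int foo_latency_notify(struct notifier_block *nb,
 *				      unsigned long value, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_latency_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(dev, &foo_nb);
 *	...
 *	ret = dev_pm_qos_remove_notifier(dev, &foo_nb);
 */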

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
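
/*
 * Illustrative sketch, not part of this file's logic: a driver for a
 * sub-device can constrain the first ancestor accepted by the
 * type-specific walk above instead of its own device.  The device
 * pointer and the latency value are hypothetical.
 *
 *	static struct dev_pm_qos_request foo_ancestor_req;
 *
 *	ret = dev_pm_qos_add_ancestor_request(dev, &foo_ancestor_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 20);
 *	...
 *	ret = dev_pm_qos_remove_request(&foo_ancestor_req);
 */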
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100562
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200563static void __dev_pm_qos_drop_user_request(struct device *dev,
564 enum dev_pm_qos_req_type type)
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100565{
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100566 struct dev_pm_qos_request *req = NULL;
567
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200568 switch(type) {
Rafael J. Wysockib02f6692014-02-11 00:35:23 +0100569 case DEV_PM_QOS_RESUME_LATENCY:
570 req = dev->power.qos->resume_latency_req;
571 dev->power.qos->resume_latency_req = NULL;
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200572 break;
Rafael J. Wysocki2d984ad2014-02-11 00:35:38 +0100573 case DEV_PM_QOS_LATENCY_TOLERANCE:
574 req = dev->power.qos->latency_tolerance_req;
575 dev->power.qos->latency_tolerance_req = NULL;
576 break;
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200577 case DEV_PM_QOS_FLAGS:
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100578 req = dev->power.qos->flags_req;
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200579 dev->power.qos->flags_req = NULL;
580 break;
581 }
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100582 __dev_pm_qos_remove_request(req);
583 kfree(req);
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100584}
585
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200586static void dev_pm_qos_drop_user_request(struct device *dev,
587 enum dev_pm_qos_req_type type)
588{
589 mutex_lock(&dev_pm_qos_mtx);
590 __dev_pm_qos_drop_user_request(dev, type);
591 mutex_unlock(&dev_pm_qos_mtx);
592}
593
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100594/**
595 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
596 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
597 * @value: Initial value of the latency limit.
598 */
599int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
600{
601 struct dev_pm_qos_request *req;
602 int ret;
603
604 if (!device_is_registered(dev) || value < 0)
605 return -EINVAL;
606
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100607 req = kzalloc(sizeof(*req), GFP_KERNEL);
608 if (!req)
609 return -ENOMEM;
610
Rafael J. Wysockib02f6692014-02-11 00:35:23 +0100611 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100612 if (ret < 0) {
613 kfree(req);
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100614 return ret;
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100615 }
616
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200617 mutex_lock(&dev_pm_qos_sysfs_mtx);
618
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100619 mutex_lock(&dev_pm_qos_mtx);
620
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100621 if (IS_ERR_OR_NULL(dev->power.qos))
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100622 ret = -ENODEV;
Rafael J. Wysockib02f6692014-02-11 00:35:23 +0100623 else if (dev->power.qos->resume_latency_req)
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100624 ret = -EEXIST;
625
626 if (ret < 0) {
627 __dev_pm_qos_remove_request(req);
628 kfree(req);
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200629 mutex_unlock(&dev_pm_qos_mtx);
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100630 goto out;
631 }
Rafael J. Wysockib02f6692014-02-11 00:35:23 +0100632 dev->power.qos->resume_latency_req = req;
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200633
634 mutex_unlock(&dev_pm_qos_mtx);
635
Rafael J. Wysockib02f6692014-02-11 00:35:23 +0100636 ret = pm_qos_sysfs_add_resume_latency(dev);
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100637 if (ret)
Rafael J. Wysockib02f6692014-02-11 00:35:23 +0100638 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100639
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100640 out:
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200641 mutex_unlock(&dev_pm_qos_sysfs_mtx);
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100642 return ret;
643}
644EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
645
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100646static void __dev_pm_qos_hide_latency_limit(struct device *dev)
647{
Rafael J. Wysockib02f6692014-02-11 00:35:23 +0100648 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
649 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100650}
651
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100652/**
653 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
654 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
655 */
656void dev_pm_qos_hide_latency_limit(struct device *dev)
657{
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200658 mutex_lock(&dev_pm_qos_sysfs_mtx);
659
Rafael J. Wysockib02f6692014-02-11 00:35:23 +0100660 pm_qos_sysfs_remove_resume_latency(dev);
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200661
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100662 mutex_lock(&dev_pm_qos_mtx);
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100663 __dev_pm_qos_hide_latency_limit(dev);
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100664 mutex_unlock(&dev_pm_qos_mtx);
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200665
666 mutex_unlock(&dev_pm_qos_sysfs_mtx);
Rafael J. Wysocki85dc0b82012-03-13 01:01:39 +0100667}
668EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200669
670/**
671 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
672 * @dev: Device whose PM QoS flags are to be exposed to user space.
673 * @val: Initial values of the flags.
674 */
675int dev_pm_qos_expose_flags(struct device *dev, s32 val)
676{
677 struct dev_pm_qos_request *req;
678 int ret;
679
680 if (!device_is_registered(dev))
681 return -EINVAL;
682
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200683 req = kzalloc(sizeof(*req), GFP_KERNEL);
684 if (!req)
685 return -ENOMEM;
686
687 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100688 if (ret < 0) {
689 kfree(req);
690 return ret;
691 }
692
693 pm_runtime_get_sync(dev);
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200694 mutex_lock(&dev_pm_qos_sysfs_mtx);
695
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100696 mutex_lock(&dev_pm_qos_mtx);
697
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100698 if (IS_ERR_OR_NULL(dev->power.qos))
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100699 ret = -ENODEV;
700 else if (dev->power.qos->flags_req)
701 ret = -EEXIST;
702
703 if (ret < 0) {
704 __dev_pm_qos_remove_request(req);
705 kfree(req);
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200706 mutex_unlock(&dev_pm_qos_mtx);
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100707 goto out;
708 }
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200709 dev->power.qos->flags_req = req;
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200710
711 mutex_unlock(&dev_pm_qos_mtx);
712
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200713 ret = pm_qos_sysfs_add_flags(dev);
714 if (ret)
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200715 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200716
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100717 out:
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200718 mutex_unlock(&dev_pm_qos_sysfs_mtx);
Lan Tianyu7e4d6842012-11-08 11:14:08 +0800719 pm_runtime_put(dev);
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200720 return ret;
721}
722EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
723
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100724static void __dev_pm_qos_hide_flags(struct device *dev)
725{
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200726 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100727 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100728}
729
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200730/**
731 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
732 * @dev: Device whose PM QoS flags are to be hidden from user space.
733 */
734void dev_pm_qos_hide_flags(struct device *dev)
735{
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100736 pm_runtime_get_sync(dev);
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200737 mutex_lock(&dev_pm_qos_sysfs_mtx);
738
739 pm_qos_sysfs_remove_flags(dev);
740
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100741 mutex_lock(&dev_pm_qos_mtx);
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100742 __dev_pm_qos_hide_flags(dev);
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100743 mutex_unlock(&dev_pm_qos_mtx);
Rafael J. Wysocki0f703062013-04-02 01:25:24 +0200744
745 mutex_unlock(&dev_pm_qos_sysfs_mtx);
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100746 pm_runtime_put(dev);
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200747}
748EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
749
750/**
751 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
752 * @dev: Device to update the PM QoS flags request for.
753 * @mask: Flags to set/clear.
754 * @set: Whether to set or clear the flags (true means set).
755 */
756int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
757{
758 s32 value;
759 int ret;
760
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200761 pm_runtime_get_sync(dev);
762 mutex_lock(&dev_pm_qos_mtx);
763
Rafael J. Wysocki37530f22013-03-04 14:22:57 +0100764 if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100765 ret = -EINVAL;
766 goto out;
767 }
768
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200769 value = dev_pm_qos_requested_flags(dev);
770 if (set)
771 value |= mask;
772 else
773 value &= ~mask;
774
775 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
776
Rafael J. Wysockib81ea1b2013-03-03 22:48:14 +0100777 out:
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200778 mutex_unlock(&dev_pm_qos_mtx);
779 pm_runtime_put(dev);
Rafael J. Wysockie39473d2012-10-24 02:08:18 +0200780 return ret;
781}
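
/*
 * Illustrative sketch, not part of this file's logic: once the flags of a
 * device have been exposed with dev_pm_qos_expose_flags(), kernel code can
 * manipulate the user space owned flags request through
 * dev_pm_qos_update_flags().  The flag is assumed to be one of the
 * PM_QOS_FLAG_* values from <linux/pm_qos.h>; the device pointer is
 * hypothetical.
 *
 *	ret = dev_pm_qos_expose_flags(dev, 0);
 *	...
 *	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 */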

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
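
/*
 * Illustrative sketch, not part of this file's logic: for a device whose
 * subsystem provides dev->power.set_latency_tolerance(), the user space
 * latency tolerance request can be set and later dropped again through
 * the calls above.  The device pointer and the tolerance value are
 * hypothetical.
 *
 *	ret = dev_pm_qos_update_user_latency_tolerance(dev, 500);
 *	...
 *	ret = dev_pm_qos_update_user_latency_tolerance(dev,
 *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 */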

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);