/*
 * latency.c: Explicit system-wide latency-expectation infrastructure
 *
 * The purpose of this infrastructure is to allow device drivers to set
 * the latency constraints they have and to collect and summarize these
 * expectations globally. The accumulated result can then be used by
 * power management and similar users to make decisions that have
 * tradeoffs with a latency component.
 *
 * An example user of this is the x86 C-states; each higher C-state saves
 * more power, but has a higher exit latency. For the idle loop power
 * code to make a good decision about which C-state to use, information
 * about acceptable latencies is required.
 *
 * An example announcer of latency is an audio driver that knows it
 * will get an interrupt when the hardware has 200 usec of samples
 * left in the DMA buffer; in that case the driver can set a latency
 * constraint of, say, 150 usec.
 *
 * Multiple drivers can each announce their maximum accepted latency;
 * to keep these apart, a string-based identifier is used.
 *
 *
 * (C) Copyright 2006 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/latency.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <asm/atomic.h>

struct latency_info {
	struct list_head list;
	int usecs;
	char *identifier;
};

/*
 * Locking rule: all modifications to current_max_latency and
 * latency_list need to be done while holding the latency_lock.
 * latency_lock needs to be taken _irqsave.
 */
static atomic_t current_max_latency;
static DEFINE_SPINLOCK(latency_lock);

static LIST_HEAD(latency_list);
static BLOCKING_NOTIFIER_HEAD(latency_notifier);

/*
 * This function returns the maximum latency allowed, which
 * happens to be the minimum of all maximum latencies on the
 * list.
 */
static int __find_max_latency(void)
{
	int min = INFINITE_LATENCY;
	struct latency_info *info;

	list_for_each_entry(info, &latency_list, list) {
		if (info->usecs < min)
			min = info->usecs;
	}
	return min;
}

/**
 * set_acceptable_latency - sets the maximum latency acceptable
 * @identifier: string that identifies this driver
 * @usecs: maximum acceptable latency for this driver
 *
 * This function informs the kernel that this device (driver)
 * can accept at most @usecs of latency. This setting is used for
 * power management and similar tradeoffs.
 *
 * This function sleeps and can only be called from process
 * context.
 * Calling this function with an existing identifier is valid
 * and will cause the existing latency setting to be changed.
 */
void set_acceptable_latency(char *identifier, int usecs)
{
	struct latency_info *info, *iter;
	unsigned long flags;
	int found_old = 0;

	info = kzalloc(sizeof(struct latency_info), GFP_KERNEL);
	if (!info)
		return;
	info->usecs = usecs;
	info->identifier = kstrdup(identifier, GFP_KERNEL);
	if (!info->identifier)
		goto free_info;

	spin_lock_irqsave(&latency_lock, flags);
	list_for_each_entry(iter, &latency_list, list) {
		if (strcmp(iter->identifier, identifier) == 0) {
			found_old = 1;
			iter->usecs = usecs;
			break;
		}
	}
	if (!found_old)
		list_add(&info->list, &latency_list);

	if (usecs < atomic_read(&current_max_latency))
		atomic_set(&current_max_latency, usecs);

	spin_unlock_irqrestore(&latency_lock, flags);

	blocking_notifier_call_chain(&latency_notifier,
				     atomic_read(&current_max_latency), NULL);

	/*
	 * If we inserted the new one, we're done; otherwise there was
	 * an existing one, so we need to free the redundant data.
	 */
	if (!found_old)
		return;

	kfree(info->identifier);
free_info:
	kfree(info);
}
EXPORT_SYMBOL_GPL(set_acceptable_latency);
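
/*
 * Usage sketch (illustrative only, not part of this file): a
 * hypothetical audio driver that gets an interrupt when 200 usec of
 * samples remain in its DMA buffer could announce a 150 usec budget
 * from its probe routine, along the lines of:
 *
 *	static int example_audio_probe(struct platform_device *pdev)
 *	{
 *		set_acceptable_latency("example-audio", 150);
 *		return 0;
 *	}
 *
 * The function name, device type and the "example-audio" identifier
 * are made up for illustration; any stable, driver-unique string
 * works as the identifier.
 */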

/**
 * modify_acceptable_latency - changes the maximum latency acceptable
 * @identifier: string that identifies this driver
 * @usecs: maximum acceptable latency for this driver
 *
 * This function informs the kernel that this device (driver)
 * can accept at most @usecs of latency. This setting is used for
 * power management and similar tradeoffs.
 *
 * This function does not sleep and can be called in any context.
 * Trying to use a non-existing identifier is silently ignored.
 *
 * Due to the atomic nature of this function, the modified latency
 * value will only be used for future decisions; past decisions
 * can still lead to longer latencies in the near future.
 */
void modify_acceptable_latency(char *identifier, int usecs)
{
	struct latency_info *iter;
	unsigned long flags;

	spin_lock_irqsave(&latency_lock, flags);
	list_for_each_entry(iter, &latency_list, list) {
		if (strcmp(iter->identifier, identifier) == 0) {
			iter->usecs = usecs;
			break;
		}
	}
	if (usecs < atomic_read(&current_max_latency))
		atomic_set(&current_max_latency, usecs);
	spin_unlock_irqrestore(&latency_lock, flags);
}
EXPORT_SYMBOL_GPL(modify_acceptable_latency);
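
/*
 * Usage sketch (illustrative only): since this function does not
 * sleep, the hypothetical audio driver above could adjust its budget
 * from its interrupt handler as the buffer fill level changes:
 *
 *	static irqreturn_t example_audio_irq(int irq, void *dev_id)
 *	{
 *		modify_acceptable_latency("example-audio", 500);
 *		return IRQ_HANDLED;
 *	}
 *
 * The handler name and the 500 usec value are made up; note that the
 * new value only steers future decisions, see
 * synchronize_acceptable_latency() below.
 */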

/**
 * remove_acceptable_latency - removes the maximum latency acceptable
 * @identifier: string that identifies this driver
 *
 * This function removes a previously set maximum latency setting
 * for the driver and frees up any resources associated with the
 * bookkeeping needed for this.
 *
 * This function does not sleep and can be called in any context.
 * Trying to use a non-existing identifier is silently ignored.
 */
void remove_acceptable_latency(char *identifier)
{
	unsigned long flags;
	int newmax = 0;
	struct latency_info *iter, *temp;

	spin_lock_irqsave(&latency_lock, flags);

	list_for_each_entry_safe(iter, temp, &latency_list, list) {
		if (strcmp(iter->identifier, identifier) == 0) {
			list_del(&iter->list);
			newmax = iter->usecs;
			kfree(iter->identifier);
			kfree(iter);
			break;
		}
	}

	/*
	 * If we just deleted the system-wide value, we need to
	 * recalculate with a full search.
	 */
	if (newmax == atomic_read(&current_max_latency)) {
		newmax = __find_max_latency();
		atomic_set(&current_max_latency, newmax);
	}
	spin_unlock_irqrestore(&latency_lock, flags);
}
EXPORT_SYMBOL_GPL(remove_acceptable_latency);
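
/*
 * Usage sketch (illustrative only): the constraint should be dropped
 * again once the driver no longer needs it, typically on remove or
 * suspend:
 *
 *	static int example_audio_remove(struct platform_device *pdev)
 *	{
 *		remove_acceptable_latency("example-audio");
 *		return 0;
 *	}
 */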

/**
 * system_latency_constraint - queries the system-wide latency maximum
 *
 * This function returns the system-wide maximum latency in
 * microseconds.
 *
 * This function does not sleep and can be called in any context.
 */
int system_latency_constraint(void)
{
	return atomic_read(&current_max_latency);
}
EXPORT_SYMBOL_GPL(system_latency_constraint);
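
/*
 * Usage sketch (illustrative only): an idle-loop consumer, such as the
 * C-state selection mentioned at the top of this file, could compare
 * each candidate state's exit latency against the current budget:
 *
 *	allowed = system_latency_constraint();
 *	for (i = deepest_state; i > 0; i--)
 *		if (states[i].exit_latency_usecs <= allowed)
 *			break;
 *
 * The states[] array and its exit_latency_usecs field are hypothetical
 * stand-ins for whatever per-state data the consumer keeps.
 */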

/**
 * synchronize_acceptable_latency - recalculates all latency decisions
 *
 * This function will cause a callback to various kernel pieces that
 * will make those pieces rethink their latency decisions. This implies
 * that if there are overlong latencies in hardware state already, those
 * latencies get taken right now. When this call completes, no overlong
 * latency decisions should be active anymore.
 *
 * A typical use case for this is after a modify_acceptable_latency()
 * call, which in itself is non-blocking and non-synchronizing.
 *
 * This function blocks and should not be called with locks held.
 */
void synchronize_acceptable_latency(void)
{
	blocking_notifier_call_chain(&latency_notifier,
				     atomic_read(&current_max_latency), NULL);
}
EXPORT_SYMBOL_GPL(synchronize_acceptable_latency);
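
/*
 * Usage sketch (illustrative only): a driver that has just tightened
 * its constraint with the non-blocking modify_acceptable_latency()
 * can, from process context, force consumers out of any now-overlong
 * state right away:
 *
 *	modify_acceptable_latency("example-audio", 100);
 *	synchronize_acceptable_latency();
 */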

/*
 * Latency notifier: this notifier gets called when a non-atomic new
 * latency value gets set. The expectation of the caller of the
 * non-atomic set is that when the call returns, future latencies
 * are within bounds, so the functions on the notifier list are
 * expected to take the overlong latencies immediately, inside the
 * callback, and not make an overlong latency decision anymore.
 *
 * The callback gets called when the new latency value is made
 * active, so system_latency_constraint() returns the new latency.
 */
int register_latency_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&latency_notifier, nb);
}
EXPORT_SYMBOL_GPL(register_latency_notifier);

int unregister_latency_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&latency_notifier, nb);
}
EXPORT_SYMBOL_GPL(unregister_latency_notifier);
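
/*
 * Usage sketch (illustrative only): a consumer that caches the current
 * budget can keep its copy up to date with a notifier; the callback
 * receives the new constraint in microseconds as its second argument:
 *
 *	static int example_latency_event(struct notifier_block *nb,
 *					 unsigned long new_latency, void *data)
 *	{
 *		example_cached_budget = new_latency;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_latency_nb = {
 *		.notifier_call = example_latency_event,
 *	};
 *
 *	register_latency_notifier(&example_latency_nb);
 *
 * example_cached_budget, example_latency_event and example_latency_nb
 * are made-up names for illustration.
 */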

static __init int latency_init(void)
{
	atomic_set(&current_max_latency, INFINITE_LATENCY);
	/*
	 * By default we don't want latencies longer than 2 ticks,
	 * since that would cause lost ticks (2*1000000/HZ usec,
	 * e.g. 8000 usec at HZ=250).
	 */
	set_acceptable_latency("kernel", 2*1000000/HZ);
	return 0;
}

module_init(latency_init);