#include "blk-rq-qos.h"

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}
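
/*
 * Take one inflight "token" from 'rq_wait', as long as the inflight count
 * stays below 'limit'. Returns false, without sleeping, once the limit is
 * reached.
 */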
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}
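
/*
 * The rq_qos policies attached to a queue form a singly linked list chained
 * through rqos->next. Each of the __rq_qos_*() helpers below walks the whole
 * chain and invokes the corresponding optional hook on every policy that
 * implements it.
 */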
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
	do {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	do {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
		rqos = rqos->next;
	} while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	do {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
		rqos = rqos->next;
	} while (rqos);
}

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
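
/*
 * Worked example of the scaling above (illustrative numbers, not from the
 * original source): with default_depth = 64 and queue_depth = 64,
 *
 *	scale_step  2: depth = 1 + ((64 - 1) >> 2) = 16
 *	scale_step  0: depth = 64
 *	scale_step -1: depth = 1 + ((64 - 1) << 1) = 127, which exceeds
 *		       maxd = 3 * 64 / 4 = 48, so the depth is clamped to
 *		       48 and the function returns true.
 */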

void rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale rqd down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}
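
/*
 * Per-waiter state for rq_qos_wait(). The wait queue entry is embedded so
 * that the custom wake function can recover the waiter's state, and
 * 'got_token' records whether the wakeup path already acquired an inflight
 * token on this waiter's behalf.
 */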
struct rq_qos_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wait *rqw;
	acquire_inflight_cb_t *cb;
	void *private_data;
	bool got_token;
};
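
/*
 * Runs in the context of the task doing the wakeup: try to take the inflight
 * token on behalf of the sleeping waiter, and only wake it if that succeeds.
 * Failing to get a token aborts the wakeup sweep, since the waiters behind
 * this one would not be able to get a token either.
 */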
static int rq_qos_wake_function(struct wait_queue_entry *curr,
				unsigned int mode, int wake_flags, void *key)
{
	struct rq_qos_wait_data *data = container_of(curr,
						     struct rq_qos_wait_data,
						     wq);

	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up loop
	 * in __wake_up_common.
	 */
	if (!data->cb(data->rqw, data->private_data))
		return -1;

	data->got_token = true;
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}

/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @rqw: rqw to throttle on
 * @private_data: caller provided specific data
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 * @cleanup_cb: the callback to cleanup in case we race with a waker
 *
 * This provides a uniform place for the rq_qos users to do their throttling.
 * Since you can end up with a lot of things sleeping at once, this manages the
 * waking up based on the resources available. The acquire_inflight_cb should
 * inc the rqw->inflight if we have the ability to do so, or return false if
 * not, and then we will sleep until the room becomes available.
 *
 * cleanup_cb is for the case where we race with a waker and need to clean up
 * the inflight count accordingly.
 */
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb)
{
	struct rq_qos_wait_data data = {
		.wq = {
			.func	= rq_qos_wake_function,
			.entry	= LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rqw = rqw,
		.cb = acquire_inflight_cb,
		.private_data = private_data,
	};
	bool has_sleeper;

	/*
	 * Only try the inline fast path if nobody is already waiting, so we
	 * don't jump ahead of existing sleepers.
	 */
	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
		return;

	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	do {
		if (data.got_token)
			break;
		if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
			finish_wait(&rqw->wait, &data.wq);

			/*
			 * We raced with rq_qos_wake_function() getting a
			 * token, which means we now have two. Put our local
			 * token and wake anyone else potentially waiting for
			 * one.
			 */
			if (data.got_token)
				cleanup_cb(rqw, private_data);
			break;
		}
		io_schedule();
		has_sleeper = false;
	} while (1);
	finish_wait(&rqw->wait, &data.wq);
}
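
/*
 * Illustrative usage sketch (not part of the original file): a policy with a
 * struct rq_wait would typically pair rq_qos_wait() with callbacks along
 * these lines, where get_limit() stands in for however the policy computes
 * its currently allowed depth, and a real cleanup_cb would follow its own
 * completion path:
 *
 *	static bool my_inflight_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		return rq_wait_inc_below(rqw, get_limit(private_data));
 *	}
 *
 *	static void my_cleanup_cb(struct rq_wait *rqw, void *private_data)
 *	{
 *		atomic_dec(&rqw->inflight);
 *		wake_up_all(&rqw->wait);
 *	}
 *
 *	rq_qos_wait(rqw, private_data, my_inflight_cb, my_cleanup_cb);
 */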
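
/*
 * Tear down all rq_qos policies on a queue: unregister the debugfs entries
 * first, then unlink each policy from the chain and call its ->exit() hook.
 */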
void rq_qos_exit(struct request_queue *q)
{
	blk_mq_debugfs_unregister_queue_rqos(q);

	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;
		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}