/*
 * ROW (Read Over Write) I/O scheduler.
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* See Documentation/block/row-iosched.txt */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hrtimer.h>

/*
 * enum row_queue_prio - Priorities of the ROW queues
 *
 * This enum defines the priorities (and the number of queues) to which
 * requests will be distributed. The higher the priority, the bigger the
 * "bus time" (or dispatch quantum) given to that queue.
 * ROWQ_PRIO_HIGH_READ is the highest priority queue.
 *
 */
enum row_queue_prio {
	ROWQ_PRIO_HIGH_READ = 0,
	ROWQ_PRIO_HIGH_SWRITE,
	ROWQ_PRIO_REG_READ,
	ROWQ_PRIO_REG_SWRITE,
	ROWQ_PRIO_REG_WRITE,
	ROWQ_PRIO_LOW_READ,
	ROWQ_PRIO_LOW_SWRITE,
	ROWQ_MAX_PRIO,
};
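
/*
 * Naming note (added for clarity): the SWRITE queues hold synchronous
 * writes, as classified by rq_is_sync() in row_get_queue_prio() below;
 * the plain WRITE queue holds the remaining, asynchronous writes.
 */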

/*
 * The following indexes define the distribution of ROW queues according to
 * priorities. Each index defines the first queue in that priority group.
 */
#define ROWQ_HIGH_PRIO_IDX	ROWQ_PRIO_HIGH_READ
#define ROWQ_REG_PRIO_IDX	ROWQ_PRIO_REG_READ
#define ROWQ_LOW_PRIO_IDX	ROWQ_PRIO_LOW_READ

/**
 * struct row_queue_params - ROW queue parameters
 * @idling_enabled:	Flag indicating whether idling is enabled on
 *			the queue
 * @quantum:		Number of requests to be dispatched from this queue
 *			in a dispatch cycle
 * @is_urgent:		Flag indicating whether the queue can notify on
 *			urgent requests
 *
 */
struct row_queue_params {
	bool idling_enabled;
	int quantum;
	bool is_urgent;
};

/*
 * This array holds the default values of the different configurables
 * for each ROW queue. Each row of the array holds the following values:
 * {idling_enabled, quantum, is_urgent}
 * Each row corresponds to a queue with the same index (according to
 * enum row_queue_prio).
 * Note: the quanta are only meaningful relative to the other queues in
 * the same priority group. For example:
 *	For every 10 high priority read requests, 1 high priority sync
 *	write will be dispatched.
 *	For every 100 regular read requests, 1 regular write request will
 *	be dispatched.
 */
static const struct row_queue_params row_queues_def[] = {
/* idling_enabled, quantum, is_urgent */
	{true, 10, true},	/* ROWQ_PRIO_HIGH_READ */
	{false, 1, true},	/* ROWQ_PRIO_HIGH_SWRITE */
	{true, 100, true},	/* ROWQ_PRIO_REG_READ */
	{false, 1, false},	/* ROWQ_PRIO_REG_SWRITE */
	{false, 1, false},	/* ROWQ_PRIO_REG_WRITE */
	{false, 1, false},	/* ROWQ_PRIO_LOW_READ */
	{false, 1, false}	/* ROWQ_PRIO_LOW_SWRITE */
};
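
/*
 * Worked example (illustrative, added): within the regular priority group
 * the default quanta above are {REG_READ=100, REG_SWRITE=1, REG_WRITE=1},
 * so a full IOPRIO_CLASS_BE dispatch cycle may move up to 102 requests in
 * a 100:1:1 ratio before row_restart_cycle() resets the dispatch counters.
 * The quanta say nothing about the ratio *between* priority groups; higher
 * groups are always drained first (see row_get_ioprio_class_to_serve()).
 */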

/* Default values for idling on read queues (in msec) */
#define ROW_IDLE_TIME_MSEC 5
#define ROW_READ_FREQ_MSEC 20

/**
 * struct rowq_idling_data - parameters for idling on the queue
 * @last_insert_time:	time the last request was inserted
 *			into the queue
 * @begin_idling:	flag indicating whether we should idle
 *
 */
struct rowq_idling_data {
	ktime_t			last_insert_time;
	bool			begin_idling;
};

/**
 * struct row_queue - requests grouping structure
 * @rdata:		parent row_data structure
 * @fifo:		fifo of requests
 * @prio:		queue priority (enum row_queue_prio)
 * @nr_dispatched:	number of requests already dispatched in
 *			the current dispatch cycle
 * @nr_req:		number of requests in queue
 * @disp_quantum:	number of requests this queue may
 *			dispatch in a dispatch cycle
 * @idle_data:		data for idling on queues
 *
 */
struct row_queue {
	struct row_data		*rdata;
	struct list_head	fifo;
	enum row_queue_prio	prio;

	unsigned int		nr_dispatched;

	unsigned int		nr_req;
	int			disp_quantum;

	/* used only for READ queues */
	struct rowq_idling_data	idle_data;
};

/**
 * struct idling_data - data for idling on empty rqueue
 * @idle_time_ms:	idling duration (msec)
 * @freq_ms:		min time between two requests that
 *			trigger idling (msec)
 * @hr_timer:		idling timer
 * @idle_work:		the work to be scheduled when idling timer expires
 * @idling_queue_idx:	index of the queue we're idling on
 *
 */
struct idling_data {
	s64				idle_time_ms;
	s64				freq_ms;

	struct hrtimer			hr_timer;
	struct work_struct		idle_work;
	enum row_queue_prio		idling_queue_idx;
};

/**
 * struct row_data - Per block device rqueue structure
 * @dispatch_queue:	dispatch rqueue
 * @row_queues:		array of priority request queues
 * @rd_idle_data:	data for idling after READ request
 * @nr_reqs:		nr_reqs[0] holds the number of all READ requests in
 *			scheduler, nr_reqs[1] holds the number of all WRITE
 *			requests in scheduler
 * @cycle_flags:	used for marking unserved queues
 *
 */
struct row_data {
	struct request_queue		*dispatch_queue;

	struct row_queue		row_queues[ROWQ_MAX_PRIO];

	struct idling_data		rd_idle_data;
	unsigned int			nr_reqs[2];

	unsigned int			cycle_flags;
};

#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elv.priv[0]))

#define row_log(q, fmt, args...) \
	blk_add_trace_msg(q, "%s():" fmt, __func__, ##args)
#define row_log_rowq(rdata, rowq_id, fmt, args...) \
	blk_add_trace_msg(rdata->dispatch_queue, "rowq%d " fmt, \
		rowq_id, ##args)

static inline void row_mark_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	rd->cycle_flags |= (1 << qnum);
}

static inline void row_clear_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	rd->cycle_flags &= ~(1 << qnum);
}

static inline int row_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	return rd->cycle_flags & (1 << qnum);
}
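
/*
 * Example (illustrative, added): cycle_flags is a plain bitmask indexed
 * by enum row_queue_prio. Marking ROWQ_PRIO_REG_READ (value 2) as
 * unserved sets bit 2, i.e. cycle_flags |= (1 << 2) == 0x04, and
 * row_rowq_unserved() simply tests that bit.
 */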

static inline void __maybe_unused row_dump_queues_stat(struct row_data *rd)
{
	int i;

	row_log(rd->dispatch_queue, " Queues status:");
	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		row_log(rd->dispatch_queue,
			"queue%d: dispatched= %d, nr_req=%d", i,
			rd->row_queues[i].nr_dispatched,
			rd->row_queues[i].nr_req);
}

/******************** Static helper functions ***********************/
static void kick_queue(struct work_struct *work)
{
	struct idling_data *read_data =
		container_of(work, struct idling_data, idle_work);
	struct row_data *rd =
		container_of(read_data, struct row_data, rd_idle_data);

	blk_run_queue(rd->dispatch_queue);
}
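
/*
 * Note (added): kick_queue() executes from the kblockd workqueue in
 * process context. The hrtimer callback below fires in interrupt
 * context, so rather than running the queue directly it defers the
 * actual dispatch to this work item.
 */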

static enum hrtimer_restart row_idle_hrtimer_fn(struct hrtimer *hr_timer)
{
	struct idling_data *read_data =
		container_of(hr_timer, struct idling_data, hr_timer);
	struct row_data *rd =
		container_of(read_data, struct row_data, rd_idle_data);

	row_log_rowq(rd, rd->rd_idle_data.idling_queue_idx,
			"Performing delayed work");
	/* Mark idling process as done */
	rd->row_queues[rd->rd_idle_data.idling_queue_idx].
			idle_data.begin_idling = false;
	rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE])
		row_log(rd->dispatch_queue, "No requests in scheduler");
	else
		kblockd_schedule_work(rd->dispatch_queue,
			&read_data->idle_work);
	return HRTIMER_NORESTART;
}

/******************* Elevator callback functions *********************/

/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			(void)hrtimer_cancel(&rd->rd_idle_data.hr_timer);
			row_log_rowq(rd, rqueue->prio,
				"Canceled delayed work on %d",
				rd->rd_idle_data.idling_queue_idx);
			rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
		}
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("ROW BUG: %s diff_ms < 0", __func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}
		if (diff_ms < rd->rd_idle_data.freq_ms) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    row_rowq_unserved(rd, rqueue->prio)) {
		row_log_rowq(rd, rqueue->prio,
			"added urgent request (total on queue=%d)",
			rqueue->nr_req);
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
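
/*
 * Worked example (illustrative, added): with the default
 * ROW_READ_FREQ_MSEC of 20, two regular reads inserted 12ms apart yield
 * diff_ms = 12 < 20, so begin_idling is set and a later empty-queue check
 * in row_get_ioprio_class_to_serve() may arm the 5ms (ROW_IDLE_TIME_MSEC)
 * idling timer for that queue. Reads arriving more than 20ms apart are
 * treated as sparse and never trigger idling.
 */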

/**
 * row_reinsert_req() - Reinsert request back to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 * Reinsert the given request back to the queue it was
 * dispatched from as if it was never dispatched.
 *
 * Returns 0 on success, error code otherwise
 */
static int row_reinsert_req(struct request_queue *q,
			struct request *rq)
{
	struct row_data *rd = q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	if (rqueue->prio >= ROWQ_MAX_PRIO) {
		pr_err("\n\n%s:ROW BUG: row_reinsert_req() rqueue->prio = %d\n",
			rq->rq_disk->disk_name, rqueue->prio);
		blk_dump_rq_flags(rq, "");
		return -EIO;
	}

	list_add(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;

	row_log_rowq(rd, rqueue->prio,
		"request reinserted (total on queue=%d)", rqueue->nr_req);

	return 0;
}

/**
 * row_urgent_pending() - Return TRUE if there is an urgent
 *			  request on scheduler
 * @q:	requests queue
 */
static bool row_urgent_pending(struct request_queue *q)
{
	struct row_data *rd = q->elevator->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		if (row_queues_def[i].is_urgent && row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].fifo)) {
			row_log_rowq(rd, i, "Urgent request pending");
			return true;
		}

	return false;
}

/**
 * row_remove_request() - Remove given request from scheduler
 * @q:	requests queue
 * @rq:	request to remove
 *
 */
static void row_remove_request(struct request_queue *q,
			struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	rq_fifo_clear(rq);
	rqueue->nr_req--;
	rd->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:		pointer to struct row_data
 * @queue_idx:	index of the row_queue to dispatch from
 *
 * This function moves the next request to dispatch from
 * the given queue (row_queues[queue_idx]) to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd, int queue_idx)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[queue_idx].fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[queue_idx].nr_dispatched++;
	row_clear_rowq_unserved(rd, queue_idx);
	row_log_rowq(rd, queue_idx, " Dispatched request nr_disp = %d",
		rd->row_queues[queue_idx].nr_dispatched);
}

/*
 * row_get_ioprio_class_to_serve() - Return the next I/O priority
 *				      class to dispatch requests from
 * @rd:	pointer to struct row_data
 * @force:	flag indicating whether this is a forced dispatch
 *
 * This function returns the next I/O priority class to serve
 * {IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE}.
 * If there are no more requests in the scheduler, or if we're idling on
 * some queue, IOPRIO_CLASS_NONE will be returned.
 * If idling is scheduled on a lower priority queue than the one that needs
 * to be served, it will be canceled.
 *
 */
static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
{
	int i;
	int ret = IOPRIO_CLASS_NONE;

	if (!rd->nr_reqs[READ] && !rd->nr_reqs[WRITE]) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		goto check_idling;
	}

	/* First, go over the high priority queues */
	for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
		if (!list_empty(&rd->row_queues[i].fifo)) {
			if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
				(void)hrtimer_cancel(
					&rd->rd_idle_data.hr_timer);
				row_log_rowq(rd,
					rd->rd_idle_data.idling_queue_idx,
					"Canceling delayed work on %d. RT pending",
					rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
			ret = IOPRIO_CLASS_RT;
			goto done;
		}
	}

	/*
	 * At the moment idling is implemented only for READ queues.
	 * If enabled on WRITE, this needs updating
	 */
	if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		row_log(rd->dispatch_queue, "Delayed work pending. Exiting");
		goto done;
	}
check_idling:
	/* Check for (high priority) idling and enable if needed */
	for (i = 0; i < ROWQ_REG_PRIO_IDX && !force; i++) {
		if (rd->row_queues[i].idle_data.begin_idling &&
		    row_queues_def[i].idling_enabled)
			goto initiate_idling;
	}

	/* Regular priority queues */
	for (i = ROWQ_REG_PRIO_IDX; i < ROWQ_LOW_PRIO_IDX; i++) {
		if (list_empty(&rd->row_queues[i].fifo)) {
			/* We can idle only if this is not a forced dispatch */
			if (rd->row_queues[i].idle_data.begin_idling &&
			    !force && row_queues_def[i].idling_enabled)
				goto initiate_idling;
		} else {
			ret = IOPRIO_CLASS_BE;
			goto done;
		}
	}

	if (rd->nr_reqs[READ] || rd->nr_reqs[WRITE])
		ret = IOPRIO_CLASS_IDLE;
	goto done;

initiate_idling:
	hrtimer_start(&rd->rd_idle_data.hr_timer,
		ktime_set(0, rd->rd_idle_data.idle_time_ms * NSEC_PER_MSEC),
		HRTIMER_MODE_REL);

	rd->rd_idle_data.idling_queue_idx = i;
	row_log_rowq(rd, i, "Scheduled delayed work on %d. exiting", i);

done:
	return ret;
}

static void row_restart_cycle(struct row_data *rd,
				int start_idx, int end_idx)
{
	int i;

	row_dump_queues_stat(rd);
	for (i = start_idx; i < end_idx; i++) {
		if (rd->row_queues[i].nr_dispatched <
		    rd->row_queues[i].disp_quantum)
			row_mark_rowq_unserved(rd, i);
		rd->row_queues[i].nr_dispatched = 0;
	}
	row_log(rd->dispatch_queue, "Restarting cycle for class @ %d-%d",
		start_idx, end_idx);
}

/*
 * row_get_next_queue() - selects the next queue to dispatch from
 * @q:	requests queue
 * @rd:	pointer to struct row_data
 * @start_idx/end_idx: indexes in the row_queues array to select a queue
 *                 from.
 *
 * Return the index of the queue to dispatch from, or an error code if it
 * fails.
 *
 */
static int row_get_next_queue(struct request_queue *q, struct row_data *rd,
				int start_idx, int end_idx)
{
	int i = start_idx;
	bool restart = true;
	int ret = -EIO;

	do {
		if (list_empty(&rd->row_queues[i].fifo) ||
		    rd->row_queues[i].nr_dispatched >=
		    rd->row_queues[i].disp_quantum) {
			i++;
			if (i == end_idx && restart) {
				/* Restart cycle for this priority class */
				row_restart_cycle(rd, start_idx, end_idx);
				i = start_idx;
				restart = false;
			}
		} else {
			ret = i;
			break;
		}
	} while (i < end_idx);

	return ret;
}
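
/*
 * Example (illustrative, added): serving IOPRIO_CLASS_BE with pending
 * REG_READ and REG_SWRITE requests, row_get_next_queue() keeps returning
 * ROWQ_PRIO_REG_READ until its 100-request quantum is consumed, then
 * moves on to ROWQ_PRIO_REG_SWRITE. When every queue in the range is
 * either empty or over-quantum it restarts the cycle once (resetting the
 * dispatch counters); if all queues in the range are empty it returns
 * -EIO.
 */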

/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:	requests queue
 * @force:	flag indicating whether this is a forced dispatch
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, ioprio_class_to_serve, start_idx, end_idx;

	if (force && hrtimer_active(&rd->rd_idle_data.hr_timer)) {
		(void)hrtimer_cancel(&rd->rd_idle_data.hr_timer);
		row_log_rowq(rd, rd->rd_idle_data.idling_queue_idx,
			"Canceled delayed work on %d - forced dispatch",
			rd->rd_idle_data.idling_queue_idx);
		rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
	}

	ioprio_class_to_serve = row_get_ioprio_class_to_serve(rd, force);
	row_log(rd->dispatch_queue, "Dispatching from %d priority class",
		ioprio_class_to_serve);

	switch (ioprio_class_to_serve) {
	case IOPRIO_CLASS_NONE:
		goto done;
	case IOPRIO_CLASS_RT:
		start_idx = ROWQ_HIGH_PRIO_IDX;
		end_idx = ROWQ_REG_PRIO_IDX;
		break;
	case IOPRIO_CLASS_BE:
		start_idx = ROWQ_REG_PRIO_IDX;
		end_idx = ROWQ_LOW_PRIO_IDX;
		break;
	case IOPRIO_CLASS_IDLE:
		start_idx = ROWQ_LOW_PRIO_IDX;
		end_idx = ROWQ_MAX_PRIO;
		break;
	default:
		pr_err("%s(): Invalid I/O priority class", __func__);
		goto done;
	}

	currq = row_get_next_queue(q, rd, start_idx, end_idx);

	/* Dispatch */
	if (currq >= 0) {
		row_dispatch_insert(rd, currq);
		ret = 1;
	}
done:
	return ret;
}

/*
 * row_init_queue() - Init scheduler data structures
 * @q:	requests queue
 *
 * Return pointer to struct row_data to be saved in elevator for
 * this dispatch queue
 *
 */
static void *row_init_queue(struct request_queue *q)
{
	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].fifo);
		rdata->row_queues[i].disp_quantum = row_queues_def[i].quantum;
		rdata->row_queues[i].rdata = rdata;
		rdata->row_queues[i].prio = i;
		rdata->row_queues[i].idle_data.begin_idling = false;
		rdata->row_queues[i].idle_data.last_insert_time =
			ktime_set(0, 0);
	}

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases
	 */
	rdata->rd_idle_data.idle_time_ms = ROW_IDLE_TIME_MSEC;
	rdata->rd_idle_data.freq_ms = ROW_READ_FREQ_MSEC;
	hrtimer_init(&rdata->rd_idle_data.hr_timer,
		CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rdata->rd_idle_data.hr_timer.function = &row_idle_hrtimer_fn;

	INIT_WORK(&rdata->rd_idle_data.idle_work, kick_queue);

	rdata->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
	rdata->dispatch_queue = q;

	rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0;

	return rdata;
}

/*
 * row_exit_queue() - called on unloading the ROW scheduler
 * @e:	pointer to struct elevator_queue
 *
 */
static void row_exit_queue(struct elevator_queue *e)
{
	struct row_data *rd = (struct row_data *)e->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		BUG_ON(!list_empty(&rd->row_queues[i].fifo));
	if (hrtimer_cancel(&rd->rd_idle_data.hr_timer))
		pr_err("ROW BUG: idle timer was active!");
	rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
	kfree(rd);
}

/*
 * row_merged_requests() - Called when 2 requests are merged
 * @q:	requests queue
 * @rq:	request the two requests were merged into
 * @next:	request that was merged
 */
static void row_merged_requests(struct request_queue *q, struct request *rq,
				struct request *next)
{
	struct row_queue *rqueue = RQ_ROWQ(next);

	list_del_init(&next->queuelist);
	rqueue->nr_req--;

	rqueue->rdata->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * row_get_queue_prio() - Get queue priority for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and
 * dispatched from later on)
 *
 */
static enum row_queue_prio row_get_queue_prio(struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	enum row_queue_prio q_type = ROWQ_MAX_PRIO;
	int ioprio_class = IOPRIO_PRIO_CLASS(rq->elv.icq->ioc->ioprio);

	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_HIGH_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_HIGH_SWRITE;
		else {
			pr_err("%s:%s(): got a simple write from RT_CLASS. How???",
				rq->rq_disk->disk_name, __func__);
			q_type = ROWQ_PRIO_REG_WRITE;
		}
		break;
	case IOPRIO_CLASS_IDLE:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_LOW_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_LOW_SWRITE;
		else {
			pr_err("%s:%s(): got a simple write from IDLE_CLASS. How???",
				rq->rq_disk->disk_name, __func__);
			q_type = ROWQ_PRIO_REG_WRITE;
		}
		break;
	case IOPRIO_CLASS_NONE:
	case IOPRIO_CLASS_BE:
	default:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_REG_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_REG_SWRITE;
		else
			q_type = ROWQ_PRIO_REG_WRITE;
		break;
	}

	return q_type;
}
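
/*
 * Example (illustrative, added): a synchronous write issued by a task
 * whose I/O context carries IOPRIO_CLASS_BE (or no class at all) lands in
 * ROWQ_PRIO_REG_SWRITE; the same write from an IOPRIO_CLASS_RT task would
 * land in ROWQ_PRIO_HIGH_SWRITE.
 */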

/*
 * row_set_request() - Set ROW data structures associated with this request.
 * @q:	requests queue
 * @rq:	pointer to the request
 * @gfp_mask:	ignored
 *
 */
static int
row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	rq->elv.priv[0] =
		(void *)(&rd->row_queues[row_get_queue_prio(rq)]);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

/********** Helper sysfs functions/definitions for ROW attributes ********/
static ssize_t row_var_show(int var, char *page)
{
	return snprintf(page, 100, "%d\n", var);
}

static ssize_t row_var_store(int *var, const char *page, size_t count)
{
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;
	*var = (int)val;

	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return row_var_show(__data, (page));				\
}
SHOW_FUNCTION(row_hp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0);
SHOW_FUNCTION(row_rp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0);
SHOW_FUNCTION(row_hp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_write_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0);
SHOW_FUNCTION(row_lp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0);
SHOW_FUNCTION(row_lp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rd_idle_data_show, rowd->rd_idle_data.idle_time_ms, 0);
SHOW_FUNCTION(row_rd_idle_data_freq_show, rowd->rd_idle_data.freq_ms, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e,				\
		const char *page, size_t count)				\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data;							\
	int ret = row_var_store(&__data, (page), count);		\
	if (__CONV)							\
		__data = (int)msecs_to_jiffies(__data);			\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __data;						\
	return ret;							\
}
STORE_FUNCTION(row_hp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 1, INT_MAX, 0);
STORE_FUNCTION(row_rp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_hp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_write_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_lp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_lp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rd_idle_data_store, &rowd->rd_idle_data.idle_time_ms,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rd_idle_data_freq_store, &rowd->rd_idle_data.freq_ms,
	1, INT_MAX, 0);

#undef STORE_FUNCTION

#define ROW_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, row_##name##_show, \
				      row_##name##_store)

static struct elv_fs_entry row_attrs[] = {
	ROW_ATTR(hp_read_quantum),
	ROW_ATTR(rp_read_quantum),
	ROW_ATTR(hp_swrite_quantum),
	ROW_ATTR(rp_swrite_quantum),
	ROW_ATTR(rp_write_quantum),
	ROW_ATTR(lp_read_quantum),
	ROW_ATTR(lp_swrite_quantum),
	ROW_ATTR(rd_idle_data),
	ROW_ATTR(rd_idle_data_freq),
	__ATTR_NULL
};
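
/*
 * Usage note (illustrative, added; the device name is only an example):
 * once ROW is selected, the attributes above appear under the elevator's
 * standard sysfs directory, e.g.:
 *	echo row > /sys/block/sda/queue/scheduler
 *	echo 75 > /sys/block/sda/queue/iosched/rp_read_quantum
 */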

static struct elevator_type iosched_row = {
	.ops = {
		.elevator_merge_req_fn = row_merged_requests,
		.elevator_dispatch_fn = row_dispatch_requests,
		.elevator_add_req_fn = row_add_request,
		.elevator_reinsert_req_fn = row_reinsert_req,
		.elevator_is_urgent_fn = row_urgent_pending,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = row_set_request,
		.elevator_init_fn = row_init_queue,
		.elevator_exit_fn = row_exit_queue,
	},
	.icq_size = sizeof(struct io_cq),
	.icq_align = __alignof__(struct io_cq),
	.elevator_attrs = row_attrs,
	.elevator_name = "row",
	.elevator_owner = THIS_MODULE,
};

static int __init row_init(void)
{
	elv_register(&iosched_row);
	return 0;
}

static void __exit row_exit(void)
{
	elv_unregister(&iosched_row);
}

module_init(row_init);
module_exit(row_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Read Over Write IO scheduler");