/*
 * ROW (Read Over Write) I/O scheduler.
 *
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* See Documentation/block/row-iosched.txt */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/jiffies.h>

/*
 * enum row_queue_prio - Priorities of the ROW queues
 *
 * This enum defines the priorities (and the number of queues)
 * the requests will be distributed to. The higher the priority,
 * the bigger the dispatch quantum given to that queue.
 * ROWQ_PRIO_HIGH_READ is the highest priority queue.
 *
 */
enum row_queue_prio {
	ROWQ_PRIO_HIGH_READ = 0,
	ROWQ_PRIO_REG_READ,
	ROWQ_PRIO_HIGH_SWRITE,
	ROWQ_PRIO_REG_SWRITE,
	ROWQ_PRIO_REG_WRITE,
	ROWQ_PRIO_LOW_READ,
	ROWQ_PRIO_LOW_SWRITE,
	ROWQ_MAX_PRIO,
};

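/*
 * Note: the *_SWRITE queues hold synchronous writes, while REG_WRITE holds
 * regular asynchronous writes; see get_queue_type() below for how requests
 * are classified.
 */
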
/* Flags indicating whether idling is enabled on the queue */
static const bool queue_idling_enabled[] = {
	true,	/* ROWQ_PRIO_HIGH_READ */
	true,	/* ROWQ_PRIO_REG_READ */
	false,	/* ROWQ_PRIO_HIGH_SWRITE */
	false,	/* ROWQ_PRIO_REG_SWRITE */
	false,	/* ROWQ_PRIO_REG_WRITE */
	false,	/* ROWQ_PRIO_LOW_READ */
	false,	/* ROWQ_PRIO_LOW_SWRITE */
};

/* Default values for the row queues' quantums in each dispatch cycle */
static const int queue_quantum[] = {
	100,	/* ROWQ_PRIO_HIGH_READ */
	100,	/* ROWQ_PRIO_REG_READ */
	2,	/* ROWQ_PRIO_HIGH_SWRITE */
	1,	/* ROWQ_PRIO_REG_SWRITE */
	1,	/* ROWQ_PRIO_REG_WRITE */
	1,	/* ROWQ_PRIO_LOW_READ */
	1	/* ROWQ_PRIO_LOW_SWRITE */
};

/* Default values for idling on read queues */
#define ROW_IDLE_TIME 50	/* 5 msec */
#define ROW_READ_FREQ 70	/* 7 msec */

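/*
 * Note: when a read queue runs empty but recently saw back-to-back requests,
 * dispatch may be held (via the idle workqueue) for the idle time above,
 * giving the reader a chance to issue its next request before lower-priority
 * queues are served.
 */
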
/**
 * struct rowq_idling_data - parameters for idling on the queue
 * @idle_trigger_time:	time (in jiffies). If a new request was
 *			inserted before this time value, idling
 *			will be enabled.
 * @begin_idling:	flag indicating whether we should idle
 *
 */
struct rowq_idling_data {
	unsigned long		idle_trigger_time;
	bool			begin_idling;
};

/**
 * struct row_queue - requests grouping structure
 * @rdata:		parent row_data structure
 * @fifo:		fifo of requests
 * @prio:		queue priority (enum row_queue_prio)
 * @nr_dispatched:	number of requests already dispatched in
 *			the current dispatch cycle
 * @slice:		number of requests to dispatch in a cycle
 * @idle_data:		data for idling on queues
 *
 */
struct row_queue {
	struct row_data		*rdata;
	struct list_head	fifo;
	enum row_queue_prio	prio;

	unsigned int		nr_dispatched;
	unsigned int		slice;

	/* used only for READ queues */
	struct rowq_idling_data	idle_data;
};

/**
 * struct idling_data - data for idling on an empty rqueue
 * @idle_time:		idling duration (msec)
 * @freq:		min time between two requests that
 *			trigger idling (msec)
 * @idle_workqueue:	workqueue on which the idling delayed work is queued
 * @idle_work:		delayed work used for idling
 *
 */
struct idling_data {
	unsigned long			idle_time;
	unsigned long			freq;

	struct workqueue_struct		*idle_workqueue;
	struct delayed_work		idle_work;
};

/**
 * struct row_data - Per block device rqueue structure
 * @dispatch_queue:	dispatch rqueue
 * @row_queues:		array of priority request queues with
 *			dispatch quantum per rqueue
 * @curr_queue:		index in the row_queues array of the
 *			currently serviced rqueue
 * @read_idle:		data for idling after READ request
 * @nr_reqs:		nr_reqs[0] holds the number of all READ requests in
 *			scheduler, nr_reqs[1] holds the number of all WRITE
 *			requests in scheduler
 * @cycle_flags:	used for marking unserved queues
 *
 */
struct row_data {
	struct request_queue		*dispatch_queue;

	struct {
		struct row_queue	rqueue;
		int			disp_quantum;
	} row_queues[ROWQ_MAX_PRIO];

	enum row_queue_prio		curr_queue;

	struct idling_data		read_idle;
	unsigned int			nr_reqs[2];

	unsigned int			cycle_flags;
};

#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elv.priv[0]))

#define row_log(q, fmt, args...)	\
	blk_add_trace_msg(q, "%s():" fmt , __func__, ##args)
#define row_log_rowq(rdata, rowq_id, fmt, args...)		\
	blk_add_trace_msg(rdata->dispatch_queue, "rowq%d " fmt, \
		rowq_id, ##args)

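/*
 * Note: cycle_flags is a per-queue bitmask. A set bit marks a queue that was
 * skipped (left unserved) during the current dispatch cycle, which allows it
 * to preempt lower-priority queues once it has requests again.
 */
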
static inline void row_mark_rowq_unserved(struct row_data *rd,
					  enum row_queue_prio qnum)
{
	rd->cycle_flags |= (1 << qnum);
}

static inline void row_clear_rowq_unserved(struct row_data *rd,
					   enum row_queue_prio qnum)
{
	rd->cycle_flags &= ~(1 << qnum);
}

static inline int row_rowq_unserved(struct row_data *rd,
				    enum row_queue_prio qnum)
{
	return rd->cycle_flags & (1 << qnum);
}

/******************** Static helper functions ***********************/
/*
 * kick_queue() - Wake up device driver queue thread
 * @work:	pointer to struct work_struct
 *
 * This is an idling delayed work function. Its purpose is to wake up the
 * device driver in order for it to start fetching requests.
 *
 */
static void kick_queue(struct work_struct *work)
{
	struct delayed_work *idle_work = to_delayed_work(work);
	struct idling_data *read_data =
		container_of(idle_work, struct idling_data, idle_work);
	struct row_data *rd =
		container_of(read_data, struct row_data, read_idle);

	row_log_rowq(rd, rd->curr_queue, "Performing delayed work");
	/* Mark idling process as done */
	rd->row_queues[rd->curr_queue].rqueue.idle_data.begin_idling = false;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1]))
		row_log(rd->dispatch_queue, "No requests in scheduler");
	else {
		spin_lock_irq(rd->dispatch_queue->queue_lock);
		__blk_run_queue(rd->dispatch_queue);
		spin_unlock_irq(rd->dispatch_queue->queue_lock);
	}
}

/*
 * row_restart_disp_cycle() - Restart the dispatch cycle
 * @rd:	pointer to struct row_data
 *
 * This function restarts the dispatch cycle by:
 * - Setting current queue to ROWQ_PRIO_HIGH_READ
 * - For each queue: reset the number of requests dispatched in
 *   the cycle
 */
static inline void row_restart_disp_cycle(struct row_data *rd)
{
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		rd->row_queues[i].rqueue.nr_dispatched = 0;

	rd->curr_queue = ROWQ_PRIO_HIGH_READ;
	row_log(rd->dispatch_queue, "Restarting cycle");
}

static inline void row_get_next_queue(struct row_data *rd)
{
	rd->curr_queue++;
	if (rd->curr_queue == ROWQ_MAX_PRIO)
		row_restart_disp_cycle(rd);
}

/******************* Elevator callback functions *********************/

/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

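	/*
	 * Arm idling only if this request arrived within read_idle.freq
	 * msec of the previous one on this queue, i.e. the queue is seeing
	 * back-to-back requests that are worth waiting for.
	 */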
	if (queue_idling_enabled[rqueue->prio]) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (time_before(jiffies, rqueue->idle_data.idle_trigger_time)) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else
			rqueue->idle_data.begin_idling = false;

		rqueue->idle_data.idle_trigger_time =
			jiffies + msecs_to_jiffies(rd->read_idle.freq);
	}
	row_log_rowq(rd, rqueue->prio, "added request");
}

/*
 * row_remove_request() - Remove given request from scheduler
 * @q:	requests queue
 * @rq:	request to remove
 *
 */
static void row_remove_request(struct request_queue *q,
			       struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;

	rq_fifo_clear(rq);
	rd->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:	pointer to struct row_data
 *
 * This function moves the next request to dispatch from
 * rd->curr_queue to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++;
	row_clear_rowq_unserved(rd, rd->curr_queue);
	row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d",
		     rd->row_queues[rd->curr_queue].rqueue.nr_dispatched);
}

/*
 * row_choose_queue() - choose the next queue to dispatch from
 * @rd:	pointer to struct row_data
 *
 * Updates rd->curr_queue. Returns 1 if there are requests to
 * dispatch, 0 if there are no requests in scheduler
 *
 */
static int row_choose_queue(struct row_data *rd)
{
	int prev_curr_queue = rd->curr_queue;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		return 0;
	}

	row_get_next_queue(rd);

	/*
	 * Loop over all queues to find the next queue that is not empty.
	 * Stop when we get back to the queue we started from.
	 */
	while (list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo)
	       && rd->curr_queue != prev_curr_queue) {
		/* Mark rqueue as unserved */
		row_mark_rowq_unserved(rd, rd->curr_queue);
		row_get_next_queue(rd);
	}

	return 1;
}

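/*
 * Note: each dispatch decision below (1) preempts to the highest-priority
 * queue that was marked unserved and now has requests, (2) expires the
 * current queue once its quantum is used up, and (3) when an idling-enabled
 * queue runs empty, may schedule delayed work and idle instead of moving on.
 */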
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:	ignored
 *
 * Return 0 if no requests were moved to the dispatch queue.
 * 1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, currq,
				" Preempting for unserved rowq%d", i);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/* check idling */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			row_log_rowq(rd, currq,
				"Delayed work pending. Exiting");
			goto done;
		}

		if (queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
					&rd->read_idle.idle_work,
					msecs_to_jiffies(
						rd->read_idle.idle_time))) {
				row_log_rowq(rd, currq,
					"Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!");
			} else
				row_log_rowq(rd, currq,
					"Scheduled delayed work. exiting");
			goto done;
		} else {
			row_log_rowq(rd, currq,
				"Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}

/*
 * row_init_queue() - Init scheduler data structures
 * @q:	requests queue
 *
 * Return pointer to struct row_data to be saved in elevator for
 * this dispatch queue
 *
 */
static void *row_init_queue(struct request_queue *q)
{
	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			     GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].rqueue.fifo);
		rdata->row_queues[i].disp_quantum = queue_quantum[i];
		rdata->row_queues[i].rqueue.rdata = rdata;
		rdata->row_queues[i].rqueue.prio = i;
		rdata->row_queues[i].rqueue.idle_data.begin_idling = false;
	}

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases
	 */
	rdata->read_idle.idle_time = ROW_IDLE_TIME;
	rdata->read_idle.freq = ROW_READ_FREQ;
	rdata->read_idle.idle_workqueue = alloc_workqueue("row_idle_work",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!rdata->read_idle.idle_workqueue)
		panic("Failed to create idle workqueue\n");
	INIT_DELAYED_WORK(&rdata->read_idle.idle_work, kick_queue);

	rdata->curr_queue = ROWQ_PRIO_HIGH_READ;
	rdata->dispatch_queue = q;

	rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0;

	return rdata;
}

/*
 * row_exit_queue() - called on unloading the ROW scheduler
 * @e:	pointer to struct elevator_queue
 *
 */
static void row_exit_queue(struct elevator_queue *e)
{
	struct row_data *rd = (struct row_data *)e->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		BUG_ON(!list_empty(&rd->row_queues[i].rqueue.fifo));
	(void)cancel_delayed_work_sync(&rd->read_idle.idle_work);
	kfree(rd);
}

/*
 * row_merged_requests() - Called when 2 requests are merged
 * @q:		requests queue
 * @rq:		request the two requests were merged into
 * @next:	request that was merged
 */
static void row_merged_requests(struct request_queue *q, struct request *rq,
				struct request *next)
{
	struct row_queue *rqueue = RQ_ROWQ(next);

	list_del_init(&next->queuelist);

	rqueue->rdata->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * get_queue_type() - Get queue type for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and
 * dispatched from later on)
 *
 * TODO: Right now only 3 queues are used: REG_READ, REG_WRITE
 * and REG_SWRITE
 */
static enum row_queue_prio get_queue_type(struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);

	if (data_dir == READ)
		return ROWQ_PRIO_REG_READ;
	else if (is_sync)
		return ROWQ_PRIO_REG_SWRITE;
	else
		return ROWQ_PRIO_REG_WRITE;
}

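/*
 * Note: row_set_request() below caches the chosen row_queue pointer in
 * rq->elv.priv[0]; it is read back via the RQ_ROWQ() macro when the request
 * is later added to the scheduler.
 */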
/*
 * row_set_request() - Set ROW data structures associated with this request.
 * @q:		requests queue
 * @rq:		pointer to the request
 * @gfp_mask:	ignored
 *
 */
static int
row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	rq->elv.priv[0] =
		(void *)(&rd->row_queues[get_queue_type(rq)]);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

/********** Helper sysfs functions/definitions for ROW attributes **********/
static ssize_t row_var_show(int var, char *page)
{
	return snprintf(page, 100, "%d\n", var);
}

static ssize_t row_var_store(int *var, const char *page, size_t count)
{
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (!err)
		*var = (int)val;

	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return row_var_show(__data, (page));				\
}
SHOW_FUNCTION(row_hp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0);
SHOW_FUNCTION(row_rp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0);
SHOW_FUNCTION(row_hp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_write_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0);
SHOW_FUNCTION(row_lp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0);
SHOW_FUNCTION(row_lp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_read_idle_show, rowd->read_idle.idle_time, 1);
SHOW_FUNCTION(row_read_idle_freq_show, rowd->read_idle.freq, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e,				\
		const char *page, size_t count)				\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data;							\
	int ret = row_var_store(&__data, (page), count);		\
	if (__CONV)							\
		__data = (int)msecs_to_jiffies(__data);			\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __data;						\
	return ret;							\
}
STORE_FUNCTION(row_hp_read_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0,
		INT_MAX, 0);
STORE_FUNCTION(row_rp_read_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0,
		INT_MAX, 0);
STORE_FUNCTION(row_hp_swrite_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0,
		INT_MAX, 0);
STORE_FUNCTION(row_rp_swrite_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0,
		INT_MAX, 0);
STORE_FUNCTION(row_rp_write_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0,
		INT_MAX, 0);
STORE_FUNCTION(row_lp_read_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0,
		INT_MAX, 0);
STORE_FUNCTION(row_lp_swrite_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0,
		INT_MAX, 0);
STORE_FUNCTION(row_read_idle_store, &rowd->read_idle.idle_time, 1, INT_MAX, 1);
STORE_FUNCTION(row_read_idle_freq_store, &rowd->read_idle.freq,
		1, INT_MAX, 1);

#undef STORE_FUNCTION

#define ROW_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, row_##name##_show, \
				      row_##name##_store)

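/*
 * These tunables are exposed by the elevator core under
 * /sys/block/<device>/queue/iosched/ while "row" is the active scheduler.
 */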
static struct elv_fs_entry row_attrs[] = {
	ROW_ATTR(hp_read_quantum),
	ROW_ATTR(rp_read_quantum),
	ROW_ATTR(hp_swrite_quantum),
	ROW_ATTR(rp_swrite_quantum),
	ROW_ATTR(rp_write_quantum),
	ROW_ATTR(lp_read_quantum),
	ROW_ATTR(lp_swrite_quantum),
	ROW_ATTR(read_idle),
	ROW_ATTR(read_idle_freq),
	__ATTR_NULL
};

static struct elevator_type iosched_row = {
	.ops = {
		.elevator_merge_req_fn		= row_merged_requests,
		.elevator_dispatch_fn		= row_dispatch_requests,
		.elevator_add_req_fn		= row_add_request,
		.elevator_former_req_fn		= elv_rb_former_request,
		.elevator_latter_req_fn		= elv_rb_latter_request,
		.elevator_set_req_fn		= row_set_request,
		.elevator_init_fn		= row_init_queue,
		.elevator_exit_fn		= row_exit_queue,
	},

	.elevator_attrs = row_attrs,
	.elevator_name = "row",
	.elevator_owner = THIS_MODULE,
};

static int __init row_init(void)
{
	elv_register(&iosched_row);
	return 0;
}

static void __exit row_exit(void)
{
	elv_unregister(&iosched_row);
}

module_init(row_init);
module_exit(row_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Read Over Write IO scheduler");