/*
 * ROW (Read Over Write) I/O scheduler.
 *
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* See Documentation/block/row-iosched.txt */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/jiffies.h>

/*
 * enum row_queue_prio - Priorities of the ROW queues
 *
 * This enum defines the priorities (and the number of queues)
 * the requests will be distributed to. The higher the priority,
 * the bigger the dispatch quantum given to that queue.
 * ROWQ_PRIO_HIGH_READ is the highest priority queue.
 *
 */
enum row_queue_prio {
	ROWQ_PRIO_HIGH_READ = 0,
	ROWQ_PRIO_REG_READ,
	ROWQ_PRIO_HIGH_SWRITE,
	ROWQ_PRIO_REG_SWRITE,
	ROWQ_PRIO_REG_WRITE,
	ROWQ_PRIO_LOW_READ,
	ROWQ_PRIO_LOW_SWRITE,
	ROWQ_MAX_PRIO,
};

/* Flags indicating whether idling is enabled on the queue */
static const bool queue_idling_enabled[] = {
	true,	/* ROWQ_PRIO_HIGH_READ */
	true,	/* ROWQ_PRIO_REG_READ */
	false,	/* ROWQ_PRIO_HIGH_SWRITE */
	false,	/* ROWQ_PRIO_REG_SWRITE */
	false,	/* ROWQ_PRIO_REG_WRITE */
	false,	/* ROWQ_PRIO_LOW_READ */
	false,	/* ROWQ_PRIO_LOW_SWRITE */
};

/* Default dispatch quantum for each row queue in a dispatch cycle */
static const int queue_quantum[] = {
	100,	/* ROWQ_PRIO_HIGH_READ */
	100,	/* ROWQ_PRIO_REG_READ */
	2,	/* ROWQ_PRIO_HIGH_SWRITE */
	1,	/* ROWQ_PRIO_REG_SWRITE */
	1,	/* ROWQ_PRIO_REG_WRITE */
	1,	/* ROWQ_PRIO_LOW_READ */
	1	/* ROWQ_PRIO_LOW_SWRITE */
};
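
/*
 * Illustrative example (derived from the defaults above): on a saturated
 * device a single dispatch cycle services up to 100 high-priority reads,
 * then up to 100 regular reads, 2 high-priority sync writes and one
 * request from each remaining queue, after which the cycle restarts at
 * ROWQ_PRIO_HIGH_READ (see row_choose_queue() and row_dispatch_requests()).
 */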

/* Default values for idling on read queues */
#define ROW_IDLE_TIME 50	/* msec */
#define ROW_READ_FREQ 70	/* msec */

/**
 * struct rowq_idling_data - parameters for idling on the queue
 * @idle_trigger_time:	time (in jiffies). If a new request was
 *			inserted before this time value, idling
 *			will be enabled.
 * @begin_idling:	flag indicating whether we should idle
 *
 */
struct rowq_idling_data {
	unsigned long idle_trigger_time;
	bool begin_idling;
};

/**
 * struct row_queue - requests grouping structure
 * @rdata:		parent row_data structure
 * @fifo:		fifo of requests
 * @prio:		queue priority (enum row_queue_prio)
 * @nr_dispatched:	number of requests already dispatched in
 *			the current dispatch cycle
 * @slice:		number of requests to dispatch in a cycle
 * @idle_data:		data for idling on queues
 *
 */
struct row_queue {
	struct row_data *rdata;
	struct list_head fifo;
	enum row_queue_prio prio;

	unsigned int nr_dispatched;
	unsigned int slice;

	/* used only for READ queues */
	struct rowq_idling_data idle_data;
};

/**
 * struct idling_data - data for idling on empty rqueue
 * @idle_time:		idling duration (msec)
 * @freq:		min time between two requests that
 *			trigger idling (msec)
 * @idle_workqueue:	workqueue on which the idling delayed work runs
 * @idle_work:		delayed work used for idling on READ queues
 *
 */
struct idling_data {
	unsigned long idle_time;
	unsigned long freq;

	struct workqueue_struct *idle_workqueue;
	struct delayed_work idle_work;
};

/**
 * struct row_data - Per block device rqueue structure
 * @dispatch_queue:	dispatch rqueue
 * @row_queues:		array of priority request queues with
 *			dispatch quantum per rqueue
 * @curr_queue:		index in the row_queues array of the
 *			currently serviced rqueue
 * @read_idle:		data for idling after READ request
 * @nr_reqs:		nr_reqs[0] holds the number of all READ requests in
 *			scheduler, nr_reqs[1] holds the number of all WRITE
 *			requests in scheduler
 * @cycle_flags:	used for marking unserved queues
 *
 */
struct row_data {
	struct request_queue *dispatch_queue;

	struct {
		struct row_queue rqueue;
		int disp_quantum;
	} row_queues[ROWQ_MAX_PRIO];

	enum row_queue_prio curr_queue;

	struct idling_data read_idle;
	unsigned int nr_reqs[2];

	unsigned int cycle_flags;
};

#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elv.priv[0]))

#define row_log(q, fmt, args...)   \
	blk_add_trace_msg(q, "%s():" fmt , __func__, ##args)
#define row_log_rowq(rdata, rowq_id, fmt, args...)		\
	blk_add_trace_msg(rdata->dispatch_queue, "rowq%d " fmt, \
		rowq_id, ##args)

static inline void row_mark_rowq_unserved(struct row_data *rd,
					 enum row_queue_prio qnum)
{
	rd->cycle_flags |= (1 << qnum);
}

static inline void row_clear_rowq_unserved(struct row_data *rd,
					 enum row_queue_prio qnum)
{
	rd->cycle_flags &= ~(1 << qnum);
}

static inline int row_rowq_unserved(struct row_data *rd,
					 enum row_queue_prio qnum)
{
	return rd->cycle_flags & (1 << qnum);
}

/******************** Static helper functions ***********************/
/*
 * kick_queue() - Wake up device driver queue thread
 * @work:	pointer to struct work_struct
 *
 * This is an idling delayed work function. Its purpose is to wake up the
 * device driver in order for it to start fetching requests.
 *
 */
static void kick_queue(struct work_struct *work)
{
	struct delayed_work *idle_work = to_delayed_work(work);
	struct idling_data *read_data =
		container_of(idle_work, struct idling_data, idle_work);
	struct row_data *rd =
		container_of(read_data, struct row_data, read_idle);

	row_log_rowq(rd, rd->curr_queue, "Performing delayed work");
	/* Mark idling process as done */
	rd->row_queues[rd->curr_queue].rqueue.idle_data.begin_idling = false;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1]))
		row_log(rd->dispatch_queue, "No requests in scheduler");
	else {
		spin_lock_irq(rd->dispatch_queue->queue_lock);
		__blk_run_queue(rd->dispatch_queue);
		spin_unlock_irq(rd->dispatch_queue->queue_lock);
	}
}

/*
 * row_restart_disp_cycle() - Restart the dispatch cycle
 * @rd:	pointer to struct row_data
 *
 * This function restarts the dispatch cycle by:
 * - Setting current queue to ROWQ_PRIO_HIGH_READ
 * - For each queue: reset the number of requests dispatched in
 *   the cycle
 */
static inline void row_restart_disp_cycle(struct row_data *rd)
{
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		rd->row_queues[i].rqueue.nr_dispatched = 0;

	rd->curr_queue = ROWQ_PRIO_HIGH_READ;
	row_log(rd->dispatch_queue, "Restarting cycle");
}

static inline void row_get_next_queue(struct row_data *rd)
{
	rd->curr_queue++;
	if (rd->curr_queue == ROWQ_MAX_PRIO)
		row_restart_disp_cycle(rd);
}

/******************* Elevator callback functions *********************/

/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (queue_idling_enabled[rqueue->prio]) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (time_before(jiffies, rqueue->idle_data.idle_trigger_time)) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else
			rqueue->idle_data.begin_idling = false;

		rqueue->idle_data.idle_trigger_time =
			jiffies + msecs_to_jiffies(rd->read_idle.freq);
	}
	row_log_rowq(rd, rqueue->prio, "added request");
}
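
/*
 * Illustrative example of the idling heuristic above: with the default
 * read_idle.freq, a read inserted within that window of the previous read
 * on the same queue sets begin_idling, so once that queue drains,
 * row_dispatch_requests() arms the idling delayed work instead of moving
 * on to the write queues, giving the next read a chance to arrive first.
 */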

/**
 * row_reinsert_req() - Reinsert request back to the scheduler
 * @q:	requests queue
 * @rq:	request to reinsert
 *
 * Reinsert the given request back to the queue it was
 * dispatched from as if it was never dispatched.
 *
 * Returns 0 on success, error code otherwise
 */
static int row_reinsert_req(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	/* Verify rqueue is legitimate */
	if (rqueue->prio >= ROWQ_MAX_PRIO) {
		pr_err("\n\nROW BUG: row_reinsert_req() rqueue->prio = %d\n",
			   rqueue->prio);
		blk_dump_rq_flags(rq, "");
		return -EIO;
	}

	list_add(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;

	row_log_rowq(rd, rqueue->prio, "request reinserted");

	return 0;
}

/**
 * row_remove_request() - Remove given request from scheduler
 * @q:	requests queue
 * @rq:	request to remove
 *
 */
static void row_remove_request(struct request_queue *q,
			       struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;

	rq_fifo_clear(rq);
	rd->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:	pointer to struct row_data
 *
 * This function moves the next request to dispatch from
 * rd->curr_queue to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++;
	row_clear_rowq_unserved(rd, rd->curr_queue);
	row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d",
		     rd->row_queues[rd->curr_queue].rqueue.nr_dispatched);
}

/*
 * row_choose_queue() - choose the next queue to dispatch from
 * @rd:	pointer to struct row_data
 *
 * Updates rd->curr_queue. Returns 1 if there are requests to
 * dispatch, 0 if there are no requests in scheduler
 *
 */
static int row_choose_queue(struct row_data *rd)
{
	int prev_curr_queue = rd->curr_queue;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		return 0;
	}

	row_get_next_queue(rd);

	/*
	 * Loop over all queues to find the next queue that is not empty.
	 * Stop when we get back to the queue we started from.
	 */
	while (list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo)
	       && rd->curr_queue != prev_curr_queue) {
		/* Mark rqueue as unserved */
		row_mark_rowq_unserved(rd, rd->curr_queue);
		row_get_next_queue(rd);
	}

	return 1;
}

/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:	ignored
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, currq,
				" Preempting for unserved rowq%d", i);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/* check idling */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			if (force) {
				(void)cancel_delayed_work(
					&rd->read_idle.idle_work);
				row_log_rowq(rd, currq,
					"Canceled delayed work - forced dispatch");
			} else {
				row_log_rowq(rd, currq,
					"Delayed work pending. Exiting");
				goto done;
			}
		}

		if (!force && queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			/* delay is relative to now, in jiffies */
			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
					&rd->read_idle.idle_work,
					msecs_to_jiffies(rd->read_idle.idle_time))) {
				row_log_rowq(rd, currq,
					     "Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!");
			} else
				row_log_rowq(rd, currq,
					     "Scheduled delayed work. exiting");
			goto done;
		} else {
			row_log_rowq(rd, currq,
				     "Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}

/*
 * row_init_queue() - Init scheduler data structures
 * @q:	requests queue
 *
 * Return pointer to struct row_data to be saved in elevator for
 * this dispatch queue
 *
 */
static void *row_init_queue(struct request_queue *q)
{
	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			     GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].rqueue.fifo);
		rdata->row_queues[i].disp_quantum = queue_quantum[i];
		rdata->row_queues[i].rqueue.rdata = rdata;
		rdata->row_queues[i].rqueue.prio = i;
		rdata->row_queues[i].rqueue.idle_data.begin_idling = false;
	}

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases
	 */
	rdata->read_idle.idle_time = ROW_IDLE_TIME;
	rdata->read_idle.freq = ROW_READ_FREQ;
	rdata->read_idle.idle_workqueue = alloc_workqueue("row_idle_work",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!rdata->read_idle.idle_workqueue)
		panic("Failed to create idle workqueue\n");
	INIT_DELAYED_WORK(&rdata->read_idle.idle_work, kick_queue);

	rdata->curr_queue = ROWQ_PRIO_HIGH_READ;
	rdata->dispatch_queue = q;

	rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0;

	return rdata;
}

/*
 * row_exit_queue() - called on unloading the ROW scheduler
 * @e:	pointer to struct elevator_queue
 *
 */
static void row_exit_queue(struct elevator_queue *e)
{
	struct row_data *rd = (struct row_data *)e->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		BUG_ON(!list_empty(&rd->row_queues[i].rqueue.fifo));
	(void)cancel_delayed_work_sync(&rd->read_idle.idle_work);
	kfree(rd);
}

/*
 * row_merged_requests() - Called when 2 requests are merged
 * @q:		requests queue
 * @rq:		request the two requests were merged into
 * @next:	request that was merged
 */
static void row_merged_requests(struct request_queue *q, struct request *rq,
				 struct request *next)
{
	struct row_queue *rqueue = RQ_ROWQ(next);

	list_del_init(&next->queuelist);

	rqueue->rdata->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * get_queue_type() - Get queue type for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and
 * dispatched from later on)
 *
 * TODO: Right now only 3 queues are used: REG_READ, REG_WRITE
 * and REG_SWRITE
 */
static enum row_queue_prio get_queue_type(struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);

	if (data_dir == READ)
		return ROWQ_PRIO_REG_READ;
	else if (is_sync)
		return ROWQ_PRIO_REG_SWRITE;
	else
		return ROWQ_PRIO_REG_WRITE;
}
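
/*
 * Illustrative note on the current mapping above: every READ lands on
 * ROWQ_PRIO_REG_READ, a write carrying REQ_SYNC lands on
 * ROWQ_PRIO_REG_SWRITE, and any other write lands on ROWQ_PRIO_REG_WRITE.
 * The HIGH/LOW queue variants are never selected here yet (see the TODO).
 */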

/*
 * row_set_request() - Set ROW data structures associated with this request.
 * @q:		requests queue
 * @rq:		pointer to the request
 * @gfp_mask:	ignored
 *
 */
static int
row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	rq->elv.priv[0] =
		(void *)(&rd->row_queues[get_queue_type(rq)]);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

/********** Helper sysfs functions/definitions for ROW attributes ******/
static ssize_t row_var_show(int var, char *page)
{
	return snprintf(page, 100, "%d\n", var);
}

static ssize_t row_var_store(int *var, const char *page, size_t count)
{
	int err;

	err = kstrtoint(page, 10, var);

	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return row_var_show(__data, (page));				\
}
SHOW_FUNCTION(row_hp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0);
SHOW_FUNCTION(row_rp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0);
SHOW_FUNCTION(row_hp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_write_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0);
SHOW_FUNCTION(row_lp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0);
SHOW_FUNCTION(row_lp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_read_idle_show, rowd->read_idle.idle_time, 1);
SHOW_FUNCTION(row_read_idle_freq_show, rowd->read_idle.freq, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e,				\
		const char *page, size_t count)				\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data;							\
	int ret = row_var_store(&__data, (page), count);		\
	if (__CONV)							\
		__data = (int)msecs_to_jiffies(__data);			\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __data;						\
	return ret;							\
}
STORE_FUNCTION(row_hp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 1, INT_MAX, 0);
STORE_FUNCTION(row_rp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_hp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_write_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_lp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_lp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_read_idle_store, &rowd->read_idle.idle_time, 1, INT_MAX, 1);
STORE_FUNCTION(row_read_idle_freq_store, &rowd->read_idle.freq, 1, INT_MAX, 1);

#undef STORE_FUNCTION

#define ROW_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, row_##name##_show, \
				      row_##name##_store)

static struct elv_fs_entry row_attrs[] = {
	ROW_ATTR(hp_read_quantum),
	ROW_ATTR(rp_read_quantum),
	ROW_ATTR(hp_swrite_quantum),
	ROW_ATTR(rp_swrite_quantum),
	ROW_ATTR(rp_write_quantum),
	ROW_ATTR(lp_read_quantum),
	ROW_ATTR(lp_swrite_quantum),
	ROW_ATTR(read_idle),
	ROW_ATTR(read_idle_freq),
	__ATTR_NULL
};
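
/*
 * The attributes above are exposed through the elevator's sysfs directory.
 * Illustrative usage (the path assumes the standard per-queue "iosched"
 * directory and a block device, e.g. sda, with the row scheduler selected):
 *
 *	echo row > /sys/block/sda/queue/scheduler
 *	cat /sys/block/sda/queue/iosched/hp_read_quantum
 *	echo 50 > /sys/block/sda/queue/iosched/read_idle
 */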

static struct elevator_type iosched_row = {
	.ops = {
		.elevator_merge_req_fn = row_merged_requests,
		.elevator_dispatch_fn = row_dispatch_requests,
		.elevator_add_req_fn = row_add_request,
		.elevator_reinsert_req_fn = row_reinsert_req,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = row_set_request,
		.elevator_init_fn = row_init_queue,
		.elevator_exit_fn = row_exit_queue,
	},

	.elevator_attrs = row_attrs,
	.elevator_name = "row",
	.elevator_owner = THIS_MODULE,
};

static int __init row_init(void)
{
	elv_register(&iosched_row);
	return 0;
}

static void __exit row_exit(void)
{
	elv_unregister(&iosched_row);
}

module_init(row_init);
module_exit(row_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Read Over Write IO scheduler");