/*
 * ROW (Read Over Write) I/O scheduler.
 *
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* See Documentation/block/row-iosched.txt */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/jiffies.h>

/*
 * enum row_queue_prio - Priorities of the ROW queues
 *
 * This enum defines the priorities (and the number of queues)
 * the requests will be distributed to. The higher the priority,
 * the bigger the dispatch quantum given to that queue.
 * ROWQ_PRIO_HIGH_READ is the highest priority queue.
 *
 */
enum row_queue_prio {
	ROWQ_PRIO_HIGH_READ = 0,
	ROWQ_PRIO_REG_READ,
	ROWQ_PRIO_HIGH_SWRITE,
	ROWQ_PRIO_REG_SWRITE,
	ROWQ_PRIO_REG_WRITE,
	ROWQ_PRIO_LOW_READ,
	ROWQ_PRIO_LOW_SWRITE,
	ROWQ_MAX_PRIO,
};
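
/*
 * Note: a lower enum value means a higher priority. The preemption check in
 * row_dispatch_requests() relies on this ordering when it scans the queues
 * with an index smaller than the currently served one.
 */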

/* Flags indicating whether idling is enabled on the queue */
static const bool queue_idling_enabled[] = {
	true,	/* ROWQ_PRIO_HIGH_READ */
	true,	/* ROWQ_PRIO_REG_READ */
	false,	/* ROWQ_PRIO_HIGH_SWRITE */
	false,	/* ROWQ_PRIO_REG_SWRITE */
	false,	/* ROWQ_PRIO_REG_WRITE */
	false,	/* ROWQ_PRIO_LOW_READ */
	false,	/* ROWQ_PRIO_LOW_SWRITE */
};

/* Flags indicating whether the queue can notify on urgent requests */
static const bool urgent_queues[] = {
	true,	/* ROWQ_PRIO_HIGH_READ */
	true,	/* ROWQ_PRIO_REG_READ */
	false,	/* ROWQ_PRIO_HIGH_SWRITE */
	false,	/* ROWQ_PRIO_REG_SWRITE */
	false,	/* ROWQ_PRIO_REG_WRITE */
	false,	/* ROWQ_PRIO_LOW_READ */
	false,	/* ROWQ_PRIO_LOW_SWRITE */
};

/* Default values for the ROW queues' quanta in each dispatch cycle */
static const int queue_quantum[] = {
	100,	/* ROWQ_PRIO_HIGH_READ */
	100,	/* ROWQ_PRIO_REG_READ */
	2,	/* ROWQ_PRIO_HIGH_SWRITE */
	1,	/* ROWQ_PRIO_REG_SWRITE */
	1,	/* ROWQ_PRIO_REG_WRITE */
	1,	/* ROWQ_PRIO_LOW_READ */
	1	/* ROWQ_PRIO_LOW_SWRITE */
};

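/*
 * Example: with the default quanta above, and assuming every queue stays
 * backed up, one full dispatch cycle serves up to 100 requests from each of
 * the HIGH_READ and REG_READ queues, 2 from HIGH_SWRITE and 1 from each of
 * the remaining queues.
 */
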
/* Default values for idling on read queues */
#define ROW_IDLE_TIME 50	/* 5 msec */
#define ROW_READ_FREQ 70	/* 7 msec */

/**
 * struct rowq_idling_data - parameters for idling on the queue
 * @idle_trigger_time:	time (in jiffies). If a new request was
 *			inserted before this time value, idling
 *			will be enabled.
 * @begin_idling:	flag indicating whether we should idle
 *
 */
struct rowq_idling_data {
	unsigned long		idle_trigger_time;
	bool			begin_idling;
};

/**
 * struct row_queue - requests grouping structure
 * @rdata:		parent row_data structure
 * @fifo:		fifo of requests
 * @prio:		queue priority (enum row_queue_prio)
 * @nr_dispatched:	number of requests already dispatched in
 *			the current dispatch cycle
 * @slice:		number of requests to dispatch in a cycle
 * @idle_data:		data for idling on queues
 *
 */
struct row_queue {
	struct row_data		*rdata;
	struct list_head	fifo;
	enum row_queue_prio	prio;

	unsigned int		nr_dispatched;
	unsigned int		slice;

	/* used only for READ queues */
	struct rowq_idling_data	idle_data;
};

/**
 * struct idling_data - data for idling on empty rqueue
 * @idle_time:		idling duration (msec)
 * @freq:		min time between two requests that
 *			trigger idling (msec)
 * @idle_workqueue:	workqueue on which the idling delayed
 *			work is queued
 * @idle_work:		the idling delayed work
 *
 */
struct idling_data {
	unsigned long		idle_time;
	unsigned long		freq;

	struct workqueue_struct	*idle_workqueue;
	struct delayed_work	idle_work;
};

/**
 * struct row_data - Per block device rqueue structure
 * @dispatch_queue:	dispatch rqueue
 * @row_queues:		array of priority request queues with
 *			dispatch quantum per rqueue
 * @curr_queue:		index in the row_queues array of the
 *			currently serviced rqueue
 * @read_idle:		data for idling after READ request
 * @nr_reqs:		nr_reqs[0] holds the number of all READ requests in
 *			scheduler, nr_reqs[1] holds the number of all WRITE
 *			requests in scheduler
 * @cycle_flags:	used for marking unserved queues
 *
 */
struct row_data {
	struct request_queue		*dispatch_queue;

	struct {
		struct row_queue	rqueue;
		int			disp_quantum;
	} row_queues[ROWQ_MAX_PRIO];

	enum row_queue_prio		curr_queue;

	struct idling_data		read_idle;
	unsigned int			nr_reqs[2];

	unsigned int			cycle_flags;
};

#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elv.priv[0]))

#define row_log(q, fmt, args...) \
	blk_add_trace_msg(q, "%s():" fmt, __func__, ##args)
#define row_log_rowq(rdata, rowq_id, fmt, args...) \
	blk_add_trace_msg(rdata->dispatch_queue, "rowq%d " fmt, \
		rowq_id, ##args)

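/*
 * cycle_flags is a bitmap with one bit per priority queue. A set bit marks a
 * queue that was skipped (found empty) during the current dispatch cycle;
 * such a queue may later be preferred over lower-priority queues once a
 * request is added to it (see row_dispatch_requests() and
 * row_urgent_pending()).
 */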
static inline void row_mark_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	rd->cycle_flags |= (1 << qnum);
}

static inline void row_clear_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	rd->cycle_flags &= ~(1 << qnum);
}

static inline int row_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	return rd->cycle_flags & (1 << qnum);
}

/******************** Static helper functions ***********************/
/*
 * kick_queue() - Wake up device driver queue thread
 * @work: pointer to struct work_struct
 *
 * This is an idling delayed work function. Its purpose is to wake up the
 * device driver in order for it to start fetching requests.
 *
 */
static void kick_queue(struct work_struct *work)
{
	struct delayed_work *idle_work = to_delayed_work(work);
	struct idling_data *read_data =
		container_of(idle_work, struct idling_data, idle_work);
	struct row_data *rd =
		container_of(read_data, struct row_data, read_idle);

	row_log_rowq(rd, rd->curr_queue, "Performing delayed work");
	/* Mark idling process as done */
	rd->row_queues[rd->curr_queue].rqueue.idle_data.begin_idling = false;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1]))
		row_log(rd->dispatch_queue, "No requests in scheduler");
	else {
		spin_lock_irq(rd->dispatch_queue->queue_lock);
		__blk_run_queue(rd->dispatch_queue);
		spin_unlock_irq(rd->dispatch_queue->queue_lock);
	}
}

/*
 * row_restart_disp_cycle() - Restart the dispatch cycle
 * @rd: pointer to struct row_data
 *
 * This function restarts the dispatch cycle by:
 * - Setting current queue to ROWQ_PRIO_HIGH_READ
 * - For each queue: reset the number of requests dispatched in
 *   the cycle
 */
static inline void row_restart_disp_cycle(struct row_data *rd)
{
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		rd->row_queues[i].rqueue.nr_dispatched = 0;

	rd->curr_queue = ROWQ_PRIO_HIGH_READ;
	row_log(rd->dispatch_queue, "Restarting cycle");
}

static inline void row_get_next_queue(struct row_data *rd)
{
	rd->curr_queue++;
	if (rd->curr_queue == ROWQ_MAX_PRIO)
		row_restart_disp_cycle(rd);
}

/******************* Elevator callback functions *********************/

/*
 * row_add_request() - Add request to the scheduler
 * @q: requests queue
 * @rq: request to add
 *
 */
static void row_add_request(struct request_queue *q,
				struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (queue_idling_enabled[rqueue->prio]) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (time_before(jiffies, rqueue->idle_data.idle_trigger_time)) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else
			rqueue->idle_data.begin_idling = false;

		rqueue->idle_data.idle_trigger_time =
			jiffies + msecs_to_jiffies(rd->read_idle.freq);
	}
	if (urgent_queues[rqueue->prio] &&
	    row_rowq_unserved(rd, rqueue->prio)) {
		row_log_rowq(rd, rqueue->prio,
			"added urgent req curr_queue = %d",
			rd->curr_queue);
	} else
		row_log_rowq(rd, rqueue->prio, "added request");
}

/**
 * row_reinsert_req() - Reinsert request back to the scheduler
 * @q: requests queue
 * @rq: request to add
 *
 * Reinsert the given request back to the queue it was
 * dispatched from as if it was never dispatched.
 *
 * Returns 0 on success, error code otherwise
 */
static int row_reinsert_req(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	/* Verify rqueue is legitimate */
	if (rqueue->prio >= ROWQ_MAX_PRIO) {
		pr_err("\n\nROW BUG: row_reinsert_req() rqueue->prio = %d\n",
			rqueue->prio);
		blk_dump_rq_flags(rq, "");
		return -EIO;
	}

	list_add(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;

	row_log_rowq(rd, rqueue->prio, "request reinserted");

	return 0;
}

/**
 * row_urgent_pending() - Return TRUE if there is an urgent
 *			  request in the scheduler
 * @q: requests queue
 */
static bool row_urgent_pending(struct request_queue *q)
{
	struct row_data *rd = q->elevator->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		if (urgent_queues[i] && row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, i,
				"Urgent request pending (curr=%i)",
				rd->curr_queue);
			return true;
		}

	return false;
}

/**
 * row_remove_request() - Remove given request from scheduler
 * @q: requests queue
 * @rq: request to remove
 *
 */
static void row_remove_request(struct request_queue *q,
			       struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;

	rq_fifo_clear(rq);
	rd->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd: pointer to struct row_data
 *
 * This function moves the next request to dispatch from
 * rd->curr_queue to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++;
	row_clear_rowq_unserved(rd, rd->curr_queue);
	row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d",
		     rd->row_queues[rd->curr_queue].rqueue.nr_dispatched);
}

/*
 * row_choose_queue() - choose the next queue to dispatch from
 * @rd: pointer to struct row_data
 *
 * Updates rd->curr_queue. Returns 1 if there are requests to
 * dispatch, 0 if there are no requests in scheduler
 *
 */
static int row_choose_queue(struct row_data *rd)
{
	int prev_curr_queue = rd->curr_queue;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		return 0;
	}

	row_get_next_queue(rd);

	/*
	 * Loop over all queues to find the next queue that is not empty.
	 * Stop when you get back to curr_queue
	 */
	while (list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo)
	       && rd->curr_queue != prev_curr_queue) {
		/* Mark rqueue as unserved */
		row_mark_rowq_unserved(rd, rd->curr_queue);
		row_get_next_queue(rd);
	}

	return 1;
}

/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q: requests queue
 * @force: flag indicating a forced dispatch. When set, pending idling
 *	   is cancelled and no new idling is scheduled.
 *
 * Return 0 if no requests were moved to the dispatch queue.
 * 1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, currq,
				" Preempting for unserved rowq%d", i);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/* check idling */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			if (force) {
				(void)cancel_delayed_work(
					&rd->read_idle.idle_work);
				row_log_rowq(rd, currq,
					"Canceled delayed work - forced dispatch");
			} else {
				row_log_rowq(rd, currq,
					"Delayed work pending. Exiting");
				goto done;
			}
		}

		if (!force && queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
						&rd->read_idle.idle_work,
						jiffies +
						msecs_to_jiffies(rd->read_idle.idle_time))) {
				row_log_rowq(rd, currq,
					"Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!");
			} else
				row_log_rowq(rd, currq,
					"Scheduled delayed work. exiting");
			goto done;
		} else {
			row_log_rowq(rd, currq,
				"Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}

/*
 * row_init_queue() - Init scheduler data structures
 * @q: requests queue
 *
 * Return pointer to struct row_data to be saved in elevator for
 * this dispatch queue
 *
 */
static void *row_init_queue(struct request_queue *q)
{

	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			     GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].rqueue.fifo);
		rdata->row_queues[i].disp_quantum = queue_quantum[i];
		rdata->row_queues[i].rqueue.rdata = rdata;
		rdata->row_queues[i].rqueue.prio = i;
		rdata->row_queues[i].rqueue.idle_data.begin_idling = false;
	}

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases
	 */
	rdata->read_idle.idle_time = ROW_IDLE_TIME;
	rdata->read_idle.freq = ROW_READ_FREQ;
	rdata->read_idle.idle_workqueue = alloc_workqueue("row_idle_work",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!rdata->read_idle.idle_workqueue)
		panic("Failed to create idle workqueue\n");
	INIT_DELAYED_WORK(&rdata->read_idle.idle_work, kick_queue);

	rdata->curr_queue = ROWQ_PRIO_HIGH_READ;
	rdata->dispatch_queue = q;

	rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0;

	return rdata;
}

/*
 * row_exit_queue() - called on unloading the ROW scheduler
 * @e: pointer to struct elevator_queue
 *
 */
static void row_exit_queue(struct elevator_queue *e)
{
	struct row_data *rd = (struct row_data *)e->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		BUG_ON(!list_empty(&rd->row_queues[i].rqueue.fifo));
	(void)cancel_delayed_work_sync(&rd->read_idle.idle_work);
	kfree(rd);
}

/*
 * row_merged_requests() - Called when 2 requests are merged
 * @q: requests queue
 * @rq: request the two requests were merged into
 * @next: request that was merged
 */
static void row_merged_requests(struct request_queue *q, struct request *rq,
				struct request *next)
{
	struct row_queue *rqueue = RQ_ROWQ(next);

	list_del_init(&next->queuelist);

	rqueue->rdata->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * get_queue_type() - Get queue type for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and
 * dispatched from later on)
 *
 * TODO: Right now only 3 queues are used: REG_READ, REG_WRITE
 * and REG_SWRITE
 */
static enum row_queue_prio get_queue_type(struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);

	if (data_dir == READ)
		return ROWQ_PRIO_REG_READ;
	else if (is_sync)
		return ROWQ_PRIO_REG_SWRITE;
	else
		return ROWQ_PRIO_REG_WRITE;
}

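/*
 * Note: because get_queue_type() currently maps every request to one of the
 * REG_* queues, the HIGH_* and LOW_* entries in queue_quantum,
 * queue_idling_enabled and urgent_queues have no effect until more queue
 * types are put to use.
 */
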
/*
 * row_set_request() - Set ROW data structures associated with this request.
 * @q: requests queue
 * @rq: pointer to the request
 * @gfp_mask: ignored
 *
 */
static int
row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	rq->elv.priv[0] =
		(void *)(&rd->row_queues[get_queue_type(rq)]);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

/********** Helper sysfs functions/definitions for ROW attributes ***********/
static ssize_t row_var_show(int var, char *page)
{
	return snprintf(page, 100, "%d\n", var);
}

static ssize_t row_var_store(int *var, const char *page, size_t count)
{
	int err;
	err = kstrtoint(page, 10, var);

	return count;
}

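/*
 * The SHOW/STORE macros below expose the per-queue quanta and the idling
 * parameters via sysfs. When __CONV is set, the value is converted with
 * jiffies_to_msecs()/msecs_to_jiffies() on show/store; the dispatch quanta
 * are shown and stored as plain request counts.
 */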
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
	struct row_data *rowd = e->elevator_data; \
	int __data = __VAR; \
	if (__CONV) \
		__data = jiffies_to_msecs(__data); \
	return row_var_show(__data, (page)); \
}
SHOW_FUNCTION(row_hp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0);
SHOW_FUNCTION(row_rp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0);
SHOW_FUNCTION(row_hp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_write_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0);
SHOW_FUNCTION(row_lp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0);
SHOW_FUNCTION(row_lp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_read_idle_show, rowd->read_idle.idle_time, 1);
SHOW_FUNCTION(row_read_idle_freq_show, rowd->read_idle.freq, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, \
			const char *page, size_t count) \
{ \
	struct row_data *rowd = e->elevator_data; \
	int __data; \
	int ret = row_var_store(&__data, (page), count); \
	if (__CONV) \
		__data = (int)msecs_to_jiffies(__data); \
	if (__data < (MIN)) \
		__data = (MIN); \
	else if (__data > (MAX)) \
		__data = (MAX); \
	*(__PTR) = __data; \
	return ret; \
}
STORE_FUNCTION(row_hp_read_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum,
		1, INT_MAX, 0);
STORE_FUNCTION(row_rp_read_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum,
		1, INT_MAX, 0);
STORE_FUNCTION(row_hp_swrite_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum,
		1, INT_MAX, 0);
STORE_FUNCTION(row_rp_swrite_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum,
		1, INT_MAX, 0);
STORE_FUNCTION(row_rp_write_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum,
		1, INT_MAX, 0);
STORE_FUNCTION(row_lp_read_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum,
		1, INT_MAX, 0);
STORE_FUNCTION(row_lp_swrite_quantum_store,
		&rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum,
		1, INT_MAX, 0);
STORE_FUNCTION(row_read_idle_store, &rowd->read_idle.idle_time, 1, INT_MAX, 1);
STORE_FUNCTION(row_read_idle_freq_store, &rowd->read_idle.freq, 1, INT_MAX, 1);

#undef STORE_FUNCTION

#define ROW_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, row_##name##_show, \
	       row_##name##_store)

static struct elv_fs_entry row_attrs[] = {
	ROW_ATTR(hp_read_quantum),
	ROW_ATTR(rp_read_quantum),
	ROW_ATTR(hp_swrite_quantum),
	ROW_ATTR(rp_swrite_quantum),
	ROW_ATTR(rp_write_quantum),
	ROW_ATTR(lp_read_quantum),
	ROW_ATTR(lp_swrite_quantum),
	ROW_ATTR(read_idle),
	ROW_ATTR(read_idle_freq),
	__ATTR_NULL
};

static struct elevator_type iosched_row = {
	.ops = {
		.elevator_merge_req_fn = row_merged_requests,
		.elevator_dispatch_fn = row_dispatch_requests,
		.elevator_add_req_fn = row_add_request,
		.elevator_reinsert_req_fn = row_reinsert_req,
		.elevator_is_urgent_fn = row_urgent_pending,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = row_set_request,
		.elevator_init_fn = row_init_queue,
		.elevator_exit_fn = row_exit_queue,
	},

	.elevator_attrs = row_attrs,
	.elevator_name = "row",
	.elevator_owner = THIS_MODULE,
};
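
/*
 * Once registered, the scheduler can be selected per block device at
 * runtime, e.g.:
 *	echo row > /sys/block/<dev>/queue/scheduler
 * The quanta and idling parameters above are then exported under
 * /sys/block/<dev>/queue/iosched/.
 */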

static int __init row_init(void)
{
	elv_register(&iosched_row);
	return 0;
}

static void __exit row_exit(void)
{
	elv_unregister(&iosched_row);
}

module_init(row_init);
module_exit(row_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Read Over Write IO scheduler");