/*
 * ROW (Read Over Write) I/O scheduler.
 *
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* See Documentation/block/row-iosched.txt */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/jiffies.h>

/*
 * enum row_queue_prio - Priorities of the ROW queues
 *
 * This enum defines the priorities (and the number of queues)
 * the requests will be distributed to. The higher the priority,
 * the bigger the dispatch quantum given to that queue.
 * ROWQ_PRIO_HIGH_READ is the highest priority queue.
 *
 */
enum row_queue_prio {
	ROWQ_PRIO_HIGH_READ = 0,
	ROWQ_PRIO_REG_READ,
	ROWQ_PRIO_HIGH_SWRITE,
	ROWQ_PRIO_REG_SWRITE,
	ROWQ_PRIO_REG_WRITE,
	ROWQ_PRIO_LOW_READ,
	ROWQ_PRIO_LOW_SWRITE,
	ROWQ_MAX_PRIO,
};

/* Flags indicating whether idling is enabled on the queue */
static const bool queue_idling_enabled[] = {
	true,	/* ROWQ_PRIO_HIGH_READ */
	true,	/* ROWQ_PRIO_REG_READ */
	false,	/* ROWQ_PRIO_HIGH_SWRITE */
	false,	/* ROWQ_PRIO_REG_SWRITE */
	false,	/* ROWQ_PRIO_REG_WRITE */
	false,	/* ROWQ_PRIO_LOW_READ */
	false,	/* ROWQ_PRIO_LOW_SWRITE */
};

/* Default values for the row queue quanta in each dispatch cycle */
static const int queue_quantum[] = {
	100,	/* ROWQ_PRIO_HIGH_READ */
	100,	/* ROWQ_PRIO_REG_READ */
	2,	/* ROWQ_PRIO_HIGH_SWRITE */
	1,	/* ROWQ_PRIO_REG_SWRITE */
	1,	/* ROWQ_PRIO_REG_WRITE */
	1,	/* ROWQ_PRIO_LOW_READ */
	1	/* ROWQ_PRIO_LOW_SWRITE */
};
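
/*
 * Illustration (an added note, not part of the original sources): with the
 * default quanta above, one full dispatch cycle that finds every queue
 * backlogged may move up to 100 + 100 + 2 + 1 + 1 + 1 + 1 = 206 requests,
 * visiting the queues in priority order. Empty queues are skipped (and
 * marked "unserved" so that they can preempt the cycle once requests
 * arrive for them).
 */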

/* Default values for idling on read queues (in msec) */
#define ROW_IDLE_TIME_MSEC 5
#define ROW_READ_FREQ_MSEC 20

/**
 * struct rowq_idling_data - parameters for idling on the queue
 * @last_insert_time:	time the last request was inserted
 *			to the queue
 * @begin_idling:	flag indicating whether we should idle
 *
 */
struct rowq_idling_data {
	ktime_t			last_insert_time;
	bool			begin_idling;
};

/**
 * struct row_queue - requests grouping structure
 * @rdata:		parent row_data structure
 * @fifo:		fifo of requests
 * @prio:		queue priority (enum row_queue_prio)
 * @nr_dispatched:	number of requests already dispatched in
 *			the current dispatch cycle
 * @slice:		number of requests to dispatch in a cycle
 * @idle_data:		data for idling on queues
 *
 */
struct row_queue {
	struct row_data		*rdata;
	struct list_head	fifo;
	enum row_queue_prio	prio;

	unsigned int		nr_dispatched;
	unsigned int		slice;

	/* used only for READ queues */
	struct rowq_idling_data	idle_data;
};

/**
 * struct idling_data - data for idling on empty rqueue
 * @idle_time:		idling duration (jiffies)
 * @freq:		min time between two requests that
 *			trigger idling (msec)
 * @idle_workqueue:	workqueue on which the idling delayed work is queued
 * @idle_work:		delayed work used for idling
 *
 */
struct idling_data {
	unsigned long			idle_time;
	u32				freq;

	struct workqueue_struct		*idle_workqueue;
	struct delayed_work		idle_work;
};

/**
 * struct row_data - Per block device rqueue structure
 * @dispatch_queue:	dispatch rqueue
 * @row_queues:		array of priority request queues with
 *			dispatch quantum per rqueue
 * @curr_queue:		index in the row_queues array of the
 *			currently serviced rqueue
 * @read_idle:		data for idling after READ request
 * @nr_reqs:		nr_reqs[0] holds the number of all READ requests in
 *			scheduler, nr_reqs[1] holds the number of all WRITE
 *			requests in scheduler
 * @cycle_flags:	used for marking unserved queues
 *
 */
struct row_data {
	struct request_queue		*dispatch_queue;

	struct {
		struct row_queue	rqueue;
		int			disp_quantum;
	} row_queues[ROWQ_MAX_PRIO];

	enum row_queue_prio		curr_queue;

	struct idling_data		read_idle;
	unsigned int			nr_reqs[2];

	unsigned int			cycle_flags;
};

#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elv.priv[0]))

#define row_log(q, fmt, args...)	\
	blk_add_trace_msg(q, "%s():" fmt , __func__, ##args)
#define row_log_rowq(rdata, rowq_id, fmt, args...)		\
	blk_add_trace_msg(rdata->dispatch_queue, "rowq%d " fmt, \
		rowq_id, ##args)

static inline void row_mark_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	rd->cycle_flags |= (1 << qnum);
}

static inline void row_clear_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	rd->cycle_flags &= ~(1 << qnum);
}

static inline int row_rowq_unserved(struct row_data *rd,
					enum row_queue_prio qnum)
{
	return rd->cycle_flags & (1 << qnum);
}
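
/*
 * Illustration (an added note, not in the original sources): cycle_flags is
 * a per-cycle bitmask with one bit per priority queue, so with
 * ROWQ_MAX_PRIO == 7 only the seven low bits are used. For example, after
 * an empty ROWQ_PRIO_REG_READ queue (index 1) is skipped during the cycle,
 * bit 1 is set; row_dispatch_requests() later checks that bit so the queue
 * can preempt lower-priority queues as soon as a request arrives for it,
 * and row_dispatch_insert() clears the bit once the queue is served.
 */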

/******************** Static helper functions ***********************/
/*
 * kick_queue() - Wake up device driver queue thread
 * @work:	pointer to struct work_struct
 *
 * This is an idling delayed work function. Its purpose is to wake up the
 * device driver in order for it to start fetching requests.
 *
 */
static void kick_queue(struct work_struct *work)
{
	struct delayed_work *idle_work = to_delayed_work(work);
	struct idling_data *read_data =
		container_of(idle_work, struct idling_data, idle_work);
	struct row_data *rd =
		container_of(read_data, struct row_data, read_idle);

	row_log_rowq(rd, rd->curr_queue, "Performing delayed work");
	/* Mark idling process as done */
	rd->row_queues[rd->curr_queue].rqueue.idle_data.begin_idling = false;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1]))
		row_log(rd->dispatch_queue, "No requests in scheduler");
	else {
		spin_lock_irq(rd->dispatch_queue->queue_lock);
		__blk_run_queue(rd->dispatch_queue);
		spin_unlock_irq(rd->dispatch_queue->queue_lock);
	}
}

/*
 * row_restart_disp_cycle() - Restart the dispatch cycle
 * @rd:	pointer to struct row_data
 *
 * This function restarts the dispatch cycle by:
 * - Setting current queue to ROWQ_PRIO_HIGH_READ
 * - For each queue: reset the number of requests dispatched in
 *   the cycle
 */
static inline void row_restart_disp_cycle(struct row_data *rd)
{
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		rd->row_queues[i].rqueue.nr_dispatched = 0;

	rd->curr_queue = ROWQ_PRIO_HIGH_READ;
	row_log(rd->dispatch_queue, "Restarting cycle");
}

static inline void row_get_next_queue(struct row_data *rd)
{
	rd->curr_queue++;
	if (rd->curr_queue == ROWQ_MAX_PRIO)
		row_restart_disp_cycle(rd);
}

/******************* Elevator callback functions *********************/

/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (queue_idling_enabled[rqueue->prio]) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time)) <
				rd->read_idle.freq) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling");
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	row_log_rowq(rd, rqueue->prio, "added request");
}
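
/*
 * Worked example (added for illustration, not part of the original code):
 * with the defaults above, if a read is inserted less than
 * ROW_READ_FREQ_MSEC (20 ms) after the previous insert on the same read
 * queue, begin_idling is set; the dispatch path will then wait up to
 * ROW_IDLE_TIME_MSEC (5 ms, rounded up to at least one jiffy) for another
 * read to show up before it moves on to lower-priority queues.
 */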

/*
 * row_remove_request() - Remove given request from scheduler
 * @q:	requests queue
 * @rq:	request to remove
 *
 */
static void row_remove_request(struct request_queue *q,
			       struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;

	rq_fifo_clear(rq);
	rd->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:	pointer to struct row_data
 *
 * This function moves the next request to dispatch from
 * rd->curr_queue to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++;
	row_clear_rowq_unserved(rd, rd->curr_queue);
	row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d",
		     rd->row_queues[rd->curr_queue].rqueue.nr_dispatched);
}

/*
 * row_choose_queue() - choose the next queue to dispatch from
 * @rd:	pointer to struct row_data
 *
 * Updates rd->curr_queue. Returns 1 if there are requests to
 * dispatch, 0 if there are no requests in scheduler
 *
 */
static int row_choose_queue(struct row_data *rd)
{
	int prev_curr_queue = rd->curr_queue;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		return 0;
	}

	row_get_next_queue(rd);

	/*
	 * Loop over all queues to find the next queue that is not empty.
	 * Stop when you get back to curr_queue
	 */
	while (list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo)
	       && rd->curr_queue != prev_curr_queue) {
		/* Mark rqueue as unserved */
		row_mark_rowq_unserved(rd, rd->curr_queue);
		row_get_next_queue(rd);
	}

	return 1;
}

/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:	ignored
 *
 * Return 0 if no requests were moved to the dispatch queue.
 * 1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, currq,
				" Preempting for unserved rowq%d", i);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/* check idling */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			if (force) {
				(void)cancel_delayed_work(
					&rd->read_idle.idle_work);
				row_log_rowq(rd, currq,
					"Canceled delayed work - forced dispatch");
			} else {
				row_log_rowq(rd, currq,
					"Delayed work pending. Exiting");
				goto done;
			}
		}

		if (!force && queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
						&rd->read_idle.idle_work,
						rd->read_idle.idle_time)) {
				row_log_rowq(rd, currq,
					"Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!");
			} else
				row_log_rowq(rd, currq,
					"Scheduled delayed work. exiting");
			goto done;
		} else {
			row_log_rowq(rd, currq,
				"Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}
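
/*
 * Summary of the dispatch decision order above (an added note, not in the
 * original sources):
 * 1. Preempt in favor of a higher-priority queue that was skipped
 *    ("unserved") earlier in the cycle and has since received requests.
 * 2. If the current queue has used up its quantum, rotate to the next
 *    non-empty queue and dispatch from it.
 * 3. If the current (read) queue is empty but idling is armed, schedule
 *    the delayed work and wait idle_time for another read instead of
 *    moving on (unless this is a forced dispatch).
 * 4. Otherwise dispatch the request at the head of the current queue.
 */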

/*
 * row_init_queue() - Init scheduler data structures
 * @q:	requests queue
 *
 * Return pointer to struct row_data to be saved in elevator for
 * this dispatch queue
 *
 */
static void *row_init_queue(struct request_queue *q)
{

	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			     GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].rqueue.fifo);
		rdata->row_queues[i].disp_quantum = queue_quantum[i];
		rdata->row_queues[i].rqueue.rdata = rdata;
		rdata->row_queues[i].rqueue.prio = i;
		rdata->row_queues[i].rqueue.idle_data.begin_idling = false;
		rdata->row_queues[i].rqueue.idle_data.last_insert_time =
			ktime_set(0, 0);
	}

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases
	 */
	rdata->read_idle.idle_time = msecs_to_jiffies(ROW_IDLE_TIME_MSEC);
	/* Maybe 0 on some platforms */
	if (!rdata->read_idle.idle_time)
		rdata->read_idle.idle_time = 1;
	rdata->read_idle.freq = ROW_READ_FREQ_MSEC;
	rdata->read_idle.idle_workqueue = alloc_workqueue("row_idle_work",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!rdata->read_idle.idle_workqueue)
		panic("Failed to create idle workqueue\n");
	INIT_DELAYED_WORK(&rdata->read_idle.idle_work, kick_queue);

	rdata->curr_queue = ROWQ_PRIO_HIGH_READ;
	rdata->dispatch_queue = q;

	rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0;

	return rdata;
}

/*
 * row_exit_queue() - called on unloading the ROW scheduler
 * @e:	pointer to struct elevator_queue
 *
 */
static void row_exit_queue(struct elevator_queue *e)
{
	struct row_data *rd = (struct row_data *)e->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		BUG_ON(!list_empty(&rd->row_queues[i].rqueue.fifo));
	(void)cancel_delayed_work_sync(&rd->read_idle.idle_work);
	BUG_ON(delayed_work_pending(&rd->read_idle.idle_work));
	destroy_workqueue(rd->read_idle.idle_workqueue);
	kfree(rd);
}

/*
 * row_merged_requests() - Called when 2 requests are merged
 * @q:		requests queue
 * @rq:		request the two requests were merged into
 * @next:	request that was merged
 */
static void row_merged_requests(struct request_queue *q, struct request *rq,
				 struct request *next)
{
	struct row_queue *rqueue = RQ_ROWQ(next);

	list_del_init(&next->queuelist);

	rqueue->rdata->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * get_queue_type() - Get queue type for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and
 * dispatched from later on)
 *
 * TODO: Right now only 3 queues are used REG_READ, REG_WRITE
 * and REG_SWRITE
 */
static enum row_queue_prio get_queue_type(struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);

	if (data_dir == READ)
		return ROWQ_PRIO_REG_READ;
	else if (is_sync)
		return ROWQ_PRIO_REG_SWRITE;
	else
		return ROWQ_PRIO_REG_WRITE;
}

/*
 * row_set_request() - Set ROW data structures associated with this request.
 * @q:		requests queue
 * @rq:		pointer to the request
 * @gfp_mask:	ignored
 *
 */
static int
row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	rq->elv.priv[0] =
		(void *)(&rd->row_queues[get_queue_type(rq)]);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
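
/*
 * Note (added for clarity, not in the original sources): row_set_request()
 * stores the address of the row_queues[] element, while RQ_ROWQ() casts
 * elv.priv[0] to struct row_queue *. The two addresses coincide only
 * because rqueue is the first member of that anonymous struct; storing
 * &rd->row_queues[get_queue_type(rq)].rqueue would be the more explicit
 * form of the same thing.
 */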

/********** Helper sysfs functions/definitions for ROW attributes ******/
static ssize_t row_var_show(int var, char *page)
{
	return snprintf(page, 100, "%d\n", var);
}

static ssize_t row_var_store(int *var, const char *page, size_t count)
{
	int err;

	/* Parse directly into the int instead of casting to unsigned long * */
	err = kstrtoint(page, 10, var);

	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return row_var_show(__data, (page));				\
}
SHOW_FUNCTION(row_hp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0);
SHOW_FUNCTION(row_rp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0);
SHOW_FUNCTION(row_hp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_write_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0);
SHOW_FUNCTION(row_lp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0);
SHOW_FUNCTION(row_lp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_read_idle_show, rowd->read_idle.idle_time, 1);
SHOW_FUNCTION(row_read_idle_freq_show, rowd->read_idle.freq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e,				\
			const char *page, size_t count)			\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data;							\
	int ret = row_var_store(&__data, (page), count);		\
	if (__CONV)							\
		__data = (int)msecs_to_jiffies(__data);			\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __data;						\
	return ret;							\
}
STORE_FUNCTION(row_hp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 1, INT_MAX, 0);
STORE_FUNCTION(row_rp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_hp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_write_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_lp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_lp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_read_idle_store, &rowd->read_idle.idle_time, 1, INT_MAX, 1);
STORE_FUNCTION(row_read_idle_freq_store, &rowd->read_idle.freq, 1, INT_MAX, 0);

#undef STORE_FUNCTION

#define ROW_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, row_##name##_show, \
				      row_##name##_store)

static struct elv_fs_entry row_attrs[] = {
	ROW_ATTR(hp_read_quantum),
	ROW_ATTR(rp_read_quantum),
	ROW_ATTR(hp_swrite_quantum),
	ROW_ATTR(rp_swrite_quantum),
	ROW_ATTR(rp_write_quantum),
	ROW_ATTR(lp_read_quantum),
	ROW_ATTR(lp_swrite_quantum),
	ROW_ATTR(read_idle),
	ROW_ATTR(read_idle_freq),
	__ATTR_NULL
};
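
/*
 * Usage note (added for illustration, not from the original sources): once
 * "row" is the active elevator for a device, these attributes are expected
 * to appear under /sys/block/<dev>/queue/iosched/, so a quantum can be
 * tuned at runtime with, e.g.:
 *	echo 75 > /sys/block/<dev>/queue/iosched/hp_read_quantum
 * read_idle is reported in msec via jiffies_to_msecs() and stored
 * internally in jiffies.
 */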

static struct elevator_type iosched_row = {
	.ops = {
		.elevator_merge_req_fn		= row_merged_requests,
		.elevator_dispatch_fn		= row_dispatch_requests,
		.elevator_add_req_fn		= row_add_request,
		.elevator_former_req_fn		= elv_rb_former_request,
		.elevator_latter_req_fn		= elv_rb_latter_request,
		.elevator_set_req_fn		= row_set_request,
		.elevator_init_fn		= row_init_queue,
		.elevator_exit_fn		= row_exit_queue,
	},

	.elevator_attrs = row_attrs,
	.elevator_name = "row",
	.elevator_owner = THIS_MODULE,
};

static int __init row_init(void)
{
	elv_register(&iosched_row);
	return 0;
}

static void __exit row_exit(void)
{
	elv_unregister(&iosched_row);
}

module_init(row_init);
module_exit(row_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Read Over Write IO scheduler");