/*
 * ROW (Read Over Write) I/O scheduler.
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* See Documentation/block/row-iosched.txt */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/jiffies.h>

/*
 * enum row_queue_prio - Priorities of the ROW queues
 *
 * This enum defines the priorities (and the number of queues)
 * the requests will be distributed to. The higher the priority,
 * the bigger the dispatch quantum given to that queue.
 * ROWQ_PRIO_HIGH_READ is the highest priority queue.
 *
 */
enum row_queue_prio {
	ROWQ_PRIO_HIGH_READ = 0,
	ROWQ_PRIO_REG_READ,
	ROWQ_PRIO_HIGH_SWRITE,
	ROWQ_PRIO_REG_SWRITE,
	ROWQ_PRIO_REG_WRITE,
	ROWQ_PRIO_LOW_READ,
	ROWQ_PRIO_LOW_SWRITE,
	ROWQ_MAX_PRIO,
};

/* Flags indicating whether idling is enabled on the queue */
static const bool queue_idling_enabled[] = {
	true,	/* ROWQ_PRIO_HIGH_READ */
	true,	/* ROWQ_PRIO_REG_READ */
	false,	/* ROWQ_PRIO_HIGH_SWRITE */
	false,	/* ROWQ_PRIO_REG_SWRITE */
	false,	/* ROWQ_PRIO_REG_WRITE */
	false,	/* ROWQ_PRIO_LOW_READ */
	false,	/* ROWQ_PRIO_LOW_SWRITE */
};

/* Flags indicating whether the queue can notify on urgent requests */
static const bool urgent_queues[] = {
	true,	/* ROWQ_PRIO_HIGH_READ */
	true,	/* ROWQ_PRIO_REG_READ */
	false,	/* ROWQ_PRIO_HIGH_SWRITE */
	false,	/* ROWQ_PRIO_REG_SWRITE */
	false,	/* ROWQ_PRIO_REG_WRITE */
	false,	/* ROWQ_PRIO_LOW_READ */
	false,	/* ROWQ_PRIO_LOW_SWRITE */
};

/* Default values for the ROW queues' quanta in each dispatch cycle */
static const int queue_quantum[] = {
	100,	/* ROWQ_PRIO_HIGH_READ */
	100,	/* ROWQ_PRIO_REG_READ */
	2,	/* ROWQ_PRIO_HIGH_SWRITE */
	1,	/* ROWQ_PRIO_REG_SWRITE */
	1,	/* ROWQ_PRIO_REG_WRITE */
	1,	/* ROWQ_PRIO_LOW_READ */
	1	/* ROWQ_PRIO_LOW_SWRITE */
};
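
/*
 * For example, with the default quanta above and every queue backlogged,
 * one full dispatch cycle issues up to 100 high-prio reads, 100 regular
 * reads, 2 high-prio sync writes and one request from each remaining
 * queue before the cycle restarts at ROWQ_PRIO_HIGH_READ.
 */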

/* Default values for idling on read queues (in msec) */
#define ROW_IDLE_TIME_MSEC 5
#define ROW_READ_FREQ_MSEC 20
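
/*
 * With these defaults, a read queue that becomes empty is idled on for
 * roughly ROW_IDLE_TIME_MSEC before the scheduler moves on, but only if
 * the last two requests on that queue were inserted less than
 * ROW_READ_FREQ_MSEC apart (see row_add_request() and
 * row_dispatch_requests()).
 */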

/**
 * struct rowq_idling_data - parameters for idling on the queue
 * @last_insert_time:	time the last request was inserted
 *			to the queue
 * @begin_idling:	flag indicating whether we should idle
 *
 */
struct rowq_idling_data {
	ktime_t			last_insert_time;
	bool			begin_idling;
};

/**
 * struct row_queue - requests grouping structure
 * @rdata:		parent row_data structure
 * @fifo:		fifo of requests
 * @prio:		queue priority (enum row_queue_prio)
 * @nr_dispatched:	number of requests already dispatched in
 *			the current dispatch cycle
 * @slice:		number of requests to dispatch in a cycle
 * @nr_req:		number of requests in queue
 * @idle_data:		data for idling on queues
 *
 */
struct row_queue {
	struct row_data		*rdata;
	struct list_head	fifo;
	enum row_queue_prio	prio;

	unsigned int		nr_dispatched;
	unsigned int		slice;

	unsigned int		nr_req;

	/* used only for READ queues */
	struct rowq_idling_data	idle_data;
};

/**
 * struct idling_data - data for idling on empty rqueue
 * @idle_time:		idling duration (jiffies)
 * @freq:		min time between two requests that
 *			trigger idling (msec)
 * @idle_workqueue:	workqueue the idling delayed work runs on
 * @idle_work:		delayed work used for idling on the queue
 *
 */
struct idling_data {
	unsigned long			idle_time;
	u32				freq;

	struct workqueue_struct		*idle_workqueue;
	struct delayed_work		idle_work;
};

/**
 * struct row_data - Per block device rqueue structure
 * @dispatch_queue:	dispatch rqueue
 * @row_queues:		array of priority request queues with
 *			dispatch quantum per rqueue
 * @curr_queue:		index in the row_queues array of the
 *			currently serviced rqueue
 * @read_idle:		data for idling after READ request
 * @nr_reqs:		nr_reqs[0] holds the number of all READ requests in
 *			scheduler, nr_reqs[1] holds the number of all WRITE
 *			requests in scheduler
 * @cycle_flags:	used for marking unserved queues
 *
 */
struct row_data {
	struct request_queue		*dispatch_queue;

	struct {
		struct row_queue	rqueue;
		int			disp_quantum;
	} row_queues[ROWQ_MAX_PRIO];

	enum row_queue_prio		curr_queue;

	struct idling_data		read_idle;
	unsigned int			nr_reqs[2];

	unsigned int			cycle_flags;
};

#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elv.priv[0]))

#define row_log(q, fmt, args...)   \
	blk_add_trace_msg(q, "%s():" fmt , __func__, ##args)
#define row_log_rowq(rdata, rowq_id, fmt, args...)		\
	blk_add_trace_msg(rdata->dispatch_queue, "rowq%d " fmt, \
		rowq_id, ##args)

static inline void row_mark_rowq_unserved(struct row_data *rd,
					 enum row_queue_prio qnum)
{
	rd->cycle_flags |= (1 << qnum);
}

static inline void row_clear_rowq_unserved(struct row_data *rd,
					 enum row_queue_prio qnum)
{
	rd->cycle_flags &= ~(1 << qnum);
}

static inline int row_rowq_unserved(struct row_data *rd,
				 enum row_queue_prio qnum)
{
	return rd->cycle_flags & (1 << qnum);
}
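
/*
 * cycle_flags is a bitmask indexed by queue priority: bit i is set when
 * queue i is skipped while empty during a dispatch cycle (see
 * row_choose_queue()) and cleared once a request is actually dispatched
 * from it (see row_dispatch_insert()).
 */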

static inline void __maybe_unused row_dump_queues_stat(struct row_data *rd)
{
	int i;

	row_log(rd->dispatch_queue, " Queues status (curr_queue=%d):",
			rd->curr_queue);
	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		row_log(rd->dispatch_queue,
			"queue%d: dispatched= %d, nr_req=%d", i,
			rd->row_queues[i].rqueue.nr_dispatched,
			rd->row_queues[i].rqueue.nr_req);
}

/******************** Static helper functions ***********************/
/*
 * kick_queue() - Wake up device driver queue thread
 * @work:	pointer to struct work_struct
 *
 * This is an idling delayed work function. Its purpose is to wake up the
 * device driver in order for it to start fetching requests.
 *
 */
static void kick_queue(struct work_struct *work)
{
	struct delayed_work *idle_work = to_delayed_work(work);
	struct idling_data *read_data =
		container_of(idle_work, struct idling_data, idle_work);
	struct row_data *rd =
		container_of(read_data, struct row_data, read_idle);

	row_log_rowq(rd, rd->curr_queue, "Performing delayed work");
	/* Mark idling process as done */
	rd->row_queues[rd->curr_queue].rqueue.idle_data.begin_idling = false;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1]))
		row_log(rd->dispatch_queue, "No requests in scheduler");
	else {
		spin_lock_irq(rd->dispatch_queue->queue_lock);
		__blk_run_queue(rd->dispatch_queue);
		spin_unlock_irq(rd->dispatch_queue->queue_lock);
	}
}

/*
 * row_restart_disp_cycle() - Restart the dispatch cycle
 * @rd:	pointer to struct row_data
 *
 * This function restarts the dispatch cycle by:
 * - Setting current queue to ROWQ_PRIO_HIGH_READ
 * - For each queue: reset the number of requests dispatched in
 *   the cycle
 */
static inline void row_restart_disp_cycle(struct row_data *rd)
{
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		rd->row_queues[i].rqueue.nr_dispatched = 0;

	rd->curr_queue = ROWQ_PRIO_HIGH_READ;
	row_log(rd->dispatch_queue, "Restarting cycle");
}

static inline void row_get_next_queue(struct row_data *rd)
{
	rd->curr_queue++;
	if (rd->curr_queue == ROWQ_MAX_PRIO)
		row_restart_disp_cycle(rd);
}

/******************* Elevator callback functions *********************/

/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics */

	if (queue_idling_enabled[rqueue->prio]) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time)) <
				rd->read_idle.freq) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling");
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (urgent_queues[rqueue->prio] &&
	    row_rowq_unserved(rd, rqueue->prio)) {
		row_log_rowq(rd, rqueue->prio,
			"added urgent request (total on queue=%d)",
			rqueue->nr_req);
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}

/**
 * row_reinsert_req() - Reinsert request back to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 * Reinsert the given request back to the queue it was
 * dispatched from as if it was never dispatched.
 *
 * Returns 0 on success, error code otherwise
 */
static int row_reinsert_req(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	/* Verify rqueue is legitimate */
	if (rqueue->prio >= ROWQ_MAX_PRIO) {
		pr_err("\n\nROW BUG: row_reinsert_req() rqueue->prio = %d\n",
			   rqueue->prio);
		blk_dump_rq_flags(rq, "");
		return -EIO;
	}

	list_add(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;

	row_log_rowq(rd, rqueue->prio,
		"request reinserted (total on queue=%d)", rqueue->nr_req);

	return 0;
}

/**
 * row_urgent_pending() - Return TRUE if there is an urgent
 *			  request in the scheduler
 * @q:	requests queue
 */
static bool row_urgent_pending(struct request_queue *q)
{
	struct row_data *rd = q->elevator->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		if (urgent_queues[i] && row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, i,
				"Urgent request pending (curr=%i)",
				rd->curr_queue);
			return true;
		}

	return false;
}

/**
 * row_remove_request() - Remove given request from scheduler
 * @q:	requests queue
 * @rq:	request to remove
 *
 */
static void row_remove_request(struct request_queue *q,
			       struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	rq_fifo_clear(rq);
	rqueue->nr_req--;
	rd->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * row_dispatch_insert() - move request to dispatch queue
 * @rd:	pointer to struct row_data
 *
 * This function moves the next request to dispatch from
 * rd->curr_queue to the dispatch queue
 *
 */
static void row_dispatch_insert(struct row_data *rd)
{
	struct request *rq;

	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next);
	row_remove_request(rd->dispatch_queue, rq);
	elv_dispatch_add_tail(rd->dispatch_queue, rq);
	rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++;
	row_clear_rowq_unserved(rd, rd->curr_queue);
	row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d",
		     rd->row_queues[rd->curr_queue].rqueue.nr_dispatched);
}

/*
 * row_choose_queue() - choose the next queue to dispatch from
 * @rd:	pointer to struct row_data
 *
 * Updates rd->curr_queue. Returns 1 if there are requests to
 * dispatch, 0 if there are no requests in scheduler
 *
 */
static int row_choose_queue(struct row_data *rd)
{
	int prev_curr_queue = rd->curr_queue;

	if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) {
		row_log(rd->dispatch_queue, "No more requests in scheduler");
		return 0;
	}

	row_get_next_queue(rd);

	/*
	 * Loop over all queues to find the next queue that is not empty.
	 * Stop when we get back to the queue we started from.
	 */
	while (list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo)
	       && rd->curr_queue != prev_curr_queue) {
		/* Mark rqueue as unserved */
		row_mark_rowq_unserved(rd, rd->curr_queue);
		row_get_next_queue(rd);
	}

	return 1;
}

/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:	ignored
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, currq,
				" Preempting for unserved rowq%d. (nr_req=%u)",
				i, rd->row_queues[currq].rqueue.nr_req);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/* check idling */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			if (force) {
				(void)cancel_delayed_work(
					&rd->read_idle.idle_work);
				row_log_rowq(rd, currq,
					"Canceled delayed work - forced dispatch");
			} else {
				row_log_rowq(rd, currq,
					"Delayed work pending. Exiting");
				goto done;
			}
		}

		if (!force && queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
						&rd->read_idle.idle_work,
						rd->read_idle.idle_time)) {
				row_log_rowq(rd, currq,
					     "Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!");
			} else
				row_log_rowq(rd, currq,
					     "Scheduled delayed work. exiting");
			goto done;
		} else {
			row_log_rowq(rd, currq,
				     "Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}

/*
 * row_init_queue() - Init scheduler data structures
 * @q:	requests queue
 *
 * Return pointer to struct row_data to be saved in elevator for
 * this dispatch queue
 *
 */
static void *row_init_queue(struct request_queue *q)
{
	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			     GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].rqueue.fifo);
		rdata->row_queues[i].disp_quantum = queue_quantum[i];
		rdata->row_queues[i].rqueue.rdata = rdata;
		rdata->row_queues[i].rqueue.prio = i;
		rdata->row_queues[i].rqueue.idle_data.begin_idling = false;
		rdata->row_queues[i].rqueue.idle_data.last_insert_time =
			ktime_set(0, 0);
	}

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases
	 */
	rdata->read_idle.idle_time = msecs_to_jiffies(ROW_IDLE_TIME_MSEC);
	/* Maybe 0 on some platforms */
	if (!rdata->read_idle.idle_time)
		rdata->read_idle.idle_time = 1;
	rdata->read_idle.freq = ROW_READ_FREQ_MSEC;
	rdata->read_idle.idle_workqueue = alloc_workqueue("row_idle_work",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!rdata->read_idle.idle_workqueue)
		panic("Failed to create idle workqueue\n");
	INIT_DELAYED_WORK(&rdata->read_idle.idle_work, kick_queue);

	rdata->curr_queue = ROWQ_PRIO_HIGH_READ;
	rdata->dispatch_queue = q;

	rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0;

	return rdata;
}

/*
 * row_exit_queue() - called on unloading the ROW scheduler
 * @e:	pointer to struct elevator_queue
 *
 */
static void row_exit_queue(struct elevator_queue *e)
{
	struct row_data *rd = (struct row_data *)e->elevator_data;
	int i;

	for (i = 0; i < ROWQ_MAX_PRIO; i++)
		BUG_ON(!list_empty(&rd->row_queues[i].rqueue.fifo));
	(void)cancel_delayed_work_sync(&rd->read_idle.idle_work);
	BUG_ON(delayed_work_pending(&rd->read_idle.idle_work));
	destroy_workqueue(rd->read_idle.idle_workqueue);
	kfree(rd);
}

/*
 * row_merged_requests() - Called when 2 requests are merged
 * @q:		requests queue
 * @rq:		request the two requests were merged into
 * @next:	request that was merged
 */
static void row_merged_requests(struct request_queue *q, struct request *rq,
				 struct request *next)
{
	struct row_queue *rqueue = RQ_ROWQ(next);

	list_del_init(&next->queuelist);
	rqueue->nr_req--;

	rqueue->rdata->nr_reqs[rq_data_dir(rq)]--;
}

/*
 * get_queue_type() - Get queue type for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and
 * dispatched from later on)
 *
 * TODO: Right now only 3 queues are used: REG_READ, REG_WRITE
 * and REG_SWRITE
 */
static enum row_queue_prio get_queue_type(struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);

	if (data_dir == READ)
		return ROWQ_PRIO_REG_READ;
	else if (is_sync)
		return ROWQ_PRIO_REG_SWRITE;
	else
		return ROWQ_PRIO_REG_WRITE;
}
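
/*
 * Note that rq_is_sync() is also true for every READ, which is why the
 * READ check above comes first: only WRITEs reach the is_sync test, so a
 * sync WRITE lands in ROWQ_PRIO_REG_SWRITE and any other WRITE in
 * ROWQ_PRIO_REG_WRITE.
 */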

/*
 * row_set_request() - Set ROW data structures associated with this request.
 * @q:		requests queue
 * @rq:		pointer to the request
 * @gfp_mask:	ignored
 *
 */
static int
row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	rq->elv.priv[0] =
		(void *)(&rd->row_queues[get_queue_type(rq)]);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

/********** Helper sysfs functions/definitions for ROW attributes ******/
static ssize_t row_var_show(int var, char *page)
{
	return snprintf(page, 100, "%d\n", var);
}

static ssize_t row_var_store(int *var, const char *page, size_t count)
{
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (!err)
		*var = (int)val;

	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return row_var_show(__data, (page));				\
}
SHOW_FUNCTION(row_hp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0);
SHOW_FUNCTION(row_rp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0);
SHOW_FUNCTION(row_hp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_rp_write_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0);
SHOW_FUNCTION(row_lp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0);
SHOW_FUNCTION(row_lp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0);
SHOW_FUNCTION(row_read_idle_show, rowd->read_idle.idle_time, 1);
SHOW_FUNCTION(row_read_idle_freq_show, rowd->read_idle.freq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e,				\
		const char *page, size_t count)				\
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data;							\
	int ret = row_var_store(&__data, (page), count);		\
	if (__CONV)							\
		__data = (int)msecs_to_jiffies(__data);			\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __data;						\
	return ret;							\
}
STORE_FUNCTION(row_hp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_hp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_rp_write_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_lp_read_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_lp_swrite_quantum_store,
	&rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum,
	1, INT_MAX, 0);
STORE_FUNCTION(row_read_idle_store, &rowd->read_idle.idle_time, 1, INT_MAX, 1);
STORE_FUNCTION(row_read_idle_freq_store, &rowd->read_idle.freq, 1, INT_MAX, 0);

#undef STORE_FUNCTION

#define ROW_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, row_##name##_show, \
				      row_##name##_store)

static struct elv_fs_entry row_attrs[] = {
	ROW_ATTR(hp_read_quantum),
	ROW_ATTR(rp_read_quantum),
	ROW_ATTR(hp_swrite_quantum),
	ROW_ATTR(rp_swrite_quantum),
	ROW_ATTR(rp_write_quantum),
	ROW_ATTR(lp_read_quantum),
	ROW_ATTR(lp_swrite_quantum),
	ROW_ATTR(read_idle),
	ROW_ATTR(read_idle_freq),
	__ATTR_NULL
};
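
/*
 * These attributes are exposed under the elevator's sysfs directory once
 * "row" is the active scheduler for a device, for example:
 *
 *	echo 50 > /sys/block/<dev>/queue/iosched/hp_read_quantum
 *	cat /sys/block/<dev>/queue/iosched/read_idle
 *
 * The quantum values are plain request counts; read_idle is read and
 * written in msec (converted to/from jiffies internally).
 */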

static struct elevator_type iosched_row = {
	.ops = {
		.elevator_merge_req_fn = row_merged_requests,
		.elevator_dispatch_fn = row_dispatch_requests,
		.elevator_add_req_fn = row_add_request,
		.elevator_reinsert_req_fn = row_reinsert_req,
		.elevator_is_urgent_fn = row_urgent_pending,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = row_set_request,
		.elevator_init_fn = row_init_queue,
		.elevator_exit_fn = row_exit_queue,
	},

	.elevator_attrs = row_attrs,
	.elevator_name = "row",
	.elevator_owner = THIS_MODULE,
};

static int __init row_init(void)
{
	elv_register(&iosched_row);
	return 0;
}

static void __exit row_exit(void)
{
	elv_unregister(&iosched_row);
}

module_init(row_init);
module_exit(row_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Read Over Write IO scheduler");