/*
 *  linux/drivers/block/cfq-iosched.c
 *
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and from Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/writeback.h>

/*
 * tunables
 */
static int cfq_quantum = 4;		/* max queue in one round of service */
static int cfq_queued = 8;		/* minimum rq allocate limit per-queue */
static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };	/* fifo expiry: [0] async, [1] sync */
static int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static int cfq_back_penalty = 2;	/* penalty of a backwards seek */

static int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 100;
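
/*
 * for reference, with the common HZ == 1000 the jiffies-based defaults
 * above work out to roughly: 250ms/125ms async/sync fifo expiry, 100ms
 * sync slices, 40ms async slices and a 10ms idle window (all values
 * scale with HZ)
 */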

#define CFQ_IDLE_GRACE		(HZ / 10)
#define CFQ_SLICE_SCALE		(5)

#define CFQ_KEY_ASYNC		(0)
#define CFQ_KEY_ANY		(0xffff)

/*
 * disable queueing at the driver/hardware level
 */
static int cfq_max_depth = 1;

/*
 * for the hash of cfqq inside the cfqd (1 << 6 == 64 buckets)
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)

/*
 * for the hash of crq inside the cfqd
 */
#define CFQ_MHASH_SHIFT		6
#define CFQ_MHASH_BLOCK(sec)	((sec) >> 3)
#define CFQ_MHASH_ENTRIES	(1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec)	hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	hlist_entry((ptr), struct cfq_rq, hash)
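
/*
 * note that rq_hash_key() is the sector just past the end of a request,
 * so looking up a bio's start sector in this hash finds the request it
 * could back merge into (see cfq_merge() below)
 */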

#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)
#define list_entry_fifo(ptr)	list_entry((ptr), struct request, queuelist)

/* the per-request cfq_rq hangs off the request's elevator_private */
#define RQ_DATA(rq)		(rq)->elevator_private

/*
 * rb-tree defines
 */
#define RB_NONE			(2)
#define RB_EMPTY(node)		((node)->rb_node == NULL)
#define RB_CLEAR_COLOR(node)	(node)->rb_color = RB_NONE
#define RB_CLEAR(node)		do {	\
	(node)->rb_parent = NULL;	\
	RB_CLEAR_COLOR((node));		\
	(node)->rb_right = NULL;	\
	(node)->rb_left = NULL;		\
} while (0)
#define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
#define ON_RB(node)		((node)->rb_color != RB_NONE)
#define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq)		(rq)->sector

/*
 * ON_RB() reuses the rb node color to track tree membership: linked
 * nodes are only ever red (0) or black (1), so RB_NONE (2) safely marks
 * a node that is not on any tree
 */

static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_be(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
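
/*
 * the three ioprio classes map onto the rr lists used further down: RT
 * queues go straight onto cur_rr, BE queues round robin through rr_list[]
 * indexed by priority, and IDLE queues sit on idle_rr until nothing
 * better is pending (see cfq_resort_rr_list())
 */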

#define ASYNC			(0)
#define SYNC			(1)

#define cfq_cfqq_dispatched(cfqq)	\
	((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])

#define cfq_cfqq_class_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)

#define cfq_cfqq_sync(cfqq)		\
	(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
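
/*
 * a queue is class sync if it is keyed per-process rather than by
 * CFQ_KEY_ASYNC; cfq_cfqq_sync() additionally treats a queue as sync
 * while it has sync requests on the dispatch list
 */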

/*
 * Per block device queue structure
 */
struct cfq_data {
	atomic_t ref;
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct list_head rr_list[CFQ_PRIO_LISTS];
	struct list_head busy_rr;
	struct list_head cur_rr;
	struct list_head idle_rr;
	unsigned int busy_queues;

	/*
	 * non-ordered list of empty cfqq's
	 */
	struct list_head empty_list;

	/*
	 * cfqq lookup hash
	 */
	struct hlist_head *cfq_hash;

	/*
	 * global crq hash for all queues
	 */
	struct hlist_head *crq_hash;

	unsigned int max_queued;

	mempool_t *crq_pool;

	int rq_in_driver;

	/*
	 * schedule slice state info and idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;
	int cur_prio, cur_end_prio;
	unsigned int dispatch_slice;

	struct timer_list idle_class_timer;

	sector_t last_sector;
	unsigned long last_end_request;

	unsigned int rq_starved;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_queued;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_max_depth;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* cfqq lookup hash */
	struct hlist_node cfq_hash;
	/* hash key */
	unsigned int key;
	/* on either rr or empty list of cfqd */
	struct list_head cfq_list;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct cfq_rq *next_crq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_start;
	unsigned long slice_end;
	unsigned long slice_left;
	unsigned long service_last;

	/* number of requests that are on the dispatch list */
	int on_dispatch[2];

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;
};

struct cfq_rq {
	struct rb_node rb_node;
	sector_t rb_key;
	struct request *request;
	struct hlist_node hash;

	struct cfq_queue *cfq_queue;
	struct cfq_io_context *io_context;

	unsigned int crq_flags;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,
	CFQ_CFQQ_FLAG_wait_request,
	CFQ_CFQQ_FLAG_must_alloc,
	CFQ_CFQQ_FLAG_must_alloc_slice,
	CFQ_CFQQ_FLAG_must_dispatch,
	CFQ_CFQQ_FLAG_fifo_expire,
	CFQ_CFQQ_FLAG_idle_window,
	CFQ_CFQQ_FLAG_prio_changed,
	CFQ_CFQQ_FLAG_expired,
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

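/*
 * e.g. CFQ_CFQQ_FNS(on_rr) expands to cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags
 */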
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(expired);
#undef CFQ_CFQQ_FNS

enum cfq_rq_state_flags {
	CFQ_CRQ_FLAG_in_flight = 0,
	CFQ_CRQ_FLAG_in_driver,
	CFQ_CRQ_FLAG_is_sync,
	CFQ_CRQ_FLAG_requeued,
};

#define CFQ_CRQ_FNS(name)						\
static inline void cfq_mark_crq_##name(struct cfq_rq *crq)		\
{									\
	crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name);			\
}									\
static inline void cfq_clear_crq_##name(struct cfq_rq *crq)		\
{									\
	crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name);			\
}									\
static inline int cfq_crq_##name(const struct cfq_rq *crq)		\
{									\
	return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;	\
}

CFQ_CRQ_FNS(in_flight);
CFQ_CRQ_FNS(in_driver);
CFQ_CRQ_FNS(is_sync);
CFQ_CRQ_FNS(requeued);
#undef CFQ_CRQ_FNS

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
static void cfq_put_cfqd(struct cfq_data *cfqd);

#define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
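
/*
 * note: PF_SYNCWRITE is expected to be set on a task around synchronous
 * write paths, so process_sync() treats that task's io as sync
 */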

/*
 * lots of deadline iosched dupes, can be abstracted later...
 */
static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
	hlist_del_init(&crq->hash);
}

static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
{
	cfq_del_crq_hash(crq);

	if (q->last_merge == crq->request)
		q->last_merge = NULL;
}

static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
	const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));

	hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
}

static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
	struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
	struct hlist_node *entry, *next;

	hlist_for_each_safe(entry, next, hash_list) {
		struct cfq_rq *crq = list_entry_hash(entry);
		struct request *__rq = crq->request;

		if (!rq_mergeable(__rq)) {
			cfq_del_crq_hash(crq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}

static inline int cfq_pending_requests(struct cfq_data *cfqd)
{
	return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfq_pending_requests(cfqd);
}

/*
 * Lifted from AS - choose which of crq1 and crq2 is best served now.
 * We choose the request that is closest to the head right now. Distances
 * behind the head are penalized and only allowed to a certain extent.
 */
static struct cfq_rq *
cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	int r1_wrap = 0, r2_wrap = 0;	/* requests are behind the disk head */
	unsigned long back_max;

	if (crq1 == NULL || crq1 == crq2)
		return crq2;
	if (crq2 == NULL)
		return crq1;
	if (cfq_crq_requeued(crq1))
		return crq1;
	if (cfq_crq_requeued(crq2))
		return crq2;

	s1 = crq1->request->sector;
	s2 = crq2->request->sector;

	last = cfqd->last_sector;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		r1_wrap = 1;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		r2_wrap = 1;

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return crq1;
	else if (!r2_wrap && r1_wrap)
		return crq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return crq1;
		else
			return crq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return crq1;
	else if (d2 < d1)
		return crq2;
	else {
		if (s1 >= s2)
			return crq1;
		else
			return crq2;
	}
}
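
/*
 * worked example, in sectors, using the defaults above: with last == 1000,
 * back_max == 16*1024 KiB i.e. 32768 sectors, and back_penalty == 2, a
 * request at sector 1100 gets d == 100 while one at sector 900 gets
 * d == (1000 - 900) * 2 == 200, so the forward request wins even though
 * both are 100 sectors from the head
 */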

/*
 * would be nice to take fifo expire time into account as well
 */
static struct cfq_rq *
cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct cfq_rq *last)
{
	struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
	struct rb_node *rbnext, *rbprev;

	rbnext = NULL;
	if (ON_RB(&last->rb_node))
		rbnext = rb_next(&last->rb_node);
	if (!rbnext) {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext == &last->rb_node)
			rbnext = NULL;
	}

	rbprev = rb_prev(&last->rb_node);

	if (rbprev)
		crq_prev = rb_entry_crq(rbprev);
	if (rbnext)
		crq_next = rb_entry_crq(rbnext);

	return cfq_choose_req(cfqd, crq_next, crq_prev);
}

static void cfq_update_next_crq(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;

	if (cfqq->next_crq == crq)
		cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
}

static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct list_head *list, *entry;

	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	list_del(&cfqq->cfq_list);

	if (cfq_class_rt(cfqq))
		list = &cfqd->cur_rr;
	else if (cfq_class_idle(cfqq))
		list = &cfqd->idle_rr;
	else {
		/*
		 * if cfqq has requests in flight, don't allow it to be
		 * found in cfq_set_active_queue before it has finished them.
		 * this is done to increase fairness between a process that
		 * has lots of io pending and one that generates requests
		 * only sporadically or synchronously
		 */
		if (cfq_cfqq_dispatched(cfqq))
			list = &cfqd->busy_rr;
		else
			list = &cfqd->rr_list[cfqq->ioprio];
	}

	/*
	 * if queue was preempted, just add to front to be fair. busy_rr
	 * isn't sorted.
	 */
	if (preempted || list == &cfqd->busy_rr) {
		list_add(&cfqq->cfq_list, list);
		return;
	}

	/*
	 * sort by when queue was last serviced
	 */
	entry = list;
	while ((entry = entry->prev) != list) {
		struct cfq_queue *__cfqq = list_entry_cfqq(entry);

		if (!__cfqq->service_last)
			break;
		if (time_before(__cfqq->service_last, cfqq->service_last))
			break;
	}

	list_add(&cfqq->cfq_list, entry);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqq, requeue);
}

static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);
	list_move(&cfqq->cfq_list, &cfqd->empty_list);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_crq_rb(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;

	if (ON_RB(&crq->rb_node)) {
		struct cfq_data *cfqd = cfqq->cfqd;
		const int sync = cfq_crq_is_sync(crq);

		BUG_ON(!cfqq->queued[sync]);
		cfqq->queued[sync]--;

		cfq_update_next_crq(crq);

		rb_erase(&crq->rb_node, &cfqq->sort_list);
		RB_CLEAR_COLOR(&crq->rb_node);

		if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
			cfq_del_cfqq_rr(cfqd, cfqq);
	}
}

static struct cfq_rq *
__cfq_add_crq_rb(struct cfq_rq *crq)
{
	struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_rq *__crq;

	while (*p) {
		parent = *p;
		__crq = rb_entry_crq(parent);

		if (crq->rb_key < __crq->rb_key)
			p = &(*p)->rb_left;
		else if (crq->rb_key > __crq->rb_key)
			p = &(*p)->rb_right;
		else
			return __crq;
	}

	rb_link_node(&crq->rb_node, parent, p);
	return NULL;
}

static void cfq_add_crq_rb(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq = crq->request;
	struct cfq_rq *__alias;

	crq->rb_key = rq_rb_key(rq);
	cfqq->queued[cfq_crq_is_sync(crq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
		cfq_dispatch_sort(cfqd->queue, __alias);

	rb_insert_color(&crq->rb_node, &cfqq->sort_list);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
}

static inline void
cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	if (ON_RB(&crq->rb_node)) {
		rb_erase(&crq->rb_node, &cfqq->sort_list);
		cfqq->queued[cfq_crq_is_sync(crq)]--;
	}

	cfq_add_crq_rb(crq);
}

static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
	struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
	struct rb_node *n;

	if (!cfqq)
		goto out;

	n = cfqq->sort_list.rb_node;
	while (n) {
		struct cfq_rq *crq = rb_entry_crq(n);

		if (sector < crq->rb_key)
			n = n->rb_left;
		else if (sector > crq->rb_key)
			n = n->rb_right;
		else
			return crq->request;
	}

out:
	return NULL;
}

static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	if (crq) {
		struct cfq_queue *cfqq = crq->cfq_queue;

		if (cfq_crq_in_driver(crq)) {
			cfq_clear_crq_in_driver(crq);
			WARN_ON(!cfqd->rq_in_driver);
			cfqd->rq_in_driver--;
		}
		if (cfq_crq_in_flight(crq)) {
			const int sync = cfq_crq_is_sync(crq);

			cfq_clear_crq_in_flight(crq);
			WARN_ON(!cfqq->on_dispatch[sync]);
			cfqq->on_dispatch[sync]--;
		}
		cfq_mark_crq_requeued(crq);
	}
}

/*
 * make sure the service time gets corrected on reissue of this request
 */
static void cfq_requeue_request(request_queue_t *q, struct request *rq)
{
	cfq_deactivate_request(q, rq);
	list_add(&rq->queuelist, &q->queue_head);
}

static void cfq_remove_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);

	if (crq) {
		list_del_init(&rq->queuelist);
		cfq_del_crq_rb(crq);
		cfq_remove_merge_hints(q, crq);
	}
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		ret = ELEVATOR_BACK_MERGE;
		goto out;
	}

	__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		ret = ELEVATOR_FRONT_MERGE;
		goto out;
	}

	return ELEVATOR_NO_MERGE;
out:
	q->last_merge = __rq;
out_insert:
	*req = __rq;
	return ret;
}

static void cfq_merged_request(request_queue_t *q, struct request *req)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(req);

	cfq_del_crq_hash(crq);
	cfq_add_crq_hash(cfqd, crq);

	if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
		struct cfq_queue *cfqq = crq->cfq_queue;

		cfq_update_next_crq(crq);
		cfq_reposition_crq_rb(cfqq, crq);
	}

	q->last_merge = req;
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
	cfq_merged_request(q, rq);

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(q, next);
}

static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_start = jiffies;
		cfqq->slice_end = 0;
		cfqq->slice_left = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_clear_cfqq_expired(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * the prio "window" scanned for busy queues expands by one level each
 * round until it covers all levels, i.e. successive rounds consider:
 * 0
 * 0,1
 * 0,1,2
 * 0,1,2,3
 * 0,1,2,3,4
 * 0,1,2,3,4,5
 * 0,1,2,3,4,5,6
 * 0,1,2,3,4,5,6,7
 */
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
	int prio, wrap;

	prio = -1;
	wrap = 0;
	do {
		int p;

		for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
			if (!list_empty(&cfqd->rr_list[p])) {
				prio = p;
				break;
			}
		}

		if (prio != -1)
			break;
		cfqd->cur_prio = 0;
		if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
			cfqd->cur_end_prio = 0;
			if (wrap)
				break;
			wrap = 1;
		}
	} while (1);

	if (unlikely(prio == -1))
		return -1;

	BUG_ON(prio >= CFQ_PRIO_LISTS);

	list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);

	cfqd->cur_prio = prio + 1;
	if (cfqd->cur_prio > cfqd->cur_end_prio) {
		cfqd->cur_end_prio = cfqd->cur_prio;
		cfqd->cur_prio = 0;
	}
	if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
		cfqd->cur_prio = 0;
		cfqd->cur_end_prio = 0;
	}

	return prio;
}

static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	/*
	 * if current queue is expired but not done with its requests yet,
	 * wait for that to happen
	 */
	if ((cfqq = cfqd->active_queue) != NULL) {
		if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
			return NULL;
	}

	/*
	 * if current list is non-empty, grab first entry. if it is empty,
	 * get next prio level and grab first entry then if any are spliced
	 */
	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
		cfqq = list_entry_cfqq(cfqd->cur_rr.next);

	/*
	 * if we have idle queues and no rt or be queues had pending
	 * requests, either allow immediate service if the grace period
	 * has passed or arm the idle grace timer
	 */
	if (!cfqq && !list_empty(&cfqd->idle_rr)) {
		unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;

		if (time_after_eq(jiffies, end))
			cfqq = list_entry_cfqq(cfqd->idle_rr.next);
		else
			mod_timer(&cfqd->idle_class_timer, end);
	}

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int preempted)
{
	unsigned long now = jiffies;

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	if (!preempted && !cfq_cfqq_dispatched(cfqq))
		cfqq->service_last = now;

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled out
	 * or was preempted
	 */
	if (time_after(cfqq->slice_end, now))
		cfqq->slice_left = cfqq->slice_end - now;
	else
		cfqq->slice_left = 0;

	if (cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, preempted);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}

	cfqd->dispatch_slice = 0;
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq) {
		/*
		 * use deferred expiry, if there are requests in progress,
		 * so as not to disturb the slice of the next queue
		 */
		if (cfq_cfqq_dispatched(cfqq))
			cfq_mark_cfqq_expired(cfqq);
		else
			__cfq_slice_expired(cfqd, cfqq, preempted);
	}
}

static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	WARN_ON(!RB_EMPTY(&cfqq->sort_list));
	WARN_ON(cfqq != cfqd->active_queue);

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle)
		return 0;
	if (!cfq_cfqq_idle_window(cfqq))
		return 0;
	/*
	 * task has exited, don't wait
	 */
	if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
		return 0;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	if (!timer_pending(&cfqd->idle_slice_timer)) {
		unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);

		cfqd->idle_slice_timer.expires = jiffies + slice_left;
		add_timer(&cfqd->idle_slice_timer);
	}

	return 1;
}

/*
 * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
 * this function sorts the selected request into the dispatch list by
 * sector to minimize seeks. we start at cfqd->last_sector, not 0.
 */
static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct list_head *head = &q->queue_head, *entry = head;
	struct request *__rq;
	sector_t last;

	list_del(&crq->request->queuelist);

	last = cfqd->last_sector;
	list_for_each_entry_reverse(__rq, head, queuelist) {
		struct cfq_rq *__crq = RQ_DATA(__rq);

		if (blk_barrier_rq(__rq))
			break;
		if (!blk_fs_request(__rq))
			break;
		if (cfq_crq_requeued(__crq))
			break;

		if (__rq->sector <= crq->request->sector)
			break;
		if (__rq->sector > last && crq->request->sector < last) {
			last = crq->request->sector + crq->request->nr_sectors;
			break;
		}
		entry = &__rq->queuelist;
	}

	cfqd->last_sector = last;

	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);

	cfq_del_crq_rb(crq);
	cfq_remove_merge_hints(q, crq);

	cfq_mark_crq_in_flight(crq);
	cfq_clear_crq_requeued(crq);

	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
	list_add_tail(&crq->request->queuelist, entry);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	struct cfq_rq *crq;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	if (!list_empty(&cfqq->fifo)) {
		int fifo = cfq_cfqq_class_sync(cfqq);

		crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
		rq = crq->request;
		if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
			cfq_mark_cfqq_fifo_expire(cfqq);
			return crq;
		}
	}

	return NULL;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}
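
/*
 * e.g. with the defaults at HZ == 1000: the sync base_slice is 100ms and
 * the scale step is 100/5 == 20ms, so ioprio 0 gets 100 + 20*4 == 180ms,
 * ioprio 4 gets 100ms and ioprio 7 gets 100 - 20*3 == 40ms per slice
 */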

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
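
/*
 * e.g. with the default cfq_slice_async_rq == 2 and CFQ_PRIO_LISTS == 8,
 * an ioprio 0 queue may dispatch up to 2 * (2 + 2*7) == 32 requests per
 * slice while an ioprio 7 queue gets only 2 * (2 + 0) == 4
 */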

/*
 * get next queue for service
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
{
	unsigned long now = jiffies;
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (cfq_cfqq_expired(cfqq))
		goto new_queue;

	/*
	 * slice has expired
	 */
	if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
		goto expire;

	/*
	 * if queue has requests, dispatch one. if not, check if
	 * enough slice is left to wait for one
	 */
	if (!RB_EMPTY(&cfqq->sort_list))
		goto keep_queue;
	else if (!force && cfq_cfqq_class_sync(cfqq) &&
		 time_before(now, cfqq->slice_end)) {
		if (cfq_arm_slice_timer(cfqd, cfqq))
			return NULL;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY(&cfqq->sort_list));

	do {
		struct cfq_rq *crq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((crq = cfq_check_fifo(cfqq)) == NULL)
			crq = cfqq->next_crq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_sort(cfqd->queue, crq);

		cfqd->dispatch_slice++;
		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&crq->io_context->ioc->refcount);
			cfqd->active_cic = crq->io_context;
		}

		if (RB_EMPTY(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * if slice end isn't set yet, set it. if at least one request was
	 * sync, use the sync time slice value
	 */
	if (!cfqq->slice_end)
		cfq_set_prio_slice(cfqd, cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queues always expire after 1 dispatch round.
	 */
	if ((!cfq_cfqq_sync(cfqq) &&
	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))
		cfq_slice_expired(cfqd, 0);

	return dispatched;
}

static int
cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	cfqq = cfq_select_queue(cfqd, force);
	if (cfqq) {
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	return 0;
}

static inline void cfq_account_dispatch(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct cfq_data *cfqd = cfqq->cfqd;

	if (unlikely(!blk_fs_request(crq->request)))
		return;

	/*
	 * accounted bit is necessary since some drivers will call
	 * elv_next_request() many times for the same request (eg ide)
	 */
	if (cfq_crq_in_driver(crq))
		return;

	cfq_mark_crq_in_driver(crq);
	cfqd->rq_in_driver++;
}

static inline void
cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	unsigned long now;

	if (!cfq_crq_in_driver(crq))
		return;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (!cfq_cfqq_dispatched(cfqq)) {
		if (cfq_cfqq_on_rr(cfqq)) {
			cfqq->service_last = now;
			cfq_resort_rr_list(cfqq, 0);
		}
		if (cfq_cfqq_expired(cfqq)) {
			__cfq_slice_expired(cfqd, cfqq, 0);
			cfq_schedule_dispatch(cfqd);
		}
	}

	if (cfq_crq_is_sync(crq))
		crq->io_context->last_end_request = now;
}
| 1264 | |
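/*
 * The elevator's next-request hook.  Hand back the head of the dispatch
 * list if it is non-empty; otherwise ask cfq_dispatch_requests() to move
 * up to cfq_quantum requests from the selected cfqq onto it and retry.
 * Queues that have their idle window disabled are capped at cfq_max_depth
 * requests in the driver, so work builds up inside cfq rather than in a
 * deep hardware queue.
 */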
static struct request *cfq_next_request(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *rq;

	if (!list_empty(&q->queue_head)) {
		struct cfq_rq *crq;
dispatch:
		rq = list_entry_rq(q->queue_head.next);

		crq = RQ_DATA(rq);
		if (crq) {
			struct cfq_queue *cfqq = crq->cfq_queue;

			/*
			 * if idle window is disabled, allow queue buildup
			 */
			if (!cfq_crq_in_driver(crq) &&
			    !cfq_cfqq_idle_window(cfqq) &&
			    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
				return NULL;

			cfq_remove_merge_hints(q, crq);
			cfq_account_dispatch(crq);
		}

		return rq;
	}

	if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
		goto dispatch;

	return NULL;
}

/*
 * task holds one reference to the queue, dropped when task exits. each crq
 * in-flight on this queue also holds a reference, dropped when crq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cfqd(cfqq->cfqd);

	/*
	 * it's on the empty list and still hashed
	 */
	list_del(&cfqq->cfq_list);
	hlist_del(&cfqq->cfq_hash);
	kmem_cache_free(cfq_pool, cfqq);
}

static inline struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
		    const int hashval)
{
	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
	struct hlist_node *entry, *next;

	hlist_for_each_safe(entry, next, hash_list) {
		struct cfq_queue *__cfqq = list_entry_qhash(entry);
		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);

		if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
{
	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
}

static void cfq_free_io_context(struct cfq_io_context *cic)
{
	struct cfq_io_context *__cic;
	struct list_head *entry, *next;

	list_for_each_safe(entry, next, &cic->list) {
		__cic = list_entry(entry, struct cfq_io_context, list);
		kmem_cache_free(cfq_ioc_pool, __cic);
	}

	kmem_cache_free(cfq_ioc_pool, cic);
}

/*
 * Called with interrupts disabled
 */
static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->cfqq->cfqd;
	request_queue_t *q = cfqd->queue;

	WARN_ON(!irqs_disabled());

	spin_lock(q->queue_lock);

	if (unlikely(cic->cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cic->cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cic->cfqq);
	cic->cfqq = NULL;
	spin_unlock(q->queue_lock);
}

/*
 * Another task may update the task cic list, if it is doing a queue lookup
 * on its behalf. cfq_cic_lock excludes such concurrent updates
 */
static void cfq_exit_io_context(struct cfq_io_context *cic)
{
	struct cfq_io_context *__cic;
	struct list_head *entry;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * put the reference this task is holding to the various queues
	 */
	list_for_each(entry, &cic->list) {
		__cic = list_entry(entry, struct cfq_io_context, list);
		cfq_exit_single_io_context(__cic);
	}

	cfq_exit_single_io_context(cic);
	local_irq_restore(flags);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
{
	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);

	if (cic) {
		INIT_LIST_HEAD(&cic->list);
		cic->cfqq = NULL;
		cic->key = NULL;
		cic->last_end_request = jiffies;
		cic->ttime_total = 0;
		cic->ttime_samples = 0;
		cic->ttime_mean = 0;
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, place us in the middle of the BE classes
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;

	if (cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, 0);

	cfq_clear_cfqq_prio_changed(cfqq);
}

static inline void changed_ioprio(struct cfq_queue *cfqq)
{
	if (cfqq) {
		struct cfq_data *cfqd = cfqq->cfqd;

		spin_lock(cfqd->queue->queue_lock);
		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_init_prio_data(cfqq);
		spin_unlock(cfqd->queue->queue_lock);
	}
}

/*
 * callback from sys_ioprio_set, irqs are disabled
 */
static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
{
	struct cfq_io_context *cic = ioc->cic;

	changed_ioprio(cic->cfqq);

	list_for_each_entry(cic, &cic->list, list)
		changed_ioprio(cic->cfqq);

	return 0;
}

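/*
 * Find the cfq_queue for (key, ioprio), allocating one if it doesn't exist
 * yet.  If the allocation may sleep, the queue lock is dropped around
 * kmem_cache_alloc() and the hash lookup is retried afterwards, since
 * another task may have raced in and inserted the queue while the lock was
 * dropped; the spare allocation is freed in that case.
 */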
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
	      int gfp_mask)
{
	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;

retry:
	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
			if (!cfqq)
				goto out;
		}

		memset(cfqq, 0, sizeof(*cfqq));

		INIT_HLIST_NODE(&cfqq->cfq_hash);
		INIT_LIST_HEAD(&cfqq->cfq_list);
		RB_CLEAR_ROOT(&cfqq->sort_list);
		INIT_LIST_HEAD(&cfqq->fifo);

		cfqq->key = key;
		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;
		atomic_inc(&cfqd->ref);
		cfqq->service_last = 0;
		/*
		 * set ->slice_left to allow preemption for a new process
		 */
		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
		cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_init_prio_data(cfqq);
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq. Note that the caller is holding a
 * reference to cfqq, so we don't need to worry about it disappearing.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask);
	if (!ioc)
		return NULL;

	if ((cic = ioc->cic) == NULL) {
		cic = cfq_alloc_io_context(cfqd, gfp_mask);

		if (cic == NULL)
			goto err;

		/*
		 * manually increment generic io_context usage count, it
		 * cannot go away since we are already holding one ref to it
		 */
		ioc->cic = cic;
		ioc->set_ioprio = cfq_ioc_set_ioprio;
		cic->ioc = ioc;
		cic->key = cfqd;
		atomic_inc(&cfqd->ref);
	} else {
		struct cfq_io_context *__cic;

		/*
		 * the first cic on the list is actually the head itself
		 */
		if (cic->key == cfqd)
			goto out;

		/*
		 * cic exists, check if we already are there. linear search
		 * should be ok here, the list will usually be no more than
		 * one or a few entries long
		 */
		list_for_each_entry(__cic, &cic->list, list) {
			/*
			 * this process is already holding a reference to
			 * this queue, so no need to get one more
			 */
			if (__cic->key == cfqd) {
				cic = __cic;
				goto out;
			}
		}

		/*
		 * nope, process doesn't have a cic associated with this
		 * cfqq yet. get a new one and add to list
		 */
		__cic = cfq_alloc_io_context(cfqd, gfp_mask);
		if (__cic == NULL)
			goto err;

		__cic->ioc = ioc;
		__cic->key = cfqd;
		atomic_inc(&cfqd->ref);
		list_add(&__cic->list, &cic->list);
		cic = __cic;
	}

out:
	return cic;
err:
	put_io_context(ioc);
	return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed, ttime;

	/*
	 * if this context already has stuff queued, thinktime would ideally
	 * be measured from the last queue rather than the last end; that
	 * variant is currently disabled below
	 */
#if 0
	if (time_after(cic->last_end_request, cic->last_queue))
		elapsed = jiffies - cic->last_end_request;
	else
		elapsed = jiffies - cic->last_queue;
#else
	elapsed = jiffies - cic->last_end_request;
#endif

	ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
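
/*
 * The think time figures above form an exponentially weighted moving
 * average in fixed point, scaled by 256: each update keeps 7/8 of the old
 * value and folds in the new sample.  ttime_samples saturates towards 256,
 * so sample_valid() below (> 80) is satisfied after just three updates
 * (32, 60, 84, ...).  A quick worked example with a steady think time of
 * 4 jiffies: ttime_total converges to 256 * 4 = 1024, so ttime_mean
 * becomes (1024 + 128) / 256 = 4; the +128 rounds the division to nearest.
 */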
#define sample_valid(samples)	((samples) > 80)

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!cic->ioc->task || !cfqd->cfq_slice_idle)
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no (or if we aren't sure); returning 1 causes a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (!cfqq)
		return 1;

	if (cfq_class_idle(cfqq))
		return 1;
	if (!cfq_cfqq_wait_request(new_cfqq))
		return 0;
	/*
	 * if it doesn't have slice left, forget it
	 */
	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
		return 0;
	if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
		cfq_resort_rr_list(__cfqq, 1);

	if (!cfqq->slice_left)
		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;

	cfqq->slice_end = cfqq->slice_left + jiffies;
	__cfq_slice_expired(cfqd, cfqq, 1);
	__cfq_set_active_queue(cfqd, cfqq);
}

/*
 * should really be a ll_rw_blk.c helper
 */
static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	request_queue_t *q = cfqd->queue;

	if (!blk_queue_plugged(q))
		q->request_fn(q);
	else
		__generic_unplug_device(q);
}

/*
 * Called when a new fs request (crq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct cfq_rq *crq)
{
	const int sync = cfq_crq_is_sync(crq);

	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);

	if (sync) {
		struct cfq_io_context *cic = crq->io_context;

		cfq_update_io_thinktime(cfqd, cic);
		cfq_update_idle_window(cfqd, cfqq, cic);

		cic->last_queue = jiffies;
	}

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			cfq_start_queueing(cfqd, cfqq);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, crq)) {
		/*
		 * not the active queue - expire the current slice if it is
		 * idle and has expired its mean thinktime, or if this new
		 * queue has some old slice time left and is of higher
		 * priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		cfq_start_queueing(cfqd, cfqq);
	}
}

static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq = crq->cfq_queue;

	cfq_init_prio_data(cfqq);

	cfq_add_crq_rb(crq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	if (rq_mergeable(rq)) {
		cfq_add_crq_hash(cfqd, crq);

		if (!cfqd->queue->last_merge)
			cfqd->queue->last_merge = rq;
	}

	cfq_crq_enqueued(cfqd, cfqq, crq);
}

static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	switch (where) {
	case ELEVATOR_INSERT_BACK:
		while (cfq_dispatch_requests(q, INT_MAX, 1))
			;
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * If we were idling with pending requests on
		 * inactive cfqqs, force dispatching will
		 * remove the idle timer and the queue won't
		 * be kicked by __make_request() afterward.
		 * Kick it here.
		 */
		cfq_schedule_dispatch(cfqd);
		break;
	case ELEVATOR_INSERT_FRONT:
		list_add(&rq->queuelist, &q->queue_head);
		break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq));
		cfq_enqueue(cfqd, rq);
		break;
	default:
		printk("%s: bad insert point %d\n", __FUNCTION__, where);
		return;
	}
}

static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq;

	if (unlikely(!blk_fs_request(rq)))
		return;

	cfqq = crq->cfq_queue;

	if (cfq_crq_in_flight(crq)) {
		const int sync = cfq_crq_is_sync(crq);

		WARN_ON(!cfqq->on_dispatch[sync]);
		cfqq->on_dispatch[sync]--;
	}

	cfq_account_completion(cfqq, crq);
}

static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&crq->rb_node);

	if (rbprev)
		return rb_entry_crq(rbprev)->request;

	return NULL;
}

static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&crq->rb_node);

	if (rbnext)
		return rb_entry_crq(rbnext)->request;

	return NULL;
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	const int ioprio_class = cfqq->ioprio_class;
	const int ioprio = cfqq->ioprio;

	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}

	/*
	 * refile between round-robin lists if we moved the priority class
	 */
	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
	    cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, 0);
}

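/*
 * cfqqs are keyed per-process for sync io and shared for async io: reads,
 * and writes from a task doing synchronous io, map to the task's pid,
 * while everything else maps to the common CFQ_KEY_ASYNC queue.
 */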
static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
{
	if (rw == READ || process_sync(task))
		return task->pid;

	return CFQ_KEY_ASYNC;
}

static inline int
__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct task_struct *task, int rw)
{
#if 1
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
#else
	if (!cfqq || task->flags & PF_MEMALLOC)
		return ELV_MQUEUE_MAY;
	if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
		if (cfq_cfqq_wait_request(cfqq))
			return ELV_MQUEUE_MUST;

		/*
		 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
		 * can quickly flood the queue with writes from a single task
		 */
		if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
			cfq_mark_cfqq_must_alloc_slice(cfqq);
			return ELV_MQUEUE_MUST;
		}

		return ELV_MQUEUE_MAY;
	}
	if (cfq_class_idle(cfqq))
		return ELV_MQUEUE_NO;
	if (cfqq->allocated[rw] >= cfqd->max_queued) {
		struct io_context *ioc = get_io_context(GFP_ATOMIC);
		int ret = ELV_MQUEUE_NO;

		if (ioc && ioc->nr_batch_requests)
			ret = ELV_MQUEUE_MAY;

		put_io_context(ioc);
		return ret;
	}

	return ELV_MQUEUE_MAY;
#endif
}

static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqd, cfqq, tsk, rw);
	}

	return ELV_MQUEUE_MAY;
}

static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request_list *rl = &q->rq;

	if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
		smp_mb();
		if (waitqueue_active(&rl->wait[READ]))
			wake_up(&rl->wait[READ]);
	}

	if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
		smp_mb();
		if (waitqueue_active(&rl->wait[WRITE]))
			wake_up(&rl->wait[WRITE]);
	}
}

/*
 * queue lock held here
 */
static void cfq_put_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	if (crq) {
		struct cfq_queue *cfqq = crq->cfq_queue;
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(crq->io_context->ioc);

		mempool_free(crq, cfqd->crq_pool);
		rq->elevator_private = NULL;

		cfq_check_waiters(q, cfqq);
		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		int gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	pid_t key = cfq_queue_pid(tsk, rw);
	struct cfq_queue *cfqq;
	struct cfq_rq *crq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, key, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	if (!cic->cfqq) {
		cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
		if (!cfqq)
			goto queue_fail;

		cic->cfqq = cfqq;
	} else
		cfqq = cic->cfqq;

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	cfqd->rq_starved = 0;
	atomic_inc(&cfqq->ref);
	spin_unlock_irqrestore(q->queue_lock, flags);

	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
	if (crq) {
		RB_CLEAR(&crq->rb_node);
		crq->rb_key = 0;
		crq->request = rq;
		INIT_HLIST_NODE(&crq->hash);
		crq->cfq_queue = cfqq;
		crq->io_context = cic;
		cfq_clear_crq_in_flight(crq);
		cfq_clear_crq_in_driver(crq);
		cfq_clear_crq_requeued(crq);

		if (rw == READ || process_sync(tsk))
			cfq_mark_crq_is_sync(crq);
		else
			cfq_clear_crq_is_sync(crq);

		rq->elevator_private = crq;
		return 0;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	cfqq->allocated[rw]--;
	if (!(cfqq->allocated[0] + cfqq->allocated[1]))
		cfq_mark_cfqq_must_alloc(cfqq);
	cfq_put_queue(cfqq);
queue_fail:
	if (cic)
		put_io_context(cic->ioc);
	/*
	 * mark us rq allocation starved. we need to kickstart the process
	 * ourselves if there are no pending requests that can do it for us.
	 * that would be an extremely rare OOM situation
	 */
	cfqd->rq_starved = 1;
	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}

static void cfq_kick_queue(void *data)
{
	request_queue_t *q = data;
	struct cfq_data *cfqd = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);

	if (cfqd->rq_starved) {
		struct request_list *rl = &q->rq;

		/*
		 * we aren't guaranteed to get a request after this, but we
		 * have to be opportunistic
		 */
		smp_mb();
		if (waitqueue_active(&rl->wait[READ]))
			wake_up(&rl->wait[READ]);
		if (waitqueue_active(&rl->wait[WRITE]))
			wake_up(&rl->wait[WRITE]);
	}

	blk_remove_plug(q);
	q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		unsigned long now = jiffies;

		/*
		 * expired
		 */
		if (time_after(now, cfqq->slice_end))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfq_pending_requests(cfqd)) {
			cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
			add_timer(&cfqd->idle_slice_timer);
			goto out_cont;
		}

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, 0);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags, end;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	if (!time_after_eq(jiffies, end)) {
		cfqd->idle_class_timer.expires = end;
		add_timer(&cfqd->idle_class_timer);
	} else
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	blk_sync_queue(cfqd->queue);
}

static void cfq_put_cfqd(struct cfq_data *cfqd)
{
	request_queue_t *q = cfqd->queue;

	if (!atomic_dec_and_test(&cfqd->ref))
		return;

	blk_put_queue(q);

	cfq_shutdown_timer_wq(cfqd);
	q->elevator->elevator_data = NULL;

	mempool_destroy(cfqd->crq_pool);
	kfree(cfqd->crq_hash);
	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}

static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;

	cfq_shutdown_timer_wq(cfqd);
	cfq_put_cfqd(cfqd);
}

static int cfq_init_queue(request_queue_t *q, elevator_t *e)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
	if (!cfqd)
		return -ENOMEM;

	memset(cfqd, 0, sizeof(*cfqd));

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		INIT_LIST_HEAD(&cfqd->rr_list[i]);

	INIT_LIST_HEAD(&cfqd->busy_rr);
	INIT_LIST_HEAD(&cfqd->cur_rr);
	INIT_LIST_HEAD(&cfqd->idle_rr);
	INIT_LIST_HEAD(&cfqd->empty_list);

	cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->crq_hash)
		goto out_crqhash;

	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->cfq_hash)
		goto out_cfqhash;

	cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
	if (!cfqd->crq_pool)
		goto out_crqpool;

	for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

	e->elevator_data = cfqd;

	cfqd->queue = q;
	atomic_inc(&q->refcnt);

	cfqd->max_queued = q->nr_requests / 4;
	q->nr_batching = cfq_queued;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);

	atomic_set(&cfqd->ref, 1);

	cfqd->cfq_queued = cfq_queued;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_max_depth = cfq_max_depth;

	return 0;
out_crqpool:
	kfree(cfqd->cfq_hash);
out_cfqhash:
	kfree(cfqd->crq_hash);
out_crqhash:
	kfree(cfqd);
	return -ENOMEM;
}

static void cfq_slab_kill(void)
{
	if (crq_pool)
		kmem_cache_destroy(crq_pool);
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
					NULL, NULL);
	if (!crq_pool)
		goto fail;

	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
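
/*
 * Each tunable is exposed as a read/write attribute under the elevator's
 * sysfs directory, typically /sys/block/<dev>/queue/iosched/ when cfq is
 * the active scheduler.  A rough usage sketch (path assumed, adjust for
 * the device):
 *
 *	# cat /sys/block/hda/queue/iosched/quantum
 *	4
 *	# echo 8 > /sys/block/hda/queue/iosched/quantum
 *
 * Time-based tunables (slice_sync, slice_async, slice_idle, fifo_expire_*)
 * are shown and stored in milliseconds and converted to jiffies internally.
 */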
struct cfq_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct cfq_data *, char *);
	ssize_t (*store)(struct cfq_data *, const char *, size_t);
};

static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

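/*
 * The SHOW_FUNCTION/STORE_FUNCTION macros below stamp out one show/store
 * pair per tunable.  As a sketch of what the preprocessor generates,
 * SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0) expands to:
 *
 *	static ssize_t cfq_quantum_show(struct cfq_data *cfqd, char *page)
 *	{
 *		unsigned int __data = cfqd->cfq_quantum;
 *		return cfq_var_show(__data, (page));
 *	}
 *
 * (the jiffies_to_msecs() conversion is compiled out since __CONV is 0).
 */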
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2405 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
| 2406 | static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ |
| 2407 | { \ |
| 2408 | unsigned int __data = __VAR; \ |
| 2409 | if (__CONV) \ |
| 2410 | __data = jiffies_to_msecs(__data); \ |
| 2411 | return cfq_var_show(__data, (page)); \ |
| 2412 | } |
| 2413 | SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); |
| 2414 | SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); |
Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 2415 | SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); |
| 2416 | SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2417 | SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); |
| 2418 | SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); |
Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 2419 | SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); |
| 2420 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); |
| 2421 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); |
| 2422 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); |
| 2423 | SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2424 | #undef SHOW_FUNCTION |
| 2425 | |
| 2426 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
| 2427 | static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \ |
| 2428 | { \ |
| 2429 | unsigned int __data; \ |
| 2430 | int ret = cfq_var_store(&__data, (page), count); \ |
| 2431 | if (__data < (MIN)) \ |
| 2432 | __data = (MIN); \ |
| 2433 | else if (__data > (MAX)) \ |
| 2434 | __data = (MAX); \ |
| 2435 | if (__CONV) \ |
| 2436 | *(__PTR) = msecs_to_jiffies(__data); \ |
| 2437 | else \ |
| 2438 | *(__PTR) = __data; \ |
| 2439 | return ret; \ |
| 2440 | } |
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
#undef STORE_FUNCTION

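/*
 * one cfq_fs_entry per tunable, tying the sysfs file name and mode
 * (S_IRUGO | S_IWUSR, i.e. world-readable, owner-writable) to the
 * generated show/store handlers above
 */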
static struct cfq_fs_entry cfq_quantum_entry = {
	.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_quantum_show,
	.store = cfq_quantum_store,
};
static struct cfq_fs_entry cfq_queued_entry = {
	.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_queued_show,
	.store = cfq_queued_store,
};
static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
	.attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_fifo_expire_sync_show,
	.store = cfq_fifo_expire_sync_store,
};
static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
	.attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_fifo_expire_async_show,
	.store = cfq_fifo_expire_async_store,
};
static struct cfq_fs_entry cfq_back_max_entry = {
	.attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_back_max_show,
	.store = cfq_back_max_store,
};
static struct cfq_fs_entry cfq_back_penalty_entry = {
	.attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_back_penalty_show,
	.store = cfq_back_penalty_store,
};
static struct cfq_fs_entry cfq_slice_sync_entry = {
	.attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_slice_sync_show,
	.store = cfq_slice_sync_store,
};
static struct cfq_fs_entry cfq_slice_async_entry = {
	.attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_slice_async_show,
	.store = cfq_slice_async_store,
};
static struct cfq_fs_entry cfq_slice_async_rq_entry = {
	.attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_slice_async_rq_show,
	.store = cfq_slice_async_rq_store,
};
static struct cfq_fs_entry cfq_slice_idle_entry = {
	.attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_slice_idle_show,
	.store = cfq_slice_idle_store,
};
static struct cfq_fs_entry cfq_max_depth_entry = {
	.attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_max_depth_show,
	.store = cfq_max_depth_store,
};

static struct attribute *default_attrs[] = {
	&cfq_quantum_entry.attr,
	&cfq_queued_entry.attr,
	&cfq_fifo_expire_sync_entry.attr,
	&cfq_fifo_expire_async_entry.attr,
	&cfq_back_max_entry.attr,
	&cfq_back_penalty_entry.attr,
	&cfq_slice_sync_entry.attr,
	&cfq_slice_async_entry.attr,
	&cfq_slice_async_rq_entry.attr,
	&cfq_slice_idle_entry.attr,
	&cfq_max_depth_entry.attr,
	NULL,
};
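
/*
 * default_attrs must stay NULL-terminated; sysfs stops at the sentinel
 * when it populates the directory with the files listed above
 */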

#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)

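/*
 * generic dispatch for all of the attributes: recover the elevator from
 * the embedded kobject and the cfq_fs_entry from the attribute, then
 * forward to the per-tunable handler, or fail with -EIO if none exists
 */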
static ssize_t
cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct cfq_fs_entry *entry = to_cfq(attr);

	if (!entry->show)
		return -EIO;

	return entry->show(e->elevator_data, page);
}

static ssize_t
cfq_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct cfq_fs_entry *entry = to_cfq(attr);

	if (!entry->store)
		return -EIO;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops cfq_sysfs_ops = {
	.show = cfq_attr_show,
	.store = cfq_attr_store,
};

static struct kobj_type cfq_ktype = {
	.sysfs_ops = &cfq_sysfs_ops,
	.default_attrs = default_attrs,
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = cfq_merge,
		.elevator_merged_fn = cfq_merged_request,
		.elevator_merge_req_fn = cfq_merged_requests,
		.elevator_next_req_fn = cfq_next_request,
		.elevator_add_req_fn = cfq_insert_request,
		.elevator_remove_req_fn = cfq_remove_request,
		.elevator_requeue_req_fn = cfq_requeue_request,
		.elevator_deactivate_req_fn = cfq_deactivate_request,
		.elevator_queue_empty_fn = cfq_queue_empty,
		.elevator_completed_req_fn = cfq_completed_request,
		.elevator_former_req_fn = cfq_former_request,
		.elevator_latter_req_fn = cfq_latter_request,
		.elevator_set_req_fn = cfq_set_request,
		.elevator_put_req_fn = cfq_put_request,
		.elevator_may_queue_fn = cfq_may_queue,
		.elevator_init_fn = cfq_init_queue,
		.elevator_exit_fn = cfq_exit_queue,
	},
	.elevator_ktype = &cfq_ktype,
	.elevator_name = "cfq",
	.elevator_owner = THIS_MODULE,
};
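
/*
 * once registered, the scheduler is selected and tuned from userspace
 * through sysfs, e.g. (hda is just an example device):
 *
 *	# echo cfq > /sys/block/hda/queue/scheduler
 *	# echo 8 > /sys/block/hda/queue/iosched/quantum
 *	# cat /sys/block/hda/queue/iosched/slice_sync
 */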

static int __init cfq_init(void)
{
	int ret;

	/*
	 * the HZ-based divisions above can truncate these slice values
	 * to 0 on HZ < 1000 setups, so enforce a minimum of one jiffy
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

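	/*
	 * set up the slab caches before registering the elevator, so a
	 * queue can never select cfq while its memory pools are missing
	 */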
	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret)
		cfq_slab_kill();

	return ret;
}

static void __exit cfq_exit(void)
{
	struct task_struct *g, *p;
	unsigned long flags;

	read_lock_irqsave(&tasklist_lock, flags);

	/*
	 * iterate over every task in the system and tear down our
	 * per-process io_context state, which must not outlive the module
	 */
	do_each_thread(g, p) {
		struct io_context *ioc = p->io_context;

		if (ioc && ioc->cic) {
			ioc->cic->exit(ioc->cic);
			cfq_free_io_context(ioc->cic);
			ioc->cic = NULL;
		}
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);

	/*
	 * unregister the elevator before killing the slab caches, so cfq
	 * cannot be selected while its memory pools are being torn down
	 */
	elv_unregister(&iosched_cfq);
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");