/*
 * Anticipatory & deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 *                    Nick Piggin <nickpiggin@yahoo.com.au>
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>

#define REQ_SYNC	1
#define REQ_ASYNC	0
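
/*
 * Requests are classified as REQ_SYNC (reads and explicitly synchronous
 * writes) or REQ_ASYNC (everything else); these two values index the
 * per-direction sort_list, fifo_list, fifo_expire and batch_expire arrays.
 */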

/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
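/*
 * At HZ=1000 this is 6 jiffies (~6ms); at HZ=100 the division truncates to 0,
 * so the ?: enforces a minimum of one jiffy.
 */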

/*
 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
 * however huge values tend to interfere and not decay fast enough. A program
 * might be in a non-io phase of operation. Waiting on user input for example,
 * or doing a lengthy computation. A small penalty can be justified there, and
 * will still catch out those processes that constantly have large thinktimes.
 */
#define MAX_THINKTIME (HZ/50UL)

/* Bits in as_io_context.state */
enum as_io_states {
	AS_TASK_RUNNING=0,	/* Process has not exited */
	AS_TASK_IOSTARTED,	/* Process has started some IO */
	AS_TASK_IORUNNING,	/* Process has completed some IO */
};

enum anticipation_status {
	ANTIC_OFF=0,		/* Not anticipating (normal operation)	*/
	ANTIC_WAIT_REQ,		/* The last read has not yet completed  */
	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
				   last read (which has completed) */
	ANTIC_FINISHED,		/* Anticipating but have found a candidate
				 * or timed out */
};
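
/*
 * Typical flow: ANTIC_OFF -> ANTIC_WAIT_REQ while the read we are
 * anticipating on is still in flight (as_antic_waitreq), then
 * ANTIC_WAIT_NEXT once it completes and the anticipation timer is armed
 * (as_antic_waitnext). as_antic_stop or as_antic_timeout moves the state
 * to ANTIC_FINISHED, and dispatching the chosen request resets it to
 * ANTIC_OFF in as_move_to_dispatch.
 */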

struct as_data {
	/*
	 * run time data
	 */

	struct request_queue *q;	/* the "owner" queue */

	/*
	 * requests (as_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	struct as_rq *next_arq[2];	/* next in sort order */
	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
	struct hlist_head *hash;	/* request hash */

	unsigned long exit_prob;	/* probability a task will exit while
					   being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
	unsigned long new_ttime_total; 	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */
	mempool_t *arq_pool;

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished;	/* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};

#define list_entry_fifo(ptr)	list_entry((ptr), struct as_rq, fifo)

/*
 * per-request data.
 */
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_REMOVED,
	AS_RQ_MERGED,
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
};

struct as_rq {
	/*
	 * rbtree index, key is the starting offset
	 */
	struct rb_node rb_node;
	sector_t rb_key;

	struct request *request;

	struct io_context *io_context;	/* The submitting task */

	/*
	 * request hash, key is the ending offset (for back merge lookup)
	 */
	struct hlist_node hash;

	/*
	 * expire fifo
	 */
	struct list_head fifo;
	unsigned long expires;

	unsigned int is_sync;
	enum arq_state state;
};

#define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)

static kmem_cache_t *arq_pool;

static atomic_t ioc_count = ATOMIC_INIT(0);
static struct completion *ioc_gone;

static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);

/*
 * IO Context helper functions
 */

/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
		complete(ioc_gone);
}

static void as_trim(struct io_context *ioc)
{
	if (ioc->aic)
		free_as_io_context(ioc->aic);
	ioc->aic = NULL;
}

/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}

static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->ttime_mean = 0;
		ret->seek_total = 0;
		ret->seek_samples = 0;
		ret->seek_mean = 0;
		atomic_inc(&ioc_count);
	}

	return ret;
}

/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(void)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}

static void as_put_io_context(struct as_rq *arq)
{
	struct as_io_context *aic;

	if (unlikely(!arq->io_context))
		return;

	aic = arq->io_context->aic;

	if (arq->is_sync == REQ_SYNC && aic) {
		spin_lock(&aic->lock);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock(&aic->lock);
	}

	put_io_context(arq->io_context);
}

/*
 * the back merge hash support functions
 */
static const int as_hash_shift = 6;
#define AS_HASH_BLOCK(sec)	((sec) >> 3)
#define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
#define AS_HASH_ENTRIES		(1 << as_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
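
/*
 * The hash is keyed on a request's end sector, so a bio whose bi_sector
 * equals rq_hash_key(rq) can be back merged onto rq. AS_HASH_BLOCK drops
 * the low three bits (groups of 8 sectors) before hash_long() spreads the
 * key over AS_HASH_ENTRIES (64) buckets.
 */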

static inline void __as_del_arq_hash(struct as_rq *arq)
{
	hlist_del_init(&arq->hash);
}

static inline void as_del_arq_hash(struct as_rq *arq)
{
	if (!hlist_unhashed(&arq->hash))
		__as_del_arq_hash(arq);
}

static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;

	BUG_ON(!hlist_unhashed(&arq->hash));

	hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;
	struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];

	if (hlist_unhashed(&arq->hash)) {
		WARN_ON(1);
		return;
	}

	if (&arq->hash != head->first) {
		hlist_del(&arq->hash);
		hlist_add_head(&arq->hash, head);
	}
}

static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
{
	struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct as_rq *arq;

	hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
		struct request *__rq = arq->request;

		BUG_ON(hlist_unhashed(&arq->hash));

		if (!rq_mergeable(__rq)) {
			as_del_arq_hash(arq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}

/*
 * rb tree support functions
 */
#define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
#define rq_rb_key(rq)		(rq)->sector

/*
 * as_find_first_arq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
{
	struct rb_node *n = ad->sort_list[data_dir].rb_node;

	if (n == NULL)
		return NULL;

	for (;;) {
		if (n->rb_left == NULL)
			return rb_entry_arq(n);

		n = n->rb_left;
	}
}

/*
 * Add the request to the rb tree if it is unique. If there is an alias (an
 * existing request against the same sector), which can happen when using
 * direct IO, then return the alias.
 */
static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
	struct rb_node *parent = NULL;
	struct as_rq *__arq;
	struct request *rq = arq->request;

	arq->rb_key = rq_rb_key(rq);

	while (*p) {
		parent = *p;
		__arq = rb_entry_arq(parent);

		if (arq->rb_key < __arq->rb_key)
			p = &(*p)->rb_left;
		else if (arq->rb_key > __arq->rb_key)
			p = &(*p)->rb_right;
		else
			return __arq;
	}

	rb_link_node(&arq->rb_node, parent, p);
	rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));

	return NULL;
}

static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	struct as_rq *alias;

	while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
		as_move_to_dispatch(ad, alias);
		as_antic_stop(ad);
	}
}

static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
	if (!RB_EMPTY_NODE(&arq->rb_node)) {
		WARN_ON(1);
		return;
	}

	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
	RB_CLEAR_NODE(&arq->rb_node);
}

static struct request *
as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
{
	struct rb_node *n = ad->sort_list[data_dir].rb_node;
	struct as_rq *arq;

	while (n) {
		arq = rb_entry_arq(n);

		if (sector < arq->rb_key)
			n = n->rb_left;
		else if (sector > arq->rb_key)
			n = n->rb_right;
		else
			return arq->request;
	}

	return NULL;
}

/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a request.
				 */

#define BACK_PENALTY	2
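
/*
 * A backward seek is charged BACK_PENALTY times its distance, so with the
 * head at sector 10000 a request at 10200 (forward, d = 200) and one at
 * 9900 (backward, d = (10000 - 9900) * 2 = 200) look equally attractive;
 * anything more than MAXBACK sectors behind the head is treated as wrapped.
 */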

/*
 * as_choose_req selects the preferred one of two requests of the same data_dir
 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
 */
static struct as_rq *
as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (arq1 == NULL || arq1 == arq2)
		return arq2;
	if (arq2 == NULL)
		return arq1;

	data_dir = arq1->is_sync;

	last = ad->last_sector[data_dir];
	s1 = arq1->request->sector;
	s2 = arq2->request->sector;

	BUG_ON(data_dir != arq2->is_sync);

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return arq1;
	else if (!r2_wrap && r1_wrap)
		return arq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return arq1;
		else
			return arq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return arq1;
	else if (d2 < d1)
		return arq2;
	else {
		if (s1 >= s2)
			return arq1;
		else
			return arq2;
	}
}

/*
 * as_find_next_arq finds the next request after @prev in elevator order.
 * this with as_choose_req form the basis for how the scheduler chooses
 * what request to process next. Anticipation works on top of this.
 */
static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
{
	const int data_dir = last->is_sync;
	struct as_rq *ret;
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct as_rq *arq_next, *arq_prev;

	BUG_ON(!RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		arq_prev = rb_entry_arq(rbprev);
	else
		arq_prev = NULL;

	if (rbnext)
		arq_next = rb_entry_arq(rbnext);
	else {
		arq_next = as_find_first_arq(ad, data_dir);
		if (arq_next == last)
			arq_next = NULL;
	}

	ret = as_choose_req(ad, arq_next, arq_prev);

	return ret;
}

/*
 * anticipatory scheduling functions follow
 */

/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}

/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}

/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are timing from when the candidate process wakes up hopefully.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}

/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}

/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out */
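			/*
			 * exit_prob and exit_no_coop are fixed-point
			 * probabilities in the 0..256 range; each event
			 * nudges the estimate 1/8 of the way towards 256
			 * (certain) or, in the decay cases, towards 0.
			 */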
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
				unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
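	/*
	 * ttime_samples and ttime_total are exponentially decaying sums:
	 * each new sample scales the history by 7/8 and adds 256 (resp.
	 * 256*ttime), so ttime_samples converges towards 256 and ttime_mean
	 * approximates a recent-weighted average thinktime in jiffies.
	 */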
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}

static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
				sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}

/*
 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
 * updates @aic->ttime_mean based on that. It is called when a new
 * request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
				struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	int data_dir = arq->is_sync;
	unsigned long thinktime = 0;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			}
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector - aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos - rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}

/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
static int as_close_req(struct as_data *ad, struct as_io_context *aic,
			struct as_rq *arq)
{
	unsigned long delay;	/* milliseconds */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = arq->request->sector;
	sector_t delta; /* acceptable close offset (in sectors) */
	sector_t s;

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = ((jiffies - ad->antic_start) * 1000) / HZ;

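	/*
	 * The longer we have already waited, the further afield a "close"
	 * request may be: delta starts at 8192 sectors and doubles for each
	 * millisecond of anticipation, and once we have waited more than
	 * 20ms (or past antic_expire) any request is treated as close.
	 */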
	if (delay == 0)
		delta = 8192;
	else if (delay <= 20 && delay <= ad->antic_expire)
		delta = 8192 << delay;
	else
		return 1;

	if ((last <= next + (delta>>1)) && (next <= last + delta))
		return 1;

	if (last < next)
		s = next - last;
	else
		s = last - next;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}

	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}

/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that application will not be submitting
 * any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
{
	struct io_context *ioc;
	struct as_io_context *aic;

	ioc = ad->io_context;
	BUG_ON(!ioc);

	if (arq && ioc == arq->io_context) {
		/* request from same process */
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		return 1;
	}

	aic = ioc->aic;
	if (!aic)
		return 0;

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		return 1;
	}

	if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, aic, arq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process update
		 * our IO history. It is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			if (aic->ttime_samples == 0)
				ad->exit_prob = (7*ad->exit_prob + 256)/8;

			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
		}

		as_update_iohist(ad, aic, arq->request);
		return 1;
	}

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;

		if (ad->exit_no_coop > 128)
			return 1;
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire)
			return 1;
		if (ad->exit_prob * ad->exit_no_coop > 128*256)
			return 1;
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		return 1;
	}

	return 0;
}

/*
 * as_can_anticipate indicates whether we should either run arq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, arq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}

/*
 * as_update_arq must be called whenever a request (arq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_arq(struct as_data *ad, struct as_rq *arq)
{
	const int data_dir = arq->is_sync;

	/* keep the next_arq cache up to date */
	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * for?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, arq))
			as_antic_stop(ad);
	}
}

/*
 * Gathers timings and resizes the write batch automatically
 */
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

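	/*
	 * write_time is roughly how long the last write batch actually took
	 * to complete. If it overran the configured batch_expire (and was not
	 * idled out), shrink write_batch_count; if it finished early with its
	 * request budget exhausted, grow it. This keeps write batches close
	 * to their target duration even with writeback caches and deep TCQ.
	 */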
	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}

/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(!list_empty(&rq->queuelist));

	if (arq->state != AS_RQ_REMOVED) {
		printk("arq->state %d\n", arq->state);
		WARN_ON(1);
		goto out;
	}

	if (ad->changed_batch && ad->nr_dispatched == 1) {
		kblockd_schedule_work(&ad->antic_work);
		ad->changed_batch = 0;

		if (ad->batch_data_dir == REQ_SYNC)
			ad->new_batch = 1;
	}
	WARN_ON(ad->nr_dispatched == 0);
	ad->nr_dispatched--;

	/*
	 * Start counting the batch from when a request of that direction is
	 * actually serviced. This should help devices with big TCQ windows
	 * and writeback caches
	 */
	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
		update_write_batch(ad);
		ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_SYNC];
		ad->new_batch = 0;
	}

	if (ad->io_context == arq->io_context && ad->io_context) {
		ad->antic_start = jiffies;
		ad->ioc_finished = 1;
		if (ad->antic_status == ANTIC_WAIT_REQ) {
			/*
			 * We were waiting on this request, now anticipate
			 * the next one
			 */
			as_antic_waitnext(ad);
		}
	}

	as_put_io_context(arq);
out:
	arq->state = AS_RQ_POSTSCHED;
}

/*
 * as_remove_queued_request removes a request from the pre dispatch queue
 * without updating refcounts. It is expected the caller will drop the
 * reference unless it replaces the request at some part of the elevator
| 1040 | * (ie. the dispatch queue) |
| 1041 | */ |
| 1042 | static void as_remove_queued_request(request_queue_t *q, struct request *rq) |
| 1043 | { |
| 1044 | struct as_rq *arq = RQ_DATA(rq); |
| 1045 | const int data_dir = arq->is_sync; |
| 1046 | struct as_data *ad = q->elevator->elevator_data; |
| 1047 | |
| 1048 | WARN_ON(arq->state != AS_RQ_QUEUED); |
| 1049 | |
| 1050 | if (arq->io_context && arq->io_context->aic) { |
| 1051 | BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued)); |
| 1052 | atomic_dec(&arq->io_context->aic->nr_queued); |
| 1053 | } |
| 1054 | |
| 1055 | /* |
| 1056 | * Update the "next_arq" cache if we are about to remove its |
| 1057 | * entry |
| 1058 | */ |
| 1059 | if (ad->next_arq[data_dir] == arq) |
| 1060 | ad->next_arq[data_dir] = as_find_next_arq(ad, arq); |
| 1061 | |
| 1062 | list_del_init(&arq->fifo); |
Tejun Heo | 98b1147 | 2005-10-20 16:46:54 +0200 | [diff] [blame] | 1063 | as_del_arq_hash(arq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | as_del_arq_rb(ad, arq); |
| 1065 | } |
| 1066 | |
| 1067 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | * as_fifo_expired returns 0 if there are no expired reads on the fifo, |
| 1069 | * 1 otherwise. It is ratelimited so that we only perform the check once per |
| 1070 | * `fifo_expire' interval. Otherwise a large number of expired requests |
| 1071 | * would create a hopeless seekstorm. |
| 1072 | * |
| 1073 | * See as_antic_expired comment. |
| 1074 | */ |
| 1075 | static int as_fifo_expired(struct as_data *ad, int adir) |
| 1076 | { |
| 1077 | struct as_rq *arq; |
| 1078 | long delta_jif; |
| 1079 | |
| 1080 | delta_jif = jiffies - ad->last_check_fifo[adir]; |
| 1081 | if (unlikely(delta_jif < 0)) |
| 1082 | delta_jif = -delta_jif; |
| 1083 | if (delta_jif < ad->fifo_expire[adir]) |
| 1084 | return 0; |
| 1085 | |
| 1086 | ad->last_check_fifo[adir] = jiffies; |
| 1087 | |
| 1088 | if (list_empty(&ad->fifo_list[adir])) |
| 1089 | return 0; |
| 1090 | |
| 1091 | arq = list_entry_fifo(ad->fifo_list[adir].next); |
| 1092 | |
| 1093 | return time_after(jiffies, arq->expires); |
| 1094 | } |
| 1095 | |
| 1096 | /* |
| 1097 | * as_batch_expired returns true if the current batch has expired. A batch |
| 1098 | * is a set of reads or a set of writes. |
| 1099 | */ |
| 1100 | static inline int as_batch_expired(struct as_data *ad) |
| 1101 | { |
| 1102 | if (ad->changed_batch || ad->new_batch) |
| 1103 | return 0; |
| 1104 | |
| 1105 | if (ad->batch_data_dir == REQ_SYNC) |
| 1106 | /* TODO! add a check so a complete fifo gets written? */ |
| 1107 | return time_after(jiffies, ad->current_batch_expires); |
| 1108 | |
| 1109 | return time_after(jiffies, ad->current_batch_expires) |
| 1110 | || ad->current_write_count == 0; |
| 1111 | } |
| 1112 | |
| 1113 | /* |
| 1114 | * move an entry to dispatch queue |
| 1115 | */ |
| 1116 | static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) |
| 1117 | { |
| 1118 | struct request *rq = arq->request; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 | const int data_dir = arq->is_sync; |
| 1120 | |
Jens Axboe | dd67d05 | 2006-06-21 09:36:18 +0200 | [diff] [blame] | 1121 | BUG_ON(!RB_EMPTY_NODE(&arq->rb_node)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | |
| 1123 | as_antic_stop(ad); |
| 1124 | ad->antic_status = ANTIC_OFF; |
| 1125 | |
| 1126 | /* |
| 1127 | * This has to be set in order to be correctly updated by |
| 1128 | * as_find_next_arq |
| 1129 | */ |
| 1130 | ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; |
| 1131 | |
| 1132 | if (data_dir == REQ_SYNC) { |
| 1133 | /* In case we have to anticipate after this */ |
| 1134 | copy_io_context(&ad->io_context, &arq->io_context); |
| 1135 | } else { |
| 1136 | if (ad->io_context) { |
| 1137 | put_io_context(ad->io_context); |
| 1138 | ad->io_context = NULL; |
| 1139 | } |
| 1140 | |
| 1141 | if (ad->current_write_count != 0) |
| 1142 | ad->current_write_count--; |
| 1143 | } |
| 1144 | ad->ioc_finished = 0; |
| 1145 | |
| 1146 | ad->next_arq[data_dir] = as_find_next_arq(ad, arq); |
| 1147 | |
| 1148 | /* |
| 1149 | * take it off the sort and fifo list, add to dispatch queue |
| 1150 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1151 | as_remove_queued_request(ad->q, rq); |
| 1152 | WARN_ON(arq->state != AS_RQ_QUEUED); |
| 1153 | |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1154 | elv_dispatch_sort(ad->q, rq); |
| 1155 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 | arq->state = AS_RQ_DISPATCHED; |
| 1157 | if (arq->io_context && arq->io_context->aic) |
| 1158 | atomic_inc(&arq->io_context->aic->nr_dispatched); |
| 1159 | ad->nr_dispatched++; |
| 1160 | } |
| 1161 | |
| 1162 | /* |
| 1163 | * as_dispatch_request selects the best request according to |
| 1164 | * read/write expire, batch expire, etc, and moves it to the dispatch |
| 1165 | * queue. Returns 1 if a request was found, 0 otherwise. |
| 1166 | */ |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1167 | static int as_dispatch_request(request_queue_t *q, int force) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1168 | { |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1169 | struct as_data *ad = q->elevator->elevator_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1170 | struct as_rq *arq; |
| 1171 | const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); |
| 1172 | const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); |
| 1173 | |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1174 | if (unlikely(force)) { |
| 1175 | /* |
| 1176 | * Forced dispatch, accounting is useless. Reset |
| 1177 | * accounting states and dump fifo_lists. Note that |
| 1178 | * batch_data_dir is reset to REQ_SYNC to avoid |
| 1179 | * screwing write batch accounting as write batch |
| 1180 | * accounting occurs on W->R transition. |
| 1181 | */ |
| 1182 | int dispatched = 0; |
| 1183 | |
| 1184 | ad->batch_data_dir = REQ_SYNC; |
| 1185 | ad->changed_batch = 0; |
| 1186 | ad->new_batch = 0; |
| 1187 | |
| 1188 | while (ad->next_arq[REQ_SYNC]) { |
| 1189 | as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]); |
| 1190 | dispatched++; |
| 1191 | } |
| 1192 | ad->last_check_fifo[REQ_SYNC] = jiffies; |
| 1193 | |
| 1194 | while (ad->next_arq[REQ_ASYNC]) { |
| 1195 | as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]); |
| 1196 | dispatched++; |
| 1197 | } |
| 1198 | ad->last_check_fifo[REQ_ASYNC] = jiffies; |
| 1199 | |
| 1200 | return dispatched; |
| 1201 | } |
| 1202 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | /* Signal that the write batch was uncontended, so we can't time it */ |
| 1204 | if (ad->batch_data_dir == REQ_ASYNC && !reads) { |
| 1205 | if (ad->current_write_count == 0 || !writes) |
| 1206 | ad->write_batch_idled = 1; |
| 1207 | } |
| 1208 | |
| 1209 | if (!(reads || writes) |
| 1210 | || ad->antic_status == ANTIC_WAIT_REQ |
| 1211 | || ad->antic_status == ANTIC_WAIT_NEXT |
| 1212 | || ad->changed_batch) |
| 1213 | return 0; |
| 1214 | |
Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1215 | if (!(reads && writes && as_batch_expired(ad))) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | /* |
| 1217 | * batch is still running or no reads or no writes |
| 1218 | */ |
| 1219 | arq = ad->next_arq[ad->batch_data_dir]; |
| 1220 | |
| 1221 | if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) { |
| 1222 | if (as_fifo_expired(ad, REQ_SYNC)) |
| 1223 | goto fifo_expired; |
| 1224 | |
| 1225 | if (as_can_anticipate(ad, arq)) { |
| 1226 | as_antic_waitreq(ad); |
| 1227 | return 0; |
| 1228 | } |
| 1229 | } |
| 1230 | |
| 1231 | if (arq) { |
| 1232 | /* we have a "next request" */ |
| 1233 | if (reads && !writes) |
| 1234 | ad->current_batch_expires = |
| 1235 | jiffies + ad->batch_expire[REQ_SYNC]; |
| 1236 | goto dispatch_request; |
| 1237 | } |
| 1238 | } |
| 1239 | |
| 1240 | /* |
| 1241 | * at this point we are not running a batch. select the appropriate |
| 1242 | * data direction (read / write) |
| 1243 | */ |
| 1244 | |
| 1245 | if (reads) { |
Jens Axboe | dd67d05 | 2006-06-21 09:36:18 +0200 | [diff] [blame] | 1246 | BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1247 | |
| 1248 | if (writes && ad->batch_data_dir == REQ_SYNC) |
| 1249 | /* |
| 1250 | * Last batch was a read, switch to writes |
| 1251 | */ |
| 1252 | goto dispatch_writes; |
| 1253 | |
| 1254 | if (ad->batch_data_dir == REQ_ASYNC) { |
| 1255 | WARN_ON(ad->new_batch); |
| 1256 | ad->changed_batch = 1; |
| 1257 | } |
| 1258 | ad->batch_data_dir = REQ_SYNC; |
| 1259 | arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next); |
| 1260 | ad->last_check_fifo[ad->batch_data_dir] = jiffies; |
| 1261 | goto dispatch_request; |
| 1262 | } |
| 1263 | |
| 1264 | /* |
| 1265 | * the last batch was a read |
| 1266 | */ |
| 1267 | |
| 1268 | if (writes) { |
| 1269 | dispatch_writes: |
Jens Axboe | dd67d05 | 2006-06-21 09:36:18 +0200 | [diff] [blame] | 1270 | BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1271 | |
| 1272 | if (ad->batch_data_dir == REQ_SYNC) { |
| 1273 | ad->changed_batch = 1; |
| 1274 | |
| 1275 | /* |
| 1276 | * new_batch might be 1 when the queue runs out of |
| 1277 | * reads. A subsequent submission of a write might |
| 1278 | * cause a change of batch before the read is finished. |
| 1279 | */ |
| 1280 | ad->new_batch = 0; |
| 1281 | } |
| 1282 | ad->batch_data_dir = REQ_ASYNC; |
| 1283 | ad->current_write_count = ad->write_batch_count; |
| 1284 | ad->write_batch_idled = 0; |
| 1285 | arq = ad->next_arq[ad->batch_data_dir]; |
| 1286 | goto dispatch_request; |
| 1287 | } |
| 1288 | |
| 1289 | BUG(); |
| 1290 | return 0; |
| 1291 | |
| 1292 | dispatch_request: |
| 1293 | /* |
| 1294 | * If a request has expired, service it. |
| 1295 | */ |
| 1296 | |
| 1297 | if (as_fifo_expired(ad, ad->batch_data_dir)) { |
| 1298 | fifo_expired: |
| 1299 | arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next); |
| 1300 | BUG_ON(arq == NULL); |
| 1301 | } |
| 1302 | |
| 1303 | if (ad->changed_batch) { |
| 1304 | WARN_ON(ad->new_batch); |
| 1305 | |
| 1306 | if (ad->nr_dispatched) |
| 1307 | return 0; |
| 1308 | |
| 1309 | if (ad->batch_data_dir == REQ_ASYNC) |
| 1310 | ad->current_batch_expires = jiffies + |
| 1311 | ad->batch_expire[REQ_ASYNC]; |
| 1312 | else |
| 1313 | ad->new_batch = 1; |
| 1314 | |
| 1315 | ad->changed_batch = 0; |
| 1316 | } |
| 1317 | |
| 1318 | /* |
| 1319 | * arq is the selected appropriate request. |
| 1320 | */ |
| 1321 | as_move_to_dispatch(ad, arq); |
| 1322 | |
| 1323 | return 1; |
| 1324 | } |
| 1325 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1326 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | * add arq to rbtree and fifo |
| 1328 | */ |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1329 | static void as_add_request(request_queue_t *q, struct request *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 | { |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1331 | struct as_data *ad = q->elevator->elevator_data; |
| 1332 | struct as_rq *arq = RQ_DATA(rq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | int data_dir; |
| 1334 | |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1335 | arq->state = AS_RQ_NEW; |
| 1336 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | if (rq_data_dir(arq->request) == READ |
Jens Axboe | b31dc66 | 2006-06-13 08:26:10 +0200 | [diff] [blame] | 1338 | || (arq->request->flags & REQ_RW_SYNC)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1339 | arq->is_sync = 1; |
| 1340 | else |
| 1341 | arq->is_sync = 0; |
| 1342 | data_dir = arq->is_sync; |
| 1343 | |
| 1344 | arq->io_context = as_get_io_context(); |
| 1345 | |
| 1346 | if (arq->io_context) { |
| 1347 | as_update_iohist(ad, arq->io_context->aic, arq->request); |
| 1348 | atomic_inc(&arq->io_context->aic->nr_queued); |
| 1349 | } |
| 1350 | |
Tejun Heo | ef9be1d | 2005-11-11 14:27:09 +0100 | [diff] [blame] | 1351 | as_add_arq_rb(ad, arq); |
| 1352 | if (rq_mergeable(arq->request)) |
| 1353 | as_add_arq_hash(ad, arq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | |
Tejun Heo | ef9be1d | 2005-11-11 14:27:09 +0100 | [diff] [blame] | 1355 | /* |
| 1356 | * set expire time (only used for reads) and add to fifo list |
| 1357 | */ |
| 1358 | arq->expires = jiffies + ad->fifo_expire[data_dir]; |
| 1359 | list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | |
Tejun Heo | ef9be1d | 2005-11-11 14:27:09 +0100 | [diff] [blame] | 1361 | as_update_arq(ad, arq); /* keep state machine up to date */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | arq->state = AS_RQ_QUEUED; |
| 1363 | } |
| 1364 | |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1365 | static void as_activate_request(request_queue_t *q, struct request *rq) |
| 1366 | { |
| 1367 | struct as_rq *arq = RQ_DATA(rq); |
| 1368 | |
| 1369 | WARN_ON(arq->state != AS_RQ_DISPATCHED); |
| 1370 | arq->state = AS_RQ_REMOVED; |
| 1371 | if (arq->io_context && arq->io_context->aic) |
| 1372 | atomic_dec(&arq->io_context->aic->nr_dispatched); |
| 1373 | } |
| 1374 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | static void as_deactivate_request(request_queue_t *q, struct request *rq) |
| 1376 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | struct as_rq *arq = RQ_DATA(rq); |
| 1378 | |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1379 | WARN_ON(arq->state != AS_RQ_REMOVED); |
| 1380 | arq->state = AS_RQ_DISPATCHED; |
| 1381 | if (arq->io_context && arq->io_context->aic) |
| 1382 | atomic_inc(&arq->io_context->aic->nr_dispatched); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1383 | } |
| 1384 | |
| 1385 | /* |
| 1386 | * as_queue_empty tells us if there are requests left in the device. It may |
| 1387 | * not be the case that a driver can get the next request even if the queue |
| 1388 | * is not empty - it is used in the block layer to check for plugging and |
| 1389 | * merging opportunities |
| 1390 | */ |
| 1391 | static int as_queue_empty(request_queue_t *q) |
| 1392 | { |
| 1393 | struct as_data *ad = q->elevator->elevator_data; |
| 1394 | |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1395 | return list_empty(&ad->fifo_list[REQ_ASYNC]) |
| 1396 | && list_empty(&ad->fifo_list[REQ_SYNC]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1397 | } |
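For context, the block-layer caller that ends up in as_queue_empty() has roughly the following shape; this is a simplified sketch of elv_queue_empty() for this kernel generation, not a verbatim copy:

    static int elv_queue_empty_sketch(request_queue_t *q)
    {
    	elevator_t *e = q->elevator;

    	if (!list_empty(&q->queue_head))
    		return 0;	/* requests already handed to the driver */

    	if (e->ops->elevator_queue_empty_fn)
    		return e->ops->elevator_queue_empty_fn(q);	/* -> as_queue_empty() */

    	return 1;
    }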
| 1398 | |
Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1399 | static struct request *as_former_request(request_queue_t *q, |
| 1400 | struct request *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 | { |
| 1402 | struct as_rq *arq = RQ_DATA(rq); |
| 1403 | struct rb_node *rbprev = rb_prev(&arq->rb_node); |
| 1404 | struct request *ret = NULL; |
| 1405 | |
| 1406 | if (rbprev) |
| 1407 | ret = rb_entry_arq(rbprev)->request; |
| 1408 | |
| 1409 | return ret; |
| 1410 | } |
| 1411 | |
Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1412 | static struct request *as_latter_request(request_queue_t *q, |
| 1413 | struct request *rq) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1414 | { |
| 1415 | struct as_rq *arq = RQ_DATA(rq); |
| 1416 | struct rb_node *rbnext = rb_next(&arq->rb_node); |
| 1417 | struct request *ret = NULL; |
| 1418 | |
| 1419 | if (rbnext) |
| 1420 | ret = rb_entry_arq(rbnext)->request; |
| 1421 | |
| 1422 | return ret; |
| 1423 | } |
| 1424 | |
| 1425 | static int |
| 1426 | as_merge(request_queue_t *q, struct request **req, struct bio *bio) |
| 1427 | { |
| 1428 | struct as_data *ad = q->elevator->elevator_data; |
| 1429 | sector_t rb_key = bio->bi_sector + bio_sectors(bio); |
| 1430 | struct request *__rq; |
| 1431 | int ret; |
| 1432 | |
| 1433 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | * see if the merge hash can satisfy a back merge |
| 1435 | */ |
| 1436 | __rq = as_find_arq_hash(ad, bio->bi_sector); |
| 1437 | if (__rq) { |
| 1438 | BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector); |
| 1439 | |
| 1440 | if (elv_rq_merge_ok(__rq, bio)) { |
| 1441 | ret = ELEVATOR_BACK_MERGE; |
| 1442 | goto out; |
| 1443 | } |
| 1444 | } |
| 1445 | |
| 1446 | /* |
| 1447 | * check for front merge |
| 1448 | */ |
| 1449 | __rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio)); |
| 1450 | if (__rq) { |
| 1451 | BUG_ON(rb_key != rq_rb_key(__rq)); |
| 1452 | |
| 1453 | if (elv_rq_merge_ok(__rq, bio)) { |
| 1454 | ret = ELEVATOR_FRONT_MERGE; |
| 1455 | goto out; |
| 1456 | } |
| 1457 | } |
| 1458 | |
| 1459 | return ELEVATOR_NO_MERGE; |
| 1460 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | if (ret) { |
| 1462 | if (rq_mergeable(__rq)) |
| 1463 | as_hot_arq_hash(ad, RQ_DATA(__rq)); |
| 1464 | } |
| 1465 | *req = __rq; |
| 1466 | return ret; |
| 1467 | } |
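The hash lookup and the rbtree lookup above test the two possible merge geometries. The predicates below are hypothetical helpers, written only to spell out the arithmetic that the BUG_ON()s in as_merge() assert:

    /* back merge: the bio starts exactly where the existing request ends */
    static inline int is_back_merge(sector_t rq_start, unsigned long rq_sectors,
    				sector_t bio_start)
    {
    	return rq_start + rq_sectors == bio_start;
    }

    /*
     * front merge: the bio ends exactly where the existing request starts,
     * which is why the rbtree is keyed on bi_sector + bio_sectors(bio)
     */
    static inline int is_front_merge(sector_t rq_start, sector_t bio_start,
    				 unsigned int bio_sects)
    {
    	return bio_start + bio_sects == rq_start;
    }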
| 1468 | |
| 1469 | static void as_merged_request(request_queue_t *q, struct request *req) |
| 1470 | { |
| 1471 | struct as_data *ad = q->elevator->elevator_data; |
| 1472 | struct as_rq *arq = RQ_DATA(req); |
| 1473 | |
| 1474 | /* |
| 1475 | * hash always needs to be repositioned, key is end sector |
| 1476 | */ |
| 1477 | as_del_arq_hash(arq); |
| 1478 | as_add_arq_hash(ad, arq); |
| 1479 | |
| 1480 | /* |
| 1481 | * if the merge was a front merge, we need to reposition request |
| 1482 | */ |
| 1483 | if (rq_rb_key(req) != arq->rb_key) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1484 | as_del_arq_rb(ad, arq); |
Tejun Heo | ef9be1d | 2005-11-11 14:27:09 +0100 | [diff] [blame] | 1485 | as_add_arq_rb(ad, arq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1486 | /* |
| 1487 | * Note! At this point in this function and in the next one, our cached |
| 1488 | * next request may no longer be optimal - e.g. the request may have |
| 1489 | * "grown" behind the disk head. We currently don't bother adjusting. |
| 1490 | */ |
| 1491 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1492 | } |
| 1493 | |
Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1494 | static void as_merged_requests(request_queue_t *q, struct request *req, |
| 1495 | struct request *next) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1496 | { |
| 1497 | struct as_data *ad = q->elevator->elevator_data; |
| 1498 | struct as_rq *arq = RQ_DATA(req); |
| 1499 | struct as_rq *anext = RQ_DATA(next); |
| 1500 | |
| 1501 | BUG_ON(!arq); |
| 1502 | BUG_ON(!anext); |
| 1503 | |
| 1504 | /* |
| 1505 | * reposition arq (this is the merged request) in hash, and in rbtree |
| 1506 | * in case of a front merge |
| 1507 | */ |
| 1508 | as_del_arq_hash(arq); |
| 1509 | as_add_arq_hash(ad, arq); |
| 1510 | |
| 1511 | if (rq_rb_key(req) != arq->rb_key) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | as_del_arq_rb(ad, arq); |
Tejun Heo | ef9be1d | 2005-11-11 14:27:09 +0100 | [diff] [blame] | 1513 | as_add_arq_rb(ad, arq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 | } |
| 1515 | |
| 1516 | /* |
| 1517 | * if anext expires before arq, give arq anext's expire time and move |
| 1518 | * arq into anext's position in the fifo (anext is about to be deleted) |
| 1519 | */ |
| 1520 | if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) { |
| 1521 | if (time_before(anext->expires, arq->expires)) { |
| 1522 | list_move(&arq->fifo, &anext->fifo); |
| 1523 | arq->expires = anext->expires; |
| 1524 | /* |
| 1525 | * Don't copy here but swap, because when anext is |
| 1526 | * removed below, it must contain the unused context |
| 1527 | */ |
| 1528 | swap_io_context(&arq->io_context, &anext->io_context); |
| 1529 | } |
| 1530 | } |
| 1531 | |
| 1532 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1533 | * kill knowledge of next, this one is a goner |
| 1534 | */ |
| 1535 | as_remove_queued_request(q, next); |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1536 | as_put_io_context(anext); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1537 | |
| 1538 | anext->state = AS_RQ_MERGED; |
| 1539 | } |
| 1540 | |
| 1541 | /* |
| 1542 | * This is executed in a "deferred" process context, by kblockd. It calls the |
| 1543 | * driver's request_fn so the driver can start servicing queued requests. |
| 1544 | * |
| 1545 | * IMPORTANT! This guy will reenter the elevator, so set up all queue global |
| 1546 | * state before calling, and don't rely on any state being kept across calls. |
| 1547 | * |
| 1548 | * FIXME! dispatch queue is not a queue at all! |
| 1549 | */ |
| 1550 | static void as_work_handler(void *data) |
| 1551 | { |
| 1552 | struct request_queue *q = data; |
| 1553 | unsigned long flags; |
| 1554 | |
| 1555 | spin_lock_irqsave(q->queue_lock, flags); |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1556 | if (!as_queue_empty(q)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 | q->request_fn(q); |
| 1558 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 1559 | } |
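as_work_handler() is never called directly; it runs via the antic_work item that as_init_queue() below binds to this handler, and it is queued to kblockd from the scheduler's anticipation and completion paths (those call sites are earlier in this file). A hedged sketch of such a kick, with a hypothetical wrapper name:

    /* sketch: hand the queue back to the driver from process context */
    static void as_kick_queue_sketch(struct as_data *ad)
    {
    	kblockd_schedule_work(&ad->antic_work);	/* runs as_work_handler(q) shortly */
    }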
| 1560 | |
| 1561 | static void as_put_request(request_queue_t *q, struct request *rq) |
| 1562 | { |
| 1563 | struct as_data *ad = q->elevator->elevator_data; |
| 1564 | struct as_rq *arq = RQ_DATA(rq); |
| 1565 | |
| 1566 | if (!arq) { |
| 1567 | WARN_ON(1); |
| 1568 | return; |
| 1569 | } |
| 1570 | |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1571 | if (unlikely(arq->state != AS_RQ_POSTSCHED && |
| 1572 | arq->state != AS_RQ_PRESCHED && |
| 1573 | arq->state != AS_RQ_MERGED)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | printk(KERN_ERR "arq->state %d\n", arq->state); |
| 1575 | WARN_ON(1); |
| 1576 | } |
| 1577 | |
| 1578 | mempool_free(arq, ad->arq_pool); |
| 1579 | rq->elevator_private = NULL; |
| 1580 | } |
| 1581 | |
Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 1582 | static int as_set_request(request_queue_t *q, struct request *rq, |
Al Viro | 8267e26 | 2005-10-21 03:20:53 -0400 | [diff] [blame] | 1583 | struct bio *bio, gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 | { |
| 1585 | struct as_data *ad = q->elevator->elevator_data; |
| 1586 | struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); |
| 1587 | |
| 1588 | if (arq) { |
| 1589 | memset(arq, 0, sizeof(*arq)); |
Jens Axboe | dd67d05 | 2006-06-21 09:36:18 +0200 | [diff] [blame] | 1590 | RB_CLEAR_NODE(&arq->rb_node); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 | arq->request = rq; |
| 1592 | arq->state = AS_RQ_PRESCHED; |
| 1593 | arq->io_context = NULL; |
Akinobu Mita | bae386f | 2006-04-24 21:12:59 +0200 | [diff] [blame] | 1594 | INIT_HLIST_NODE(&arq->hash); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | INIT_LIST_HEAD(&arq->fifo); |
| 1596 | rq->elevator_private = arq; |
| 1597 | return 0; |
| 1598 | } |
| 1599 | |
| 1600 | return 1; |
| 1601 | } |
| 1602 | |
Jens Axboe | 22e2c50 | 2005-06-27 10:55:12 +0200 | [diff] [blame] | 1603 | static int as_may_queue(request_queue_t *q, int rw, struct bio *bio) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | { |
| 1605 | int ret = ELV_MQUEUE_MAY; |
| 1606 | struct as_data *ad = q->elevator->elevator_data; |
| 1607 | struct io_context *ioc; |
| 1608 | if (ad->antic_status == ANTIC_WAIT_REQ || |
| 1609 | ad->antic_status == ANTIC_WAIT_NEXT) { |
| 1610 | ioc = as_get_io_context(); |
| 1611 | if (ad->io_context == ioc) |
| 1612 | ret = ELV_MQUEUE_MUST; |
| 1613 | put_io_context(ioc); |
| 1614 | } |
| 1615 | |
| 1616 | return ret; |
| 1617 | } |
| 1618 | |
| 1619 | static void as_exit_queue(elevator_t *e) |
| 1620 | { |
| 1621 | struct as_data *ad = e->elevator_data; |
| 1622 | |
| 1623 | del_timer_sync(&ad->antic_timer); |
| 1624 | kblockd_flush(); |
| 1625 | |
| 1626 | BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); |
| 1627 | BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); |
| 1628 | |
| 1629 | mempool_destroy(ad->arq_pool); |
| 1630 | put_io_context(ad->io_context); |
| 1631 | kfree(ad->hash); |
| 1632 | kfree(ad); |
| 1633 | } |
| 1634 | |
| 1635 | /* |
| 1636 | * initialize elevator private data (as_data), and allocate an arq for |
| 1637 | * each request on the free lists |
| 1638 | */ |
Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 1639 | static void *as_init_queue(request_queue_t *q, elevator_t *e) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1640 | { |
| 1641 | struct as_data *ad; |
| 1642 | int i; |
| 1643 | |
| 1644 | if (!arq_pool) |
Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 1645 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 | |
Christoph Lameter | 1946089 | 2005-06-23 00:08:19 -0700 | [diff] [blame] | 1647 | ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1648 | if (!ad) |
Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 1649 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1650 | memset(ad, 0, sizeof(*ad)); |
| 1651 | |
| 1652 | ad->q = q; /* Identify what queue the data belongs to */ |
| 1653 | |
Akinobu Mita | bae386f | 2006-04-24 21:12:59 +0200 | [diff] [blame] | 1654 | ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES, |
Christoph Lameter | 1946089 | 2005-06-23 00:08:19 -0700 | [diff] [blame] | 1655 | GFP_KERNEL, q->node); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1656 | if (!ad->hash) { |
| 1657 | kfree(ad); |
Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 1658 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1659 | } |
| 1660 | |
Christoph Lameter | 1946089 | 2005-06-23 00:08:19 -0700 | [diff] [blame] | 1661 | ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, |
| 1662 | mempool_free_slab, arq_pool, q->node); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | if (!ad->arq_pool) { |
| 1664 | kfree(ad->hash); |
| 1665 | kfree(ad); |
Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 1666 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | } |
| 1668 | |
| 1669 | /* anticipatory scheduling helpers */ |
| 1670 | ad->antic_timer.function = as_antic_timeout; |
| 1671 | ad->antic_timer.data = (unsigned long)q; |
| 1672 | init_timer(&ad->antic_timer); |
| 1673 | INIT_WORK(&ad->antic_work, as_work_handler, q); |
| 1674 | |
| 1675 | for (i = 0; i < AS_HASH_ENTRIES; i++) |
Akinobu Mita | bae386f | 2006-04-24 21:12:59 +0200 | [diff] [blame] | 1676 | INIT_HLIST_HEAD(&ad->hash[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1677 | |
| 1678 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); |
| 1679 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); |
| 1680 | ad->sort_list[REQ_SYNC] = RB_ROOT; |
| 1681 | ad->sort_list[REQ_ASYNC] = RB_ROOT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1682 | ad->fifo_expire[REQ_SYNC] = default_read_expire; |
| 1683 | ad->fifo_expire[REQ_ASYNC] = default_write_expire; |
| 1684 | ad->antic_expire = default_antic_expire; |
| 1685 | ad->batch_expire[REQ_SYNC] = default_read_batch_expire; |
| 1686 | ad->batch_expire[REQ_ASYNC] = default_write_batch_expire; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 | |
| 1688 | ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC]; |
| 1689 | ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10; |
| 1690 | if (ad->write_batch_count < 2) |
| 1691 | ad->write_batch_count = 2; |
| 1692 | |
Jens Axboe | bc1c116 | 2006-06-08 08:49:06 +0200 | [diff] [blame] | 1693 | return ad; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1694 | } |
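A worked example of the last few assignments in as_init_queue(): write_batch_count is the write batch expiry divided by 10, clamped to at least 2. So, purely for illustration, a write batch expiry of 120 jiffies would start write_batch_count at 12 requests per write batch, and the clamp only matters when the expiry is very short (under 20 jiffies).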
| 1695 | |
| 1696 | /* |
| 1697 | * sysfs parts below |
| 1698 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1699 | |
| 1700 | static ssize_t |
| 1701 | as_var_show(unsigned int var, char *page) |
| 1702 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 | return sprintf(page, "%d\n", var); |
| 1704 | } |
| 1705 | |
| 1706 | static ssize_t |
| 1707 | as_var_store(unsigned long *var, const char *page, size_t count) |
| 1708 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1709 | char *p = (char *) page; |
| 1710 | |
Jens Axboe | c9b3ad6 | 2005-07-27 11:43:37 -0700 | [diff] [blame] | 1711 | *var = simple_strtoul(p, &p, 10); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1712 | return count; |
| 1713 | } |
| 1714 | |
Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1715 | static ssize_t est_time_show(elevator_t *e, char *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1716 | { |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1717 | struct as_data *ad = e->elevator_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1718 | int pos = 0; |
| 1719 | |
Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1720 | pos += sprintf(page+pos, "%lu %% exit probability\n", |
| 1721 | 100*ad->exit_prob/256); |
| 1722 | pos += sprintf(page+pos, "%lu %% probability of exiting without a " |
| 1723 | "cooperating process submitting IO\n", |
| 1724 | 100*ad->exit_no_coop/256); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1725 | pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean); |
Nick Piggin | f5b3db0 | 2005-11-07 00:59:53 -0800 | [diff] [blame] | 1726 | pos += sprintf(page+pos, "%llu sectors new seek distance\n", |
| 1727 | (unsigned long long)ad->new_seek_mean); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1728 | |
| 1729 | return pos; |
| 1730 | } |
| 1731 | |
| 1732 | #define SHOW_FUNCTION(__FUNC, __VAR) \ |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1733 | static ssize_t __FUNC(elevator_t *e, char *page) \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1734 | { \ |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1735 | struct as_data *ad = e->elevator_data; \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1736 | return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ |
| 1737 | } |
Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1738 | SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); |
| 1739 | SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); |
| 1740 | SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); |
| 1741 | SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); |
| 1742 | SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1743 | #undef SHOW_FUNCTION |
| 1744 | |
| 1745 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1746 | static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1747 | { \ |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1748 | struct as_data *ad = e->elevator_data; \ |
| 1749 | int ret = as_var_store(__PTR, (page), count); \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 | if (*(__PTR) < (MIN)) \ |
| 1751 | *(__PTR) = (MIN); \ |
| 1752 | else if (*(__PTR) > (MAX)) \ |
| 1753 | *(__PTR) = (MAX); \ |
| 1754 | *(__PTR) = msecs_to_jiffies(*(__PTR)); \ |
| 1755 | return ret; \ |
| 1756 | } |
Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1757 | STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); |
| 1758 | STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); |
| 1759 | STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); |
| 1760 | STORE_FUNCTION(as_read_batch_expire_store, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1761 | &ad->batch_expire[REQ_SYNC], 0, INT_MAX); |
Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1762 | STORE_FUNCTION(as_write_batch_expire_store, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); |
| 1764 | #undef STORE_FUNCTION |
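For readers unfamiliar with this macro style, one SHOW/STORE pair expands to roughly the following (a mechanical expansion of the macros above, with the pointer dereferences simplified; values are exposed in milliseconds and converted back to jiffies on store):

    static ssize_t as_antic_expire_show(elevator_t *e, char *page)
    {
    	struct as_data *ad = e->elevator_data;
    	return as_var_show(jiffies_to_msecs(ad->antic_expire), page);
    }

    static ssize_t as_antic_expire_store(elevator_t *e, const char *page, size_t count)
    {
    	struct as_data *ad = e->elevator_data;
    	int ret = as_var_store(&ad->antic_expire, page, count);

    	if (ad->antic_expire < 0)
    		ad->antic_expire = 0;
    	else if (ad->antic_expire > INT_MAX)
    		ad->antic_expire = INT_MAX;
    	ad->antic_expire = msecs_to_jiffies(ad->antic_expire);
    	return ret;
    }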
| 1765 | |
Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1766 | #define AS_ATTR(name) \ |
| 1767 | __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | |
Al Viro | e572ec7 | 2006-03-18 22:27:18 -0500 | [diff] [blame] | 1769 | static struct elv_fs_entry as_attrs[] = { |
| 1770 | __ATTR_RO(est_time), |
| 1771 | AS_ATTR(read_expire), |
| 1772 | AS_ATTR(write_expire), |
| 1773 | AS_ATTR(antic_expire), |
| 1774 | AS_ATTR(read_batch_expire), |
| 1775 | AS_ATTR(write_batch_expire), |
| 1776 | __ATTR_NULL |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1777 | }; |
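These attributes surface as small files under the request queue's iosched directory in sysfs, readable and writable in milliseconds. The userspace sketch below assumes the conventional path and a device named sda; both are assumptions, so substitute your own block device:

    #include <stdio.h>

    int main(void)
    {
    	const char *path = "/sys/block/sda/queue/iosched/antic_expire";
    	unsigned int ms;
    	FILE *f = fopen(path, "r");

    	if (!f || fscanf(f, "%u", &ms) != 1)
    		return 1;
    	fclose(f);
    	printf("antic_expire = %u ms\n", ms);

    	f = fopen(path, "w");		/* needs root; the store hook converts ms to jiffies */
    	if (!f)
    		return 1;
    	fprintf(f, "%u\n", ms * 2);	/* e.g. double the anticipation window */
    	fclose(f);
    	return 0;
    }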
| 1778 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1779 | static struct elevator_type iosched_as = { |
| 1780 | .ops = { |
| 1781 | .elevator_merge_fn = as_merge, |
| 1782 | .elevator_merged_fn = as_merged_request, |
| 1783 | .elevator_merge_req_fn = as_merged_requests, |
Jens Axboe | b4878f2 | 2005-10-20 16:42:29 +0200 | [diff] [blame] | 1784 | .elevator_dispatch_fn = as_dispatch_request, |
| 1785 | .elevator_add_req_fn = as_add_request, |
| 1786 | .elevator_activate_req_fn = as_activate_request, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1787 | .elevator_deactivate_req_fn = as_deactivate_request, |
| 1788 | .elevator_queue_empty_fn = as_queue_empty, |
| 1789 | .elevator_completed_req_fn = as_completed_request, |
| 1790 | .elevator_former_req_fn = as_former_request, |
| 1791 | .elevator_latter_req_fn = as_latter_request, |
| 1792 | .elevator_set_req_fn = as_set_request, |
| 1793 | .elevator_put_req_fn = as_put_request, |
| 1794 | .elevator_may_queue_fn = as_may_queue, |
| 1795 | .elevator_init_fn = as_init_queue, |
| 1796 | .elevator_exit_fn = as_exit_queue, |
Al Viro | e17a948 | 2006-03-18 13:21:20 -0500 | [diff] [blame] | 1797 | .trim = as_trim, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | }, |
| 1799 | |
Al Viro | 3d1ab40 | 2006-03-18 18:35:43 -0500 | [diff] [blame] | 1800 | .elevator_attrs = as_attrs, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1801 | .elevator_name = "anticipatory", |
| 1802 | .elevator_owner = THIS_MODULE, |
| 1803 | }; |
| 1804 | |
| 1805 | static int __init as_init(void) |
| 1806 | { |
| 1807 | int ret; |
| 1808 | |
| 1809 | arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq), |
| 1810 | 0, 0, NULL, NULL); |
| 1811 | if (!arq_pool) |
| 1812 | return -ENOMEM; |
| 1813 | |
| 1814 | ret = elv_register(&iosched_as); |
| 1815 | if (!ret) { |
| 1816 | /* |
| 1817 | * don't allow AS to get unregistered, since we would have |
| 1818 | * to browse all tasks in the system and release their |
| 1819 | * as_io_context first |
| 1820 | */ |
| 1821 | __module_get(THIS_MODULE); |
| 1822 | return 0; |
| 1823 | } |
| 1824 | |
| 1825 | kmem_cache_destroy(arq_pool); |
| 1826 | return ret; |
| 1827 | } |
| 1828 | |
| 1829 | static void __exit as_exit(void) |
| 1830 | { |
Al Viro | 334e94d | 2006-03-18 15:05:53 -0500 | [diff] [blame] | 1831 | DECLARE_COMPLETION(all_gone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1832 | elv_unregister(&iosched_as); |
Al Viro | 334e94d | 2006-03-18 15:05:53 -0500 | [diff] [blame] | 1833 | ioc_gone = &all_gone; |
OGAWA Hirofumi | fba8227 | 2006-04-18 09:44:06 +0200 | [diff] [blame] | 1834 | /* ioc_gone's update must be visible before reading ioc_count */ |
| 1835 | smp_wmb(); |
Al Viro | 334e94d | 2006-03-18 15:05:53 -0500 | [diff] [blame] | 1836 | if (atomic_read(&ioc_count)) |
OGAWA Hirofumi | fba8227 | 2006-04-18 09:44:06 +0200 | [diff] [blame] | 1837 | wait_for_completion(ioc_gone); |
Al Viro | 334e94d | 2006-03-18 15:05:53 -0500 | [diff] [blame] | 1838 | synchronize_rcu(); |
Christoph Hellwig | 83521d3 | 2005-10-30 15:01:39 -0800 | [diff] [blame] | 1839 | kmem_cache_destroy(arq_pool); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1840 | } |
| 1841 | |
| 1842 | module_init(as_init); |
| 1843 | module_exit(as_exit); |
| 1844 | |
| 1845 | MODULE_AUTHOR("Nick Piggin"); |
| 1846 | MODULE_LICENSE("GPL"); |
| 1847 | MODULE_DESCRIPTION("anticipatory IO scheduler"); |