/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

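/*
 * Release the CFQ state hanging off this io_context: the first
 * cfq_io_context on the list carries the scheduler's ->dtor hook, which
 * is expected to tear down every cic attached to this context.
 */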
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(put_io_context);

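/*
 * Mirror of cfq_dtor() for the task-exit path: invoke the scheduler's
 * ->exit hook via the first cfq_io_context on the list, which is expected
 * to exit every cic attached to this io_context.
 */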
static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/*
 * Called by the exiting task: detach the io_context from the task and drop
 * the task's reference. CFQ state is torn down once the last task sharing
 * the context has gone.
 */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);

	put_io_context(ioc);
}

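/*
 * Allocate a new io_context from the slab cache on the given NUMA node and
 * initialise it; the single initial reference belongs to the caller.
 */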
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_long_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = 0; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
		ret->cgroup_changed = 0;
#endif
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ret = current_io_context(gfp_flags, node);
		if (unlikely(!ret))
			break;
	} while (!atomic_long_inc_not_zero(&ret->refcount));

	return ret;
}
EXPORT_SYMBOL(get_io_context);
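/*
 * Typical caller pattern (an illustrative sketch, not taken from an
 * in-tree user; "node" stands for whatever NUMA node the caller cares
 * about): take a reference for as long as the io_context is stashed
 * somewhere, then drop it with put_io_context().
 *
 *	struct io_context *ioc = get_io_context(GFP_NOIO, node);
 *	if (ioc) {
 *		... stash ioc in a request or cic ...
 *		put_io_context(ioc);
 *	}
 */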

/*
 * Set up the slab cache used for all io_context allocations at subsystem
 * init time.
 */
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);