/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

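/*
 * The lookup in cfq_dtor() below only needs a single cic to reach the
 * scheduler's dtor callback; in CFQ that callback (cfq_free_io_context())
 * then walks and frees every cic attached to this io_context.
 */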
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(put_io_context);

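/*
 * Illustrative sketch of the put side (hypothetical helper, compiled
 * out): put_io_context() tolerates NULL and tells the caller when the
 * final reference was dropped.
 */
#if 0
static void ioc_drop_ref_example(struct io_context *ioc)
{
	/* NULL is fine: put_io_context(NULL) simply returns 1 */
	if (put_io_context(ioc)) {
		/* that was the last reference; ioc has been freed */
	}
}
#endif
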
static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);

	put_io_context(ioc);
}

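/*
 * Note on the two counters used above: nr_tasks counts the tasks sharing
 * this io_context (e.g. clones created with CLONE_IO), while refcount
 * counts every holder, including I/O scheduler state. The last exiting
 * task triggers cfq_exit(), but the structure itself is freed only once
 * put_io_context() drops refcount to zero.
 */
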
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_long_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = 0; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
		ret->cgroup_changed = 0;
#endif
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

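/*
 * The smp_wmb() in current_io_context() orders the initialisation done
 * in alloc_io_context() before the store to tsk->io_context; it pairs
 * with the read-side barrier in set_task_ioprio(), which may observe
 * the freshly installed pointer.
 */
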
/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ret = current_io_context(gfp_flags, node);
		if (unlikely(!ret))
			break;
	} while (!atomic_long_inc_not_zero(&ret->refcount));

	return ret;
}
EXPORT_SYMBOL(get_io_context);

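/*
 * Usage sketch (hypothetical function, compiled out): a submit path
 * takes a counted reference and must balance it with put_io_context().
 */
#if 0
static void example_submit_path(void)
{
	struct io_context *ioc;

	ioc = get_io_context(GFP_KERNEL, -1);	/* -1: no NUMA preference */
	if (!ioc)
		return;		/* allocation failed */

	/* ... mark the request with ioc, submit the I/O ... */

	put_io_context(ioc);	/* drop the reference from get_io_context() */
}
#endif
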
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);