/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

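/*
 * Tear down the CFQ state hanging off an io context. Only the first
 * cfq_io_context on the list is passed to its ->dtor() hook; that hook
 * is expected to walk and free the entire cic list, not just one entry.
 */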
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_read(&ioc->refcount) == 0);

	if (atomic_dec_and_test(&ioc->refcount)) {
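		/*
		 * Last reference is gone; run the AS destructor under the
		 * RCU read lock, presumably because the hook's data may be
		 * freed via an RCU grace period once the queue goes away.
		 */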
		rcu_read_lock();
		if (ioc->aic && ioc->aic->dtor)
			ioc->aic->dtor(ioc->aic);
		rcu_read_unlock();
		cfq_dtor(ioc);

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(put_io_context);

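/*
 * Mirror of cfq_dtor() for task exit: invoke the ->exit() hook of the
 * first cfq_io_context under the RCU read lock; the hook is expected to
 * deal with the whole cic list.
 */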
static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(void)
{
	struct io_context *ioc;

	task_lock(current);
	ioc = current->io_context;
	current->io_context = NULL;
	task_unlock(current);

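	/*
	 * nr_tasks counts the tasks sharing this context (e.g. via
	 * CLONE_IO); only the last exiting task runs the elevator exit
	 * hooks and drops the reference that keeps the ioc alive.
	 */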
	if (atomic_dec_and_test(&ioc->nr_tasks)) {
		if (ioc->aic && ioc->aic->exit)
			ioc->aic->exit(ioc->aic);
		cfq_exit(ioc);

		put_io_context(ioc);
	}
}

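/*
 * Allocate a fresh io_context, initialised with a single reference and
 * a single task using it. The caller is responsible for attaching it
 * to a task, or for dropping it with put_io_context() on failure paths.
 */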
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret;

	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ret) {
		atomic_set(&ret->refcount, 1);
		atomic_set(&ret->nr_tasks, 1);
		spin_lock_init(&ret->lock);
		ret->ioprio_changed = 0;
		ret->ioprio = 0;
		ret->last_waited = jiffies; /* doesn't matter... */
		ret->nr_batch_requests = 0; /* because this is 0 */
		ret->aic = NULL;
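		/*
		 * The radix tree maps per-queue CFQ state (keyed by the
		 * queue's cfq data pointer) to this context's cic entries;
		 * it is looked up from the IO path, hence the atomic mask.
		 */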
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * This returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it stays within `current` context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
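/*
 * Typical caller pattern (sketch only; real callers live elsewhere in
 * the block layer):
 *
 *	ioc = get_io_context(GFP_NOIO, q->node);
 *	if (ioc) {
 *		... use ioc ...
 *		put_io_context(ioc);
 *	}
 */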
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ret = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ret = current_io_context(gfp_flags, node);
		if (unlikely(!ret))
			break;
	} while (!atomic_inc_not_zero(&ret->refcount));

	return ret;
}
EXPORT_SYMBOL(get_io_context);

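/*
 * Make *pdst reference the same io_context as *psrc: take an extra ref
 * on the source and drop the reference previously held through *pdst.
 */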
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		BUG_ON(atomic_read(&src->refcount) == 0);
		atomic_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}
EXPORT_SYMBOL(copy_io_context);

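/*
 * Set up the slab cache for io contexts at subsys_initcall time;
 * SLAB_PANIC means a failure to create the cache aborts the boot.
 */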
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);