/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit and free an icq.  Called with both ioc and q locked.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to and clearing it from @icq are
	 * done under queue_lock.  If it's not pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	struct request_queue *last_q = NULL;

	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			/*
			 * Need to switch to @this_q.  Once we release
			 * @ioc->lock, it can go away along with @icq.
			 * Hold on to it.
			 */
			__blk_get_queue(this_q);

			/*
			 * blk_put_queue() might sleep thanks to kobject
			 * idiocy.  Always release both locks, put and
			 * restart.
			 */
			if (last_q) {
				spin_unlock(last_q->queue_lock);
				spin_unlock_irq(&ioc->lock);
				blk_put_queue(last_q);
			} else {
				spin_unlock_irq(&ioc->lock);
			}

			last_q = this_q;
			spin_lock_irq(this_q->queue_lock);
			spin_lock(&ioc->lock);
			continue;
		}
		ioc_exit_icq(icq);
	}

	if (last_q) {
		spin_unlock(last_q->queue_lock);
		spin_unlock_irq(&ioc->lock);
		blk_put_queue(last_q);
	} else {
		spin_unlock_irq(&ioc->lock);
	}

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	/*
	 * With no icq's there is nothing for the release work to do and it
	 * is never scheduled; free the ioc here so it isn't leaked.
	 */
	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

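	/* drop @task's count and reference to the now-detached @ioc */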
	atomic_dec(&ioc->nr_tasks);
	put_io_context(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_exit_icq(icq);
		spin_unlock(&ioc->lock);
	}
}

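/*
 * Slow path of create_io_context().  Allocate and initialize a new
 * io_context for @task and try to install it; the allocation is freed
 * instead if @task already has an io_context or if @task, which isn't
 * %current, is exiting.
 */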
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair from @ioc.  Must
 * be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure the io_cq linking %current->io_context and @q exists.  If
 * the io_context and/or the icq don't exist, they will be created using
 * @gfp_mask.
 *
 * The caller is responsible for ensuring the io_context won't go away
 * and @q is alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		/* insertion failed, e.g. someone else linked one first; use theirs */
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

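/*
 * Set bit @which in the changed mask of every icq associated with @ioc.
 * Both callers below hold @ioc->lock across the walk.
 */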
void ioc_set_changed(struct io_context *ioc, int which)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		set_bit(which, &icq->changed);
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's.  iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
					sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);