/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

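/*
 * Invoke the destructor CFQ attached to @ioc (cic->dtor), if a
 * cfq_io_context is linked on ioc->cic_list.
 */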
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	rcu_read_lock();
	cfq_dtor(ioc);
	rcu_read_unlock();

	kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

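/*
 * Invoke the exit hook CFQ attached to @ioc (cic->exit), if a
 * cfq_io_context is linked on ioc->cic_list.  The lookup is done under
 * rcu_read_lock().
 */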
static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);

	put_io_context(ioc);
}

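/*
 * Allocate and initialise a new io_context.  The returned context starts
 * with a reference count and task count of one; the caller owns the
 * initial reference.
 */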
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->cic_list);

	return ioc;
}

/**
 * current_io_context - get io_context of %current
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of %current. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context does NOT have its
 * reference count incremented. Because io_context is exited only on task
 * exit, %current can be sure that the returned io_context is valid and
 * alive as long as it is executing.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ioc = current_io_context(gfp_flags, node);
		if (unlikely(!ioc))
			break;
	} while (!atomic_long_inc_not_zero(&ioc->refcount));

	return ioc;
}
EXPORT_SYMBOL(get_io_context);
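
/*
 * Illustrative sketch (not part of this file): a submitter would
 * typically pair get_io_context() with put_io_context() once it is done
 * with the context, e.g.
 *
 *	struct io_context *ioc = get_io_context(GFP_NOIO, node);
 *
 *	if (ioc) {
 *		... use ioc while building/queueing the request ...
 *		put_io_context(ioc);
 *	}
 *
 * where 'node' is the NUMA node to allocate from if no context exists yet.
 */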
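/*
 * Set up the slab cache that io_contexts are allocated from.  SLAB_PANIC
 * means boot fails outright if the cache cannot be created.
 */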
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);