#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

/* bit numbers for io_cq->changed, set by ioc_*_changed() below */
enum {
	ICQ_IOPRIO_CHANGED,
	ICQ_CGROUP_CHANGED,
};

/*
 * An io_cq (icq) is the association between an io_context (ioc) and a
 * request_queue (q).  This is used by elevators which need to track
 * information per ioc - q pair.
 *
 * An elevator can request use of icq's by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be no smaller than those of
 * struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is to define a struct
 * which contains io_cq as its first member followed by the private
 * members, and to use its size and align.  For example,
 *
 *	struct snail_io_cq {
 *		struct io_cq	icq;
 *		int		poke_snail;
 *		int		feed_snail;
 *	};
 *
 *	struct elevator_type snail_elv_type = {
 *		.ops =		{ ... },
 *		.icq_size =	sizeof(struct snail_io_cq),
 *		.icq_align =	__alignof__(struct snail_io_cq),
 *		...
 *	};
 *
 * If icq_size is set, block core will manage icq's.  Each request will
 * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
 * is called and will hold a reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 * A sketch of a typical init callback follows this comment.
 *
 * An elevator may look up an icq using ioc_lookup_icq() while holding the
 * queue lock, but the returned icq is valid only until the queue lock is
 * released.  Elevators cannot and should not try to create or destroy
 * icq's.
 *
 * As icq's are linked from both ioc and q, the locking rules are a bit
 * complex.
 *
 * - ioc lock nests inside q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
 *   q->icq_list and icq->q_node by q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
 *   itself is protected by q lock.  However, both the indexes and icq
 *   itself are also RCU managed and lookup can be performed holding only
 *   the q lock.
 *
 * - icq's are not reference counted.  They are destroyed when either the
 *   ioc or q goes away.  Each request with icq set holds an extra
 *   reference to the ioc to ensure it stays alive until the request is
 *   completed.
 *
 * - Linking and unlinking icq's is done while holding both the ioc and q
 *   locks.  Due to the lock ordering, q exit is simple but ioc exit
 *   requires a reverse-order double lock dance; see the sketch following
 *   struct io_cq below.
 */
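
/*
 * For illustration only, a sketch rather than code from this header: with
 * the hypothetical snail elevator above, an init callback handed the block
 * core's io_cq can recover its private tail area with container_of(),
 * because io_cq is the first member of the containing struct:
 *
 *	static void snail_init_icq(struct io_cq *icq)
 *	{
 *		struct snail_io_cq *sic =
 *			container_of(icq, struct snail_io_cq, icq);
 *
 *		sic->poke_snail = 0;
 *		sic->feed_snail = 0;
 *	}
 */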
struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through the icq_list of q and
	 * ioc respectively.  Both fields are unused once ioc_exit_icq()
	 * has been called, so they share space with __rcu_icq_cache and
	 * __rcu_head, which are used to free the io_cq via RCU.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned long		changed;	/* ICQ_*_CHANGED bits */
};
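
/*
 * A condensed sketch of the reverse-order double lock dance mentioned
 * above, modelled on the ioc release path in block/blk-ioc.c; the real
 * code differs in detail.  Since the ioc lock nests inside the q lock,
 * the ioc side may only trylock the q lock and must back off and retry
 * when the ordering cannot be honored:
 *
 *	spin_lock_irq(&ioc->lock);
 *	while (!hlist_empty(&ioc->icq_list)) {
 *		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 *						struct io_cq, ioc_node);
 *		struct request_queue *q = icq->q;
 *
 *		if (spin_trylock(q->queue_lock)) {
 *			ioc_exit_icq(icq);
 *			spin_unlock(q->queue_lock);
 *		} else {
 *			spin_unlock_irq(&ioc->lock);
 *			cpu_relax();
 *			spin_lock_irq(&ioc->lock);
 *		}
 *	}
 *	spin_unlock_irq(&ioc->lock);
 */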

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	/*
	 * For request batching
	 */
	int nr_batch_requests;	/* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
};

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the refcount is already zero, don't allow sharing: the ioc
	 * is going away and we lost the race against its release.
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}
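
/*
 * A minimal usage sketch, modelled on copy_io() in kernel/fork.c rather
 * than taken from this header: when a clone shares I/O context (CLONE_IO),
 * the child takes its own reference via ioc_task_link() and the clone
 * fails if the ioc is already on its way out:
 *
 *	if (clone_flags & CLONE_IO) {
 *		tsk->io_context = ioc_task_link(current->io_context);
 *		if (unlikely(!tsk->io_context))
 *			return -ENOMEM;
 *	}
 */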

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc,
				  struct request_queue *locked_q) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif
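
/*
 * A hedged usage sketch, not part of this header: a caller that needs a
 * stable ioc for a task can take a reference with get_task_io_context()
 * and drop it with put_io_context().  Passing NULL as locked_q tells
 * put_io_context() that no queue lock is currently held:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		... inspect or adjust ioc->ioprio under ioc->lock ...
 *		put_io_context(ioc, NULL);
 *	}
 */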

#endif