Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1 | #ifndef IOCONTEXT_H |
| 2 | #define IOCONTEXT_H |
| 3 | |
Jens Axboe | 4ac845a | 2008-01-24 08:44:49 +0100 | [diff] [blame] | 4 | #include <linux/radix-tree.h> |
Fabio Checconi | 34e6bbf | 2008-04-02 14:31:02 +0200 | [diff] [blame] | 5 | #include <linux/rcupdate.h> |
Jens Axboe | 4ac845a | 2008-01-24 08:44:49 +0100 | [diff] [blame] | 6 | |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 7 | struct cfq_queue; |
/*
 * Per-context "think time" statistics: how long a process tends to wait
 * between the completion of one request and the issue of the next.  The
 * CFQ scheduler uses these to decide whether idling on a queue is
 * worthwhile.  Units are presumably jiffies -- maintained in cfq-iosched.c,
 * not visible here.
 */
struct cfq_ttime {
	unsigned long last_end_request;	/* time the last request completed */

	unsigned long ttime_total;	/* accumulated think time */
	unsigned long ttime_samples;	/* number of samples accumulated */
	unsigned long ttime_mean;	/* running mean think time */
};
| 15 | |
/*
 * CFQ's per-(task, request_queue) context.  One of these hangs off an
 * io_context (via ->radix_root / ->cic_list) for every CFQ-managed device
 * the task has done I/O to.  Freed via RCU (see rcu_head below).
 */
struct cfq_io_context {
	void *key;			/* lookup key into ioc->radix_root;
					 * presumably the cfq_data pointer --
					 * confirm against cfq-iosched.c */

	struct cfq_queue *cfqq[2];	/* queues, indexed by sync/async --
					 * TODO confirm index meaning */

	struct io_context *ioc;		/* owning io_context, back pointer */

	struct cfq_ttime ttime;		/* think-time stats for this context */

	struct list_head queue_list;	/* linkage on a per-device list */
	struct hlist_node cic_list;	/* linkage on ioc->cic_list */

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */

	struct rcu_head rcu_head;	/* for deferred (RCU) freeing */
};
| 33 | |
| 34 | /* |
Jens Axboe | d38ecf9 | 2008-01-24 08:53:35 +0100 | [diff] [blame] | 35 | * I/O subsystem state of the associated processes. It is refcounted |
| 36 | * and kmalloc'ed. These could be shared between processes. |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 37 | */ |
| 38 | struct io_context { |
Nikanth Karthikesan | d9c7d39 | 2009-06-10 12:57:06 -0700 | [diff] [blame] | 39 | atomic_long_t refcount; |
Jens Axboe | d38ecf9 | 2008-01-24 08:53:35 +0100 | [diff] [blame] | 40 | atomic_t nr_tasks; |
| 41 | |
| 42 | /* all the fields below are protected by this lock */ |
| 43 | spinlock_t lock; |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 44 | |
| 45 | unsigned short ioprio; |
| 46 | unsigned short ioprio_changed; |
| 47 | |
Ben Blum | 67523c4 | 2010-03-10 15:22:11 -0800 | [diff] [blame] | 48 | #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) |
Vivek Goyal | 31e4c28 | 2009-12-03 12:59:42 -0500 | [diff] [blame] | 49 | unsigned short cgroup_changed; |
| 50 | #endif |
| 51 | |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 52 | /* |
| 53 | * For request batching |
| 54 | */ |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 55 | int nr_batch_requests; /* Number of requests left in the batch */ |
Richard Kennedy | 58c24a6 | 2010-02-26 14:00:43 +0100 | [diff] [blame] | 56 | unsigned long last_waited; /* Time last woken after wait for request */ |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 57 | |
Jens Axboe | 4ac845a | 2008-01-24 08:44:49 +0100 | [diff] [blame] | 58 | struct radix_tree_root radix_root; |
Jens Axboe | ffc4e75 | 2008-02-19 10:02:29 +0100 | [diff] [blame] | 59 | struct hlist_head cic_list; |
Arnd Bergmann | 4d2deb4 | 2010-02-24 20:01:56 +0100 | [diff] [blame] | 60 | void __rcu *ioc_data; |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 61 | }; |
| 62 | |
Jens Axboe | d38ecf9 | 2008-01-24 08:53:35 +0100 | [diff] [blame] | 63 | static inline struct io_context *ioc_task_link(struct io_context *ioc) |
| 64 | { |
| 65 | /* |
| 66 | * if ref count is zero, don't allow sharing (ioc is going away, it's |
| 67 | * a race). |
| 68 | */ |
Nikanth Karthikesan | d9c7d39 | 2009-06-10 12:57:06 -0700 | [diff] [blame] | 69 | if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { |
Li Zefan | cbb4f26 | 2009-07-31 08:55:48 +0200 | [diff] [blame] | 70 | atomic_inc(&ioc->nr_tasks); |
Jens Axboe | d38ecf9 | 2008-01-24 08:53:35 +0100 | [diff] [blame] | 71 | return ioc; |
Jens Axboe | d237e5c | 2008-04-15 09:25:33 +0200 | [diff] [blame] | 72 | } |
Jens Axboe | d38ecf9 | 2008-01-24 08:53:35 +0100 | [diff] [blame] | 73 | |
| 74 | return NULL; |
| 75 | } |
| 76 | |
struct task_struct;
#ifdef CONFIG_BLOCK
/* drop a reference; implementation in block/blk-ioc.c */
int put_io_context(struct io_context *ioc);
/* detach and release the ioc of an exiting task */
void exit_io_context(struct task_struct *task);
/* get (or create) current task's ioc, with a new reference */
struct io_context *get_io_context(gfp_t gfp_flags, int node);
/* allocate a fresh, unattached ioc */
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
#else
/*
 * !CONFIG_BLOCK: no-op stubs so callers don't need their own ifdefs.
 */
static inline void exit_io_context(struct task_struct *task)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;	/* mimic "last reference dropped" */
}
#endif
| 94 | |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 95 | #endif |