blob: 3e70b21884a948880f90e28684e5d5778e7a3d35 [file] [log] [blame]
Jens Axboefd0928d2008-01-24 08:52:45 +01001#ifndef IOCONTEXT_H
2#define IOCONTEXT_H
3
Jens Axboe4ac845a2008-01-24 08:44:49 +01004#include <linux/radix-tree.h>
Fabio Checconi34e6bbf2008-04-02 14:31:02 +02005#include <linux/rcupdate.h>
Jens Axboe4ac845a2008-01-24 08:44:49 +01006
struct cfq_queue;

/*
 * Per-process, per-device state used by the CFQ I/O scheduler.  One of
 * these links a task's io_context to the cfq queues it submits I/O on.
 * Instances are reachable both from the io_context (radix tree /
 * cic_list) and from the scheduler side (queue_list), and are freed via
 * RCU (see rcu_head).
 */
struct cfq_io_context {
	void *key;			/* lookup key for the owning queue's
					 * radix tree — presumably the
					 * request_queue; confirm in
					 * cfq-iosched.c */

	/*
	 * Two queues per context — NOTE(review): most likely indexed by
	 * sync/async direction; verify against the cfqq[] users.
	 */
	struct cfq_queue *cfqq[2];

	struct io_context *ioc;		/* back-pointer to the owning ioc */

	unsigned long last_end_request;	/* time the last request completed */

	/* think-time statistics: total, sample count, running mean */
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	struct list_head queue_list;	/* linkage on the scheduler's list */
	struct hlist_node cic_list;	/* linkage on ioc->cic_list */

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */

	struct rcu_head rcu_head;	/* deferred free under RCU */
};
29
/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed. These could be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;		/* object lifetime; taken via
					 * ioc_task_link(), dropped via
					 * put_io_context() */
	atomic_t nr_tasks;		/* number of tasks sharing this ioc */

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;		/* current I/O priority */
	unsigned short ioprio_changed;	/* flag: ioprio updated, schedulers
					 * must re-read it */

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
	unsigned short cgroup_changed;	/* flag: blkio cgroup membership
					 * changed */
#endif

	/*
	 * For request batching
	 */
	int nr_batch_requests;     /* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root radix_root;	/* cfq_io_context lookup */
	struct hlist_head cic_list;		/* all cics of this ioc */
	void __rcu *ioc_data;			/* RCU-protected scheduler
						 * hint — presumably a cached
						 * last-hit cic; confirm in
						 * cfq-iosched.c */
};
58
Jens Axboed38ecf92008-01-24 08:53:35 +010059static inline struct io_context *ioc_task_link(struct io_context *ioc)
60{
61 /*
62 * if ref count is zero, don't allow sharing (ioc is going away, it's
63 * a race).
64 */
Nikanth Karthikesand9c7d392009-06-10 12:57:06 -070065 if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
Li Zefancbb4f262009-07-31 08:55:48 +020066 atomic_inc(&ioc->nr_tasks);
Jens Axboed38ecf92008-01-24 08:53:35 +010067 return ioc;
Jens Axboed237e5c2008-04-15 09:25:33 +020068 }
Jens Axboed38ecf92008-01-24 08:53:35 +010069
70 return NULL;
71}
72
struct task_struct;
#ifdef CONFIG_BLOCK
/* drop a reference; see block/blk-ioc.c for return semantics */
int put_io_context(struct io_context *ioc);
/* detach and release the exiting task's io_context */
void exit_io_context(struct task_struct *task);
/* get current task's ioc, allocating one if needed */
struct io_context *get_io_context(gfp_t gfp_flags, int node);
/* allocate a fresh, unattached io_context */
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
/* share *psrc into *pdst — presumably swaps references; confirm in
 * block/blk-ioc.c */
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
/* !CONFIG_BLOCK: no-op stubs so callers need no ifdefs of their own */
static inline void exit_io_context(struct task_struct *task)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}
#endif
91
Jens Axboefd0928d2008-01-24 08:52:45 +010092#endif