#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};
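
/*
 * Illustrative note, not part of the original header: the think time and
 * seek fields above feed running estimates that the anticipatory scheduler
 * uses to decide whether it pays off to keep the queue idle briefly and
 * wait for the next request from the same process.  A rough sketch of the
 * idea, assuming the actual update logic lives in block/as-iosched.c:
 *
 *	think time sample ~= time a new request arrives - last_end_request
 *	ttime_mean        ~= ttime_total / ttime_samples  (decayed averages)
 *
 * The seek_* fields track the distance between last_request_pos and each
 * new request in the same decayed-average fashion.
 */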

struct cfq_queue;
struct cfq_io_context {
	void *key;
	unsigned long dead_key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	struct list_head queue_list;
	struct hlist_node cic_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */

	struct rcu_head rcu_head;
};

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed. These could be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;
	unsigned short ioprio_changed;

#ifdef CONFIG_BLK_CGROUP
	unsigned short cgroup_changed;
#endif

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */

	struct as_io_context *aic;
	struct radix_tree_root radix_root;
	struct hlist_head cic_list;
	void *ioc_data;
};
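
/*
 * Usage sketch, added for illustration and not part of the original header:
 * callers take a reference with get_io_context() and drop it with
 * put_io_context(); the structure is only freed once the last reference
 * goes away.  Roughly:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_io_context(GFP_KERNEL, -1);	// current task's ioc, +1 ref
 *	if (ioc) {
 *		// ... use ioc->ioprio, ioc->radix_root, etc. ...
 *		put_io_context(ioc);		// drop the reference
 *	}
 */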

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * if ref count is zero, don't allow sharing (ioc is going away, it's
	 * a race).
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}
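
/*
 * Illustrative example, not from the original file: a clone path that wants
 * the child to share the parent's context could use ioc_task_link() and fall
 * back to a fresh allocation when the old context is already dying:
 *
 *	struct io_context *new_ioc = ioc_task_link(parent->io_context);
 *
 *	if (!new_ioc)
 *		new_ioc = alloc_io_context(GFP_KERNEL, -1);
 *	tsk->io_context = new_ioc;
 *
 * "parent" and "tsk" are hypothetical task_struct pointers here; the real
 * CLONE_IO handling lives in the fork path (see copy_io() in kernel/fork.c).
 */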

struct task_struct;
#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(struct task_struct *task)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}
#endif

#endif