blob: 08b987bccf89aff533268c0718770583ba204026 [file] [log] [blame]
Jens Axboefd0928d2008-01-24 08:52:45 +01001#ifndef IOCONTEXT_H
2#define IOCONTEXT_H
3
Jens Axboe4ac845a2008-01-24 08:44:49 +01004#include <linux/radix-tree.h>
Fabio Checconi34e6bbf2008-04-02 14:31:02 +02005#include <linux/rcupdate.h>
Jens Axboe4ac845a2008-01-24 08:44:49 +01006
Jens Axboefd0928d2008-01-24 08:52:45 +01007/*
8 * This is the per-process anticipatory I/O scheduler state.
9 */
10struct as_io_context {
11 spinlock_t lock;
12
13 void (*dtor)(struct as_io_context *aic); /* destructor */
14 void (*exit)(struct as_io_context *aic); /* called on task exit */
15
16 unsigned long state;
17 atomic_t nr_queued; /* queued reads & sync writes */
18 atomic_t nr_dispatched; /* number of requests gone to the drivers */
19
20 /* IO History tracking */
21 /* Thinktime */
22 unsigned long last_end_request;
23 unsigned long ttime_total;
24 unsigned long ttime_samples;
25 unsigned long ttime_mean;
26 /* Layout pattern */
27 unsigned int seek_samples;
28 sector_t last_request_pos;
29 u64 seek_total;
30 sector_t seek_mean;
31};
32
33struct cfq_queue;
34struct cfq_io_context {
Jens Axboefd0928d2008-01-24 08:52:45 +010035 void *key;
Jens Axboe4ac845a2008-01-24 08:44:49 +010036 unsigned long dead_key;
Jens Axboefd0928d2008-01-24 08:52:45 +010037
38 struct cfq_queue *cfqq[2];
39
40 struct io_context *ioc;
41
42 unsigned long last_end_request;
43 sector_t last_request_pos;
44
45 unsigned long ttime_total;
46 unsigned long ttime_samples;
47 unsigned long ttime_mean;
48
49 unsigned int seek_samples;
50 u64 seek_total;
51 sector_t seek_mean;
52
53 struct list_head queue_list;
Jens Axboeffc4e752008-02-19 10:02:29 +010054 struct hlist_node cic_list;
Jens Axboefd0928d2008-01-24 08:52:45 +010055
56 void (*dtor)(struct io_context *); /* destructor */
57 void (*exit)(struct io_context *); /* called on task exit */
Fabio Checconi34e6bbf2008-04-02 14:31:02 +020058
59 struct rcu_head rcu_head;
Jens Axboefd0928d2008-01-24 08:52:45 +010060};
61
62/*
Jens Axboed38ecf92008-01-24 08:53:35 +010063 * I/O subsystem state of the associated processes. It is refcounted
64 * and kmalloc'ed. These could be shared between processes.
Jens Axboefd0928d2008-01-24 08:52:45 +010065 */
66struct io_context {
67 atomic_t refcount;
Jens Axboed38ecf92008-01-24 08:53:35 +010068 atomic_t nr_tasks;
69
70 /* all the fields below are protected by this lock */
71 spinlock_t lock;
Jens Axboefd0928d2008-01-24 08:52:45 +010072
73 unsigned short ioprio;
74 unsigned short ioprio_changed;
75
76 /*
77 * For request batching
78 */
79 unsigned long last_waited; /* Time last woken after wait for request */
80 int nr_batch_requests; /* Number of requests left in the batch */
81
82 struct as_io_context *aic;
Jens Axboe4ac845a2008-01-24 08:44:49 +010083 struct radix_tree_root radix_root;
Jens Axboeffc4e752008-02-19 10:02:29 +010084 struct hlist_head cic_list;
Jens Axboefd0928d2008-01-24 08:52:45 +010085 void *ioc_data;
86};
87
Jens Axboed38ecf92008-01-24 08:53:35 +010088static inline struct io_context *ioc_task_link(struct io_context *ioc)
89{
90 /*
91 * if ref count is zero, don't allow sharing (ioc is going away, it's
92 * a race).
93 */
Jens Axboed237e5c2008-04-15 09:25:33 +020094 if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
95 atomic_inc(&ioc->nr_tasks);
Jens Axboed38ecf92008-01-24 08:53:35 +010096 return ioc;
Jens Axboed237e5c2008-04-15 09:25:33 +020097 }
Jens Axboed38ecf92008-01-24 08:53:35 +010098
99 return NULL;
100}
101
#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
/* !CONFIG_BLOCK: no-op stubs so callers need not be conditional */
static inline void exit_io_context(void)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}
#endif
119
Jens Axboefd0928d2008-01-24 08:52:45 +0100120#endif