#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

struct cfq_queue;
struct cfq_io_context {
	void *key;			/* cfq_data this context is attached to */
	unsigned long dead_key;		/* stale key, kept once that queue goes away */

	struct cfq_queue *cfqq[2];	/* async ([0]) and sync ([1]) queues */

	struct io_context *ioc;		/* owning io_context */

	unsigned long last_end_request;	/* completion time of the last request */

	/* decayed "think time" statistics */
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	struct list_head queue_list;	/* membership on the queue's cic list */
	struct hlist_node cic_list;	/* membership on the io_context's cic_list */

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */

	struct rcu_head rcu_head;	/* deferred (RCU) freeing */
};
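
/*
 * Illustrative sketch (an assumption, not defined in this header): the
 * ttime_* fields above are meant to carry a decayed average of the
 * context's "think time", i.e. the gap between one request completing
 * (last_end_request) and the next one being issued.  An update along
 * these lines would keep ttime_mean current (the cap and the exact
 * weights are guesses):
 *
 *	elapsed = min(jiffies - cic->last_end_request, max_think_time);
 *	cic->ttime_samples = (7 * cic->ttime_samples + 256) / 8;
 *	cic->ttime_total   = (7 * cic->ttime_total + 256 * elapsed) / 8;
 *	cic->ttime_mean    = (cic->ttime_total + 128) / cic->ttime_samples;
 */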

/*
 * I/O subsystem state of the associated process.  It is refcounted
 * and kmalloc'ed, and may be shared between processes (for example,
 * tasks cloned with CLONE_IO).
 */
struct io_context {
	atomic_long_t refcount;		/* references, dropped by put_io_context() */
	atomic_t nr_tasks;		/* tasks currently sharing this context */

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;		/* current I/O priority */
	unsigned short ioprio_changed;	/* tells the I/O scheduler to re-read ioprio */

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
	unsigned short cgroup_changed;	/* likewise, for blkio cgroup changes */
#endif

	/*
	 * For request batching
	 */
	int nr_batch_requests;		/* Number of requests left in the batch */
	unsigned long last_waited;	/* Time last woken after wait for request */

	struct radix_tree_root radix_root;	/* per-queue cfq_io_contexts */
	struct hlist_head cic_list;		/* all cfq_io_contexts, for teardown */
	void *ioc_data;				/* most recently used cfq_io_context */
};
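
/*
 * Illustrative sketch (the caller shape is an assumption): an io_context
 * is obtained with a reference held and released with put_io_context(),
 * both declared further down.  A block layer user would do, roughly:
 *
 *	struct io_context *ioc = get_io_context(GFP_ATOMIC, q->node);
 *	if (ioc) {
 *		... look up or attach per-queue state under ioc->lock ...
 *		put_io_context(ioc);
 *	}
 */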

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the refcount is already zero, don't allow sharing: the ioc is
	 * on its way out and we lost the race.
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}

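/*
 * Illustrative sketch (not part of this header; the fork-time caller and
 * its exact error handling are assumptions): a new task can share its
 * parent's io_context via ioc_task_link(), falling back to a private
 * context that only inherits the I/O priority:
 *
 *	struct io_context *ioc = parent->io_context;
 *
 *	if (ioc && (clone_flags & CLONE_IO)) {
 *		child->io_context = ioc_task_link(ioc);
 *		if (!child->io_context)
 *			return -ENOMEM;
 *	} else if (ioc && ioprio_valid(ioc->ioprio)) {
 *		child->io_context = alloc_io_context(GFP_KERNEL, -1);
 *		if (!child->io_context)
 *			return -ENOMEM;
 *		child->io_context->ioprio = ioc->ioprio;
 *	}
 */
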
struct task_struct;
#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(struct task_struct *task)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}
#endif

#endif