#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic);	/* destructor */
	void (*exit)(struct as_io_context *aic);	/* called on task exit */

	unsigned long state;
	atomic_t nr_queued;	/* queued reads & sync writes */
	atomic_t nr_dispatched;	/* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};
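
/*
 * Illustrative sketch, not part of the original header: one plausible way
 * a scheduler could fold a new thinktime sample into the running
 * statistics above, using a fixed-point decaying average. The helper name
 * and the 7/8 weighting are assumptions made here for illustration; the
 * real update logic lives in the anticipatory scheduler and may differ.
 */
static inline void as_ioc_update_ttime(struct as_io_context *aic,
				       unsigned long ttime)
{
	/* Decay old history (7/8 weight) and mix in the new sample. */
	aic->ttime_samples = (7 * aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7 * aic->ttime_total + 256 * ttime) / 8;
	/* samples is never zero after the update above */
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}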

struct cfq_queue;
struct cfq_io_context {
	void *key;
	unsigned long dead_key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;
	sector_t last_request_pos;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;
	struct hlist_node cic_list;

	void (*dtor)(struct io_context *);	/* destructor */
	void (*exit)(struct io_context *);	/* called on task exit */

	struct rcu_head rcu_head;
};
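
/*
 * Hypothetical accessor, added for illustration only: the two cfqq slots
 * above presumably hold one queue for async I/O and one for sync I/O, so
 * a lookup keyed on the request's sync flag would look roughly like this.
 * CFQ keeps its own accessors; this sketches the idea, not its code.
 */
static inline struct cfq_queue *cic_get_cfqq(struct cfq_io_context *cic,
					     int is_sync)
{
	return cic->cfqq[!!is_sync];	/* [0] = async, [1] = sync (assumed) */
}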

/*
 * I/O subsystem state of the associated processes. It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
| 66 | struct io_context { |
Nikanth Karthikesan | d9c7d39 | 2009-06-10 12:57:06 -0700 | [diff] [blame] | 67 | atomic_long_t refcount; |
Jens Axboe | d38ecf9 | 2008-01-24 08:53:35 +0100 | [diff] [blame] | 68 | atomic_t nr_tasks; |
| 69 | |
| 70 | /* all the fields below are protected by this lock */ |
| 71 | spinlock_t lock; |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 72 | |
| 73 | unsigned short ioprio; |
| 74 | unsigned short ioprio_changed; |
| 75 | |
| 76 | /* |
| 77 | * For request batching |
| 78 | */ |
| 79 | unsigned long last_waited; /* Time last woken after wait for request */ |
| 80 | int nr_batch_requests; /* Number of requests left in the batch */ |
| 81 | |
| 82 | struct as_io_context *aic; |
Jens Axboe | 4ac845a | 2008-01-24 08:44:49 +0100 | [diff] [blame] | 83 | struct radix_tree_root radix_root; |
Jens Axboe | ffc4e75 | 2008-02-19 10:02:29 +0100 | [diff] [blame] | 84 | struct hlist_head cic_list; |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 85 | void *ioc_data; |
| 86 | }; |
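
/*
 * Sketch under assumptions: radix_root above appears to index per-queue
 * scheduler state (cfq_io_context->key suggests the queue pointer doubles
 * as the key). A lookup could then be as simple as the following; the
 * helper name and the choice of key are invented here for illustration.
 */
static inline struct cfq_io_context *ioc_lookup_cic(struct io_context *ioc,
						    unsigned long key)
{
	return radix_tree_lookup(&ioc->radix_root, key);
}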

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the ref count is zero, don't allow sharing (the ioc is going
	 * away; it's a race).
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}
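
/*
 * Usage sketch, not part of this header: a fork-style path sharing the
 * parent's io_context. A NULL return from ioc_task_link() means we lost
 * the race with the final reference drop, so the caller must bail out.
 * The helper below is illustrative, not the actual fork-path code.
 */
static inline int share_io_context_sketch(struct io_context *parent_ioc,
					  struct io_context **child_slot)
{
	struct io_context *ioc = ioc_task_link(parent_ioc);

	if (!ioc)
		return -ENOMEM;	/* parent's ioc already going away */
	*child_slot = ioc;
	return 0;
}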

#ifdef CONFIG_BLOCK
int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
#else
static inline void exit_io_context(void)
{
}

struct io_context;
static inline int put_io_context(struct io_context *ioc)
{
	return 1;
}
#endif
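
/*
 * Illustrative get/put pairing (CONFIG_BLOCK only): get_io_context() is
 * expected to return the current task's context with a reference held,
 * and put_io_context() to drop that reference. This sketches the calling
 * convention; the node argument of -1 (no NUMA preference) is an
 * assumption made for this example.
 */
#ifdef CONFIG_BLOCK
static inline void ioc_get_put_sketch(void)
{
	struct io_context *ioc = get_io_context(GFP_KERNEL, -1);

	if (ioc)
		put_io_context(ioc);
}
#endif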

#endif