#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>
#include <asm/atomic.h>
/*
* This is the per-process anticipatory I/O scheduler state.
*/
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic);	/* destructor */
	void (*exit)(struct as_io_context *aic);	/* called on task exit */

	unsigned long state;
	atomic_t nr_queued;		/* queued reads & sync writes */
	atomic_t nr_dispatched;		/* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;	/* jiffies when the last request completed */
	unsigned long ttime_total;	/* accumulated think time */
	unsigned long ttime_samples;	/* number of think time samples */
	unsigned long ttime_mean;	/* mean think time */
	/* Layout pattern */
	unsigned int seek_samples;	/* number of seek distance samples */
	sector_t last_request_pos;	/* last request's start sector */
	u64 seek_total;			/* accumulated seek distance */
	sector_t seek_mean;		/* mean seek distance */
};
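
/*
 * Illustrative sketch, not part of the original header: one way the
 * thinktime fields above could be fed. The real anticipatory scheduler
 * uses decayed fixed-point averages; this simplified running mean only
 * shows what each field holds. as_sample_thinktime() is a hypothetical
 * helper name.
 */
static inline void as_sample_thinktime(struct as_io_context *aic,
				       unsigned long now)
{
	/* idle gap between the last completion and this new request */
	unsigned long ttime = now - aic->last_end_request;

	aic->ttime_total += ttime;
	aic->ttime_samples++;
	aic->ttime_mean = aic->ttime_total / aic->ttime_samples;
}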
struct cfq_queue;
struct cfq_io_context {
	struct rb_node rb_node;		/* node in the io_context's cic_root tree */
	void *key;			/* per-device lookup key */

	struct cfq_queue *cfqq[2];	/* async ([0]) and sync ([1]) queues */

	struct io_context *ioc;		/* owning io_context */

	unsigned long last_end_request;	/* jiffies when the last request completed */
	sector_t last_request_pos;	/* last request's start sector */

	/* Thinktime */
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	/* Layout pattern */
	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;	/* link on the device's context list */

	void (*dtor)(struct io_context *);	/* destructor */
	void (*exit)(struct io_context *);	/* called on task exit */
};
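
/*
 * Illustrative sketch, not part of the original header: looking up the
 * cfq_io_context for a given device key in an io_context's rb-tree,
 * following the usual kernel rb-tree search idiom. cic_lookup() is a
 * hypothetical helper name.
 */
static inline struct cfq_io_context *cic_lookup(struct rb_root *root, void *key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct cfq_io_context *cic;

		cic = rb_entry(n, struct cfq_io_context, rb_node);
		if (key < cic->key)
			n = n->rb_left;
		else if (key > cic->key)
			n = n->rb_right;
		else
			return cic;
	}
	return NULL;
}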
/*
 * This is the per-process I/O subsystem state. It is refcounted and
 * kmalloc'ed. Apart from the atomic refcount, all fields are modified
 * only from the owning process's context, so they require no locking.
 */
struct io_context {
	atomic_t refcount;
	struct task_struct *task;	/* owning task */

	unsigned short ioprio;
	unsigned short ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited;	/* Time last woken after wait for request */
	int nr_batch_requests;		/* Number of requests left in the batch */

	struct as_io_context *aic;	/* anticipatory scheduler state */
	struct rb_root cic_root;	/* per-device cfq_io_contexts */
	void *ioc_data;			/* cache of the most recently used cic */
};
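
/*
 * Illustrative sketch, not part of the original header: the refcount
 * discipline implied by the comment above. The kernel has its own
 * helpers for this; ioc_get()/ioc_put() are hypothetical names, and
 * kfree() would require <linux/slab.h>.
 */
static inline void ioc_get(struct io_context *ioc)
{
	atomic_inc(&ioc->refcount);
}

static inline void ioc_put(struct io_context *ioc)
{
	if (atomic_dec_and_test(&ioc->refcount)) {
		/* last reference: tear down scheduler-private state */
		if (ioc->aic && ioc->aic->dtor)
			ioc->aic->dtor(ioc->aic);
		kfree(ioc);
	}
}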
#endif /* IOCONTEXT_H */