#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

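/*
 * Hedged sketch, not part of this header's API: one way the two knobs
 * above combine. A process that recently slept on a full queue is
 * treated as "batching" and may keep submitting for BLK_BATCH_TIME
 * (up to BLK_BATCH_REQ requests) before it must wait again, mirroring
 * the ioc_batching() check in the request allocator. The io_context
 * fields used here (nr_batch_requests, last_waited) are assumed from
 * that code.
 */
static inline int blk_ioc_batching_sketch(struct io_context *ioc)
{
	if (!ioc)
		return 0;
	return ioc->nr_batch_requests > 0 &&
	       time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME);
}
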
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void rq_init(struct request_queue *q, struct request *rq);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);

struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);

void blk_queue_congestion_threshold(struct request_queue *q);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

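/*
 * Worked example, hedged: the actual watermarks are picked by
 * blk_queue_congestion_threshold(), roughly
 *
 *	on  = nr_requests - nr_requests / 8 + 1;
 *	off = nr_requests - nr_requests / 8 - nr_requests / 16 - 1;
 *
 * so with the default nr_requests == 128 the queue congests at 113 used
 * requests but only uncongests once it drains below 103. That gap of
 * roughly 10 requests is the hysteresis referred to above; the exact
 * rounding is that function's business, not a contract of this header.
 */
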
/*
 * The threshold at which a queue is considered to be uncongested.
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

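/*
 * Hedged usage sketch, not part of this header's API: how an allocator
 * might consult both watermarks. Because the "on" mark sits above the
 * "off" mark, a queue hovering around one request count does not flip
 * its congested state on every allocate/free. blk_set_queue_congested(),
 * blk_clear_queue_congested() and the q->rq.count[] bookkeeping are
 * assumed to match the request allocator in this tree.
 */
static inline void blk_update_congested_sketch(struct request_queue *q, int rw)
{
	struct request_list *rl = &q->rq;

	if (rl->count[rw] + 1 >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, rw);
	else if (rl->count[rw] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, rw);
}
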
#endif