#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

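/*
 * Illustrative sketch, not part of the original header: a process that had
 * to sleep for a free request is allowed to keep "batching", i.e. to go on
 * allocating requests for a short while even when the queue is otherwise
 * full.  The window is bounded in time by BLK_BATCH_TIME and in request
 * count by BLK_BATCH_REQ.  The helper below is hypothetical and only shows
 * how the time bound might be checked; it assumes linux/jiffies.h and
 * linux/iocontext.h (for the nr_batch_requests/last_waited fields) are
 * already available to the including file.
 */
static inline int blk_example_ioc_still_batching(struct io_context *ioc)
{
	if (!ioc || ioc->nr_batch_requests <= 0)
		return 0;

	/* the batch started at last_waited and lasts BLK_BATCH_TIME jiffies */
	return time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME);
}
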
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

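/*
 * Illustrative sketch, not part of the original header: the error-handling
 * timer and the normal IO completion path race to "grab" a request, and
 * only the caller that wins the test_and_set_bit() in blk_mark_rq_complete()
 * may go on to finish (or time out) the request.  The helper is hypothetical
 * and exists only to demonstrate the intended calling convention.
 */
static inline int blk_example_try_grab_rq(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return 0;	/* the other path already owns the request */

	/* ...the winner would complete or time out the request here... */
	return 1;
}
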
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

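/*
 * Illustrative sketch, not part of the original header: the request
 * allocator is expected to compare the per-direction request count against
 * these two thresholds, marking the queue congested once nr_congestion_on
 * is reached and un-marking it only after usage falls below
 * nr_congestion_off; the gap between the two provides the hysteresis
 * mentioned above.  The helper and its use of q->rq.count[] are shown for
 * illustration only.
 */
static inline void blk_example_update_congestion(struct request_queue *q,
						 int rw, int *congested)
{
	struct request_list *rl = &q->rq;

	if (rl->count[rw] >= queue_congestion_on_threshold(q))
		*congested = 1;		/* reached the "on" threshold */
	else if (rl->count[rw] < queue_congestion_off_threshold(q))
		*congested = 0;		/* dropped below the "off" threshold */
	/* in between the two thresholds the previous state is kept */
}
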
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)

#endif /* BLK_DEV_INTEGRITY */

static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return first_cpu(per_cpu(cpu_sibling_map, cpu));
#else
	return cpu;
#endif
}

static inline int blk_do_io_stat(struct request_queue *q)
{
	if (q)
		return blk_queue_io_stat(q);

	return 0;
}

#endif