/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * cdb from the request data, for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
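
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * "foo" driver might register a prepare_request callback that builds its
 * hardware command block before the request reaches the request_fn. The
 * foo_* names are assumptions for illustration only.
 */
static int foo_prep_rq(struct request_queue *q, struct request *rq)
{
        /* e.g. translate rq fields into a device-specific command here */
        return BLKPREP_OK;
}

static void foo_register_prep(struct request_queue *q)
{
        blk_queue_prep_rq(q, foo_prep_rq);
}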

/**
 * blk_queue_set_discard - set a discard callback for the queue
 * @q: queue
 * @dfn: prepare_discard function
 *
 * It's possible for a queue to register a discard callback which is used
 * to transform a discard request into the appropriate type for the
 * hardware. If none is registered, then discard requests are failed
 * with %EOPNOTSUPP.
 *
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
        q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
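
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * "foo" driver arming a 30 second request timeout and simply restarting
 * the timer whenever one fires. The foo_* names and the 30 second value
 * are assumptions for illustration only.
 */
static enum blk_eh_timer_return foo_timed_out(struct request *rq)
{
        /* a real driver would kick off error handling here */
        return BLK_EH_RESET_TIMER;
}

static void foo_set_timeouts(struct request_queue *q)
{
        blk_queue_rq_timeout(q, 30 * HZ);
        blk_queue_rq_timed_out(q, foo_timed_out);
}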

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;
        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

        q->make_request_fn = mfn;
        q->backing_dev_info.ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
        blk_queue_hardsect_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        q->unplug_thresh = 4;			/* hmm */
        q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;

        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
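
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * bio-based "foo" driver (md/lvm style) taking bios directly instead of
 * going through the request queue. The foo_* names are assumptions for
 * illustration only, and this handler simply completes every bio.
 */
static int foo_make_request(struct request_queue *q, struct bio *bio)
{
        /* a real driver would remap and resubmit, or service, the bio here */
        bio_endio(bio, 0);
        return 0;
}

static void foo_register_make_request(struct request_queue *q)
{
        blk_queue_make_request(q, foo_make_request);
}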

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
        unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->bounce_pfn = max_low_pfn;
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
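
/*
 * Illustrative example (not part of the original file): a hypothetical
 * "foo" driver whose hardware can only DMA to the low 32 bits of the
 * address space asks for bouncing of anything above 4GB. The foo_* name
 * is an assumption for illustration only.
 */
static void foo_set_bounce(struct request_queue *q)
{
        /* bounce any page above the device's 32-bit DMA reach */
        blk_queue_bounce_limit(q, 0xffffffffULL);
}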

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_sectors: max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_sectors);
        }

        if (BLK_DEF_MAX_SECTORS > max_sectors)
                q->max_hw_sectors = q->max_sectors = max_sectors;
        else {
                q->max_sectors = BLK_DEF_MAX_SECTORS;
                q->max_hw_sectors = max_sectors;
        }
}
EXPORT_SYMBOL(blk_queue_max_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
                                 unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
                               unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q: the request queue for the device
 * @size: the hardware sector size, in bytes
 *
 * Description:
 *    This should typically be set to the lowest possible sector size
 *    that the hardware can operate on (possibly without even resorting
 *    to internal read-modify-write operations). Usually the default
 *    of 512 covers most hardware.
 **/
void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
        q->hardsect_size = size;
}
EXPORT_SYMBOL(blk_queue_hardsect_size);
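
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * "foo" driver applying the per-request limits above for a controller
 * that handles at most 64KiB per request, 32 scatter/gather entries of
 * up to 16KiB each, and native 4096 byte sectors. All foo_* names and
 * numbers are assumptions for illustration only.
 */
static void foo_set_limits(struct request_queue *q)
{
        blk_queue_max_sectors(q, 128);		/* 128 * 512b = 64KiB */
        blk_queue_max_phys_segments(q, 32);
        blk_queue_max_hw_segments(q, 32);
        blk_queue_max_segment_size(q, 16 * 1024);
        blk_queue_hardsect_size(q, 4096);
}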

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t: the stacking driver (top)
 * @b: the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        /* zero is "infinity" */
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);

        t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
        t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
        t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
                unsigned long flags;
                spin_lock_irqsave(t->queue_lock, flags);
                queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
                spin_unlock_irqrestore(t->queue_lock, flags);
        }
}
EXPORT_SYMBOL(blk_queue_stack_limits);
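
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * stacking "foo" driver (dm/md style) folding the limits of each
 * underlying block device into its own queue as members are added.
 * The foo_* name is an assumption for illustration only.
 */
static void foo_inherit_limits(struct request_queue *t,
                               struct block_device *bdev)
{
        blk_queue_stack_limits(t, bdev_get_queue(bdev));
}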

/**
 * blk_queue_dma_pad - set pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Set dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
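
/*
 * Illustrative example (not part of the original file): a hypothetical
 * "foo" driver whose hardware transfers data in 4 byte units asks the
 * block layer to pad transfer lengths up to the next multiple of 4.
 * The foo_* name is an assumption for illustration only.
 */
static void foo_set_dma_pad(struct request_queue *q)
{
        blk_queue_update_dma_pad(q, 4 - 1);
}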

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q: the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf: physically contiguous buffer
 * @size: size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support, otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
                return -EINVAL;
        /* make room for appending the drain */
        --q->max_hw_segments;
        --q->max_phys_segments;
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
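
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * "foo" ATAPI-style driver wiring up a drain buffer so that packet
 * commands which transfer more than expected land in foo_drain_buf
 * instead of overrunning the scatterlist. The foo_* names and the
 * 256 byte size are assumptions for illustration only.
 */
static char foo_drain_buf[256];

static int foo_drain_needed(struct request *rq)
{
        /* drain only packet commands in this sketch */
        return blk_pc_request(rq);
}

static int foo_setup_drain(struct request_queue *q)
{
        return blk_queue_dma_drain(q, foo_drain_needed, foo_drain_buf,
                                   sizeof(foo_drain_buf));
}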

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
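
/*
 * Illustrative example (not part of the original file): a hypothetical
 * "foo" transport that can only DMA from buffers aligned to 4 bytes
 * raises the queue's alignment requirement accordingly. The foo_* name
 * is an assumption for illustration only.
 */
static void foo_set_alignment(struct request_queue *q)
{
        /* a mask of 3 means buffers and lengths must be 4 byte aligned */
        blk_queue_update_dma_alignment(q, 4 - 1);
}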

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);