/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback
 * which is invoked before the request is handed to the request_fn.
 * The goal of the function is to prepare a request for I/O; it can be
 * used, for instance, to build a cdb from the request data.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
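
/*
 * Example (a sketch only; the example_* names are assumed, not
 * functions from this file): a SCSI-style driver registers its
 * prepare_request hook while setting up the queue, building the cdb
 * in the callback and returning BLKPREP_OK once the request is ready:
 *
 *	static int example_prep_rq(struct request_queue *q,
 *				   struct request *rq)
 *	{
 *		example_build_cdb(rq);
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, example_prep_rq);
 */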

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
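
/*
 * Example (a sketch; the example_* names and the helper
 * bytes_to_stripe_end() are assumed, and it presumes the
 * merge_bvec_fn prototype of this kernel, which takes the queue, the
 * bio and the candidate bio_vec and returns how many bytes of it can
 * be accepted): a striping driver accepts only as much of a new
 * bio_vec as fits before its next stripe boundary, honouring the rule
 * above that an empty bio must always be allowed at least one page:
 *
 *	static int example_merge_bvec(struct request_queue *q,
 *				      struct bio *bio,
 *				      struct bio_vec *bvec)
 *	{
 *		unsigned int room = bytes_to_stripe_end(q, bio);
 *
 *		if (!bio->bi_size && room < bvec->bv_len)
 *			room = bvec->bv_len;
 *		return min(room, bvec->bv_len);
 *	}
 *
 *	blk_queue_merge_bvec(q, example_merge_bvec);
 */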

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
	blk_queue_hardsect_size(q, 512);
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	INIT_WORK(&q->unplug_work, blk_unplug_work);

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
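
/*
 * Example (a sketch; "example_remap" is an assumed helper): a virtual
 * device such as md or lvm bypasses request queueing by remapping
 * each bio (adjusting bio->bi_bdev and bio->bi_sector) onto an
 * underlying device and resubmitting it via generic_make_request():
 *
 *	static int example_make_request(struct request_queue *q,
 *					struct bio *bio)
 *	{
 *		example_remap(bio);
 *		generic_make_request(bio);
 *		return 0;
 *	}
 *
 *	blk_queue_make_request(q, example_make_request);
 */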

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:  the request queue for the device
 * @dma_addr:   bus address limit
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
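
/*
 * Example (hypothetical devices): an ISA-era controller that can only
 * DMA into the first 16MB would request bouncing above that line,
 * while a fully 64-bit capable HBA can disable bouncing altogether:
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */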

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->max_hw_sectors = q->max_sectors = max_sectors;
	else {
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
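
/*
 * Example (hypothetical device): a controller whose firmware cannot
 * transfer more than 128KB per command would cap requests at 256
 * sectors of 512 bytes:
 *
 *	blk_queue_max_sectors(q, 256);
 */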

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually hand to the
 *    device at once.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);
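
/*
 * Example (a sketch; "host" and its "sg_tablesize" field are assumed
 * names for a hypothetical adapter structure): a driver typically
 * derives both segment limits from the depth of its scatter-gather
 * table:
 *
 *	blk_queue_max_hw_segments(q, host->sg_tablesize);
 *	blk_queue_max_phys_segments(q, host->sg_tablesize);
 */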

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:  the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * Description:
 *   This should typically be set to the lowest possible sector size
 *   that the hardware can operate on (possibly without resorting to
 *   internal read-modify-write operations).  Usually the default
 *   of 512 covers most hardware.
 **/
void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
	q->hardsect_size = size;
}
EXPORT_SYMBOL(blk_queue_hardsect_size);
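
/*
 * Example (hypothetical device): a drive that natively operates on
 * 4096-byte sectors would report that instead of the 512-byte
 * default:
 *
 *	blk_queue_hardsect_size(q, 4096);
 */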

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	/* zero is "infinity" */
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);

	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);
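
/*
 * Example (a sketch in md style; "mddev" and "rdev" are assumed
 * names): a stacking driver folds in the limits of each underlying
 * device as it is added, so the top-level queue never builds a
 * request the bottom device cannot handle:
 *
 *	blk_queue_stack_limits(mddev->queue, bdev_get_queue(rdev->bdev));
 */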

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set pad mask.  Direct IO requests are padded to the mask specified.
 *
 * Appending a pad buffer to a request modifies ->data_len such that it
 * includes the pad buffer.  The original requested data length can be
 * obtained using blk_rq_raw_data_len().
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);
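
/*
 * Example (hypothetical device): an ATAPI device that needs every
 * direct I/O transfer rounded up to a multiple of 4 bytes would set a
 * mask of 3:
 *
 *	blk_queue_dma_pad(q, 3);
 */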

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support, otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
		return -EINVAL;
	/* make room for appending the drain */
	--q->max_hw_segments;
	--q->max_phys_segments;
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
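
/*
 * Example (a sketch in libata style; "example_drain_needed" and
 * "drain_buf" are assumed names): an ATAPI driver allocates one
 * physically contiguous drain buffer and registers it together with a
 * predicate that returns non-zero only for the problematic packet
 * commands:
 *
 *	drain_buf = kmalloc(16 << 10, GFP_KERNEL);
 *	if (drain_buf)
 *		blk_queue_dma_drain(q, example_drain_needed,
 *				    drain_buf, 16 << 10);
 */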

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
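
/*
 * Example (hypothetical device): hardware whose scatter-gather
 * elements must not cross a 64KB boundary would set:
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 */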

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
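
/*
 * Example (a sketch): if a transport needs 4-byte alignment and the
 * device itself needs 512-byte alignment, both can register their
 * masks independently and the queue keeps the stricter one:
 *
 *	blk_queue_update_dma_alignment(q, 3);
 *	blk_queue_update_dma_alignment(q, 511);
 *
 * q->dma_alignment is now 511.
 */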

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);