/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;
/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a cdb from the request data.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);
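
/*
 * Example (illustrative sketch only, not part of this file): a stacking
 * driver that stages table swaps would typically reset a scratch
 * queue_limits with blk_set_default_limits() and then fold in each
 * component device via bdev_stack_limits() before publishing the
 * result.  The device list and field names below are hypothetical:
 *
 *	struct queue_limits limits;
 *
 *	blk_set_default_limits(&limits);
 *	list_for_each_entry(dev, &table->devices, list)
 *		bdev_stack_limits(&limits, dev->bdev, dev->start_sector);
 */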

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
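
/*
 * Example (hypothetical bio-based driver, shown only to illustrate the
 * call sequence; mydev_make_request is made up): a virtual device that
 * wants bios handed to it directly pairs queue allocation with
 * blk_queue_make_request():
 *
 *	static int mydev_make_request(struct request_queue *q, struct bio *bio);
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, mydev_make_request);
 */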

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
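
/*
 * Example (illustrative only): typical bounce limits a low level driver
 * might pass, depending on how much of the address space its DMA engine
 * can reach:
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);	(legacy ISA DMA)
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));	(32-bit capable device)
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);	(fully 64-bit capable)
 */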

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	q->limits.max_hw_sectors = max_hw_sectors;
	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
				      BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
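
/*
 * Example (values invented for illustration): a controller limited to
 * 512KB per transfer would advertise 1024 512-byte sectors; the soft
 * limit is then clamped to min(1024, BLK_DEF_MAX_SECTORS):
 *
 *	blk_queue_max_hw_sectors(q, 1024);
 */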

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
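
/*
 * Example (values invented): a controller with a 128-entry scatter
 * gather table whose entries can each address at most 64KB would cap
 * both the segment count and the segment size:
 *
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */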

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
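
/*
 * Example (illustrative): a "512e" drive with 4096-byte physical
 * sectors emulating 512-byte logical sectors would typically be
 * registered as below; a drive that also reports a non-zero alignment
 * would additionally pass that value to blk_queue_alignment_offset():
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 */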

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
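
/*
 * Example (numbers invented): a RAID5 set with a 64KB chunk across
 * four data disks might export the chunk size as the minimum I/O and
 * the full stripe width as the optimal I/O:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */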

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->no_cluster |= b->no_cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
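
/*
 * Example (sketch only; the list and field names are borrowed loosely
 * from MD and are not guaranteed to match any driver exactly): an
 * MD/DM style driver folds every component device into the top level
 * gendisk as the array is assembled, and a warning is printed if any
 * member leaves the result misaligned:
 *
 *	list_for_each_entry(rdev, &mddev->disks, same_set)
 *		disk_stack_limits(mddev->gendisk, rdev->bdev,
 *				  rdev->data_offset << 9);
 */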

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
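
/*
 * Example (simplified sketch of the ATAPI use case described above;
 * error handling and buffer lifetime are omitted, and the drain size
 * constant and drain_needed callback are assumed to be provided by the
 * caller):
 *
 *	buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
 */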

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
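
/*
 * Example (illustrative): a transport that needs 4-byte aligned buffers
 * and a device that additionally needs 512-byte alignment can each
 * raise the mask independently; the larger requirement wins:
 *
 *	blk_queue_update_dma_alignment(q, 3);	(transport: 4-byte)
 *	blk_queue_update_dma_alignment(q, 511);	(device: 512-byte)
 */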

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);