/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Tracks a batch of bios in flight for one request: @done counts the
 * outstanding bios plus one reference held by the submitter, @flags
 * carries BIO_UPTODATE, and @wait is completed when the last bio ends.
 */
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/* An -EOPNOTSUPP completion does not mark the whole batch as failed */
	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question. A usage
 *    sketch follows the function body.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	unsigned int granularity, alignment, mask;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	mask = granularity - 1;
	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors = round_down(max_discard_sectors, granularity);
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
			end_sect =
				round_down(end_sect - alignment, granularity)
				+ alignment;
			req_sects = end_sect - sector;
		}
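		/*
		 * Worked example (numbers are illustrative, not from the
		 * code): with a 4KB discard granularity, granularity = 8
		 * and mask = 7. If alignment = 0, sector = 0,
		 * nr_sects = 100 and max_discard_sectors = 16, then
		 * req_sects = 16, end_sect = 16, and (16 & 7) == 0 ==
		 * alignment, so no adjustment is needed. With
		 * alignment = 2 instead, (16 & 7) != 2, so end_sect
		 * becomes round_down(16 - 2, 8) + 2 = 10 and
		 * req_sects = 10, keeping each split request starting at
		 * a sector congruent to the alignment.
		 */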

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
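
/*
 * Usage sketch (illustrative; the helper name and byte-based arguments
 * are assumptions, not part of this file): discard a byte range,
 * roughly what the BLKDISCARD ioctl path does.
 *
 *	static int example_discard_range(struct block_device *bdev,
 *					 u64 start, u64 len)
 *	{
 *		if ((start | len) & 511)
 *			return -EINVAL;
 *		return blkdev_issue_discard(bdev, start >> 9, len >> 9,
 *					    GFP_KERNEL, 0);
 *	}
 *
 * Passing BLKDEV_DISCARD_SECURE instead of 0 requests a secure discard
 * and fails with -EOPNOTSUPP if the queue cannot honour it.
 */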

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question. A usage
 *    sketch follows the function body.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -ENOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
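
/*
 * Usage sketch (illustrative; the helper name is an assumption, not an
 * API in this file): replicate one block of @page across a sector
 * range, after checking that the device advertises WRITE SAME support.
 *
 *	static int example_write_same(struct block_device *bdev,
 *				      sector_t sector, sector_t nr_sects,
 *				      struct page *page)
 *	{
 *		if (!bdev_write_same(bdev))
 *			return -EOPNOTSUPP;
 *		return blkdev_issue_write_same(bdev, sector, nr_sects,
 *					       GFP_NOIO, page);
 *	}
 *
 * Note that only bdev_logical_block_size() bytes of @page are used;
 * the device repeats that single block over the whole range.
 */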

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
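		/*
		 * bio_add_page() returns the number of bytes it added:
		 * sz << 9 on success, 0 once the bio is full. A short add
		 * ends this bio and lets the outer loop start a new one,
		 * and ret is reset below so a full bio is not mistaken
		 * for an error. Illustrative numbers: with nr_sects = 9
		 * and PAGE_SIZE = 4096, the first pass adds sz = 8
		 * sectors (4096 bytes) and the next pass adds the final
		 * 512-byte sector.
		 */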
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 *    A usage sketch follows the function body.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);