/*
 * Generic block device helper functions.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * A bio_batch tracks a set of bios submitted as one operation: @done
 * counts outstanding bios (plus one reference held by the submitter),
 * @error records the most recent meaningful completion error, and
 * @wait is completed once every bio in the batch has finished.
 */
struct bio_batch {
	atomic_t		done;
	int			error;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio)
{
	struct bio_batch *bb = bio->bi_private;

	/* -EOPNOTSUPP completions are not recorded as batch errors. */
	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
		bb->error = bio->bi_error;
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int granularity;
	int alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		/*
		 * Make sure bi_size doesn't overflow: UINT_MAX >> 9 is
		 * 8388607 sectors, just under 4 GiB in 512-byte sectors,
		 * the largest byte count the 32-bit bi_size can hold.
		 */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
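		/*
		 * Worked example (illustrative, not in the original source):
		 * with granularity = 8 and alignment = 2, a split that would
		 * end at sector 27 is trimmed back to end at sector 26, the
		 * largest end_sect <= 27 with end_sect % 8 == 2.
		 */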
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
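
/*
 * Usage sketch (illustrative, not part of the original file): discard the
 * first 1 MiB of an already-opened device, assuming 512-byte sectors and
 * no special flags:
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *
 * The call sleeps until all discard bios complete; it returns -EOPNOTSUPP
 * if the queue does not support discard, or the error recorded by a
 * completed bio.
 */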

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
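
/*
 * Usage sketch (illustrative, not part of the original file): replicate
 * the contents of "page" (holding at least one logical block of payload)
 * across the first 2048 sectors of the device:
 *
 *	int err = blkdev_issue_write_same(bdev, 0, 2048, GFP_KERNEL, page);
 *
 * The call sleeps until all bios complete and returns 0 on success,
 * -ENXIO without a queue, -ENOMEM on allocation failure, or the error
 * recorded by a completed bio.
 */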

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */

static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			/* A short add means the bio is full; submit it. */
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @discard: whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range. If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded. If the
 *    discard request fails, if the discard flag is not set, or if
 *    discard_zeroes_data is not supported, this function falls back to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them. If the block device supports the WRITE SAME command,
 *    blkdev_issue_zeroout() will use it to optimize the process of
 *    clearing the block range. Otherwise the zeroing will be performed
 *    using regular WRITE calls.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
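
/*
 * Usage sketch (illustrative, not part of the original file): zero the
 * first 1 MiB of a device, allowing a discard-based shortcut when the
 * device guarantees that discarded blocks read back as zeroes:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, true);
 *
 * If neither discard nor WRITE SAME applies (or either call fails), the
 * range is zeroed with regular writes of ZERO_PAGE(0).
 */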