/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

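/*
 * Allocate the next bio in a chain: the previous bio, if any, is chained
 * to the new one and submitted, so a caller can keep extending the chain
 * in a loop and wait once on the bio it is left holding.
 */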
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

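/**
 * __blkdev_issue_discard - queue a discard, appending to a caller-owned chain
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate discard bios for the sector range and chain them onto *@biop.
 *    The caller is responsible for submitting and waiting on the final bio.
 */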
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (flags & BLKDEV_DISCARD_ZERO)
			return -EOPNOTSUPP;
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		if ((flags & BLKDEV_DISCARD_ZERO) &&
		    !q->limits.discard_zeroes_data)
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would
		 * be misaligned, stop the discard at the previous aligned
		 * sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO))
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
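
/*
 * Illustrative sketch (not part of this file): how a typical caller might
 * discard a range synchronously. The helper name, device and range are
 * hypothetical.
 */
#if 0
static int example_discard_range(struct block_device *bdev)
{
	/* Discard 1 MiB (2048 x 512 B sectors) starting at sector 2048. */
	return blkdev_issue_discard(bdev, 2048, 2048, GFP_KERNEL, 0);
}
#endif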

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios, all pointing
 *    at the same page, and chain them onto *@biop.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
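
/*
 * Illustrative sketch (not part of this file): replicating one logical
 * block of caller-prepared data across a range. The helper name, range
 * and page are hypothetical.
 */
#if 0
static int example_write_same(struct block_device *bdev, struct page *page)
{
	/* Replicate the first logical block of @page over 8 MiB. */
	return blkdev_issue_write_same(bdev, 0, 16384, GFP_KERNEL, page);
}
#endif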

/**
 * __blkdev_issue_write_zeroes - generate a number of WRITE ZEROES bios
 * @bdev: blockdev to issue write zeroes for
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_ZEROES bios and chain
 *    them onto *@biop.
 */
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

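	/*
	 * REQ_OP_WRITE_ZEROES bios carry no data pages; setting bi_size is
	 * enough to tell the device how large a range to zero.
	 */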
	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE_ZEROES, 0);

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue zeroout for
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @discard: whether to attempt a discard first
 *
 * Description:
 *    Zero the sector range by the cheapest means the device supports,
 *    trying discard (when @discard is set), WRITE ZEROES and WRITE SAME
 *    in turn before falling back to regular writes of zero-filled pages.
 *    The resulting bios are chained onto *@biop.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		bool discard)
{
	int ret;
	int bi_size = 0;
	struct bio *bio = *biop;
	unsigned int sz;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (discard) {
		ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
				BLKDEV_DISCARD_ZERO, biop);
		if (ret == 0 || (ret && ret != -EOPNOTSUPP))
			goto out;
	}

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop);
	if (ret == 0 || (ret && ret != -EOPNOTSUPP))
		goto out;

	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
			ZERO_PAGE(0), biop);
	if (ret == 0 || (ret && ret != -EOPNOTSUPP))
		goto out;

	ret = 0;
	while (nr_sects != 0) {
		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < (sz << 9))
				break;
		}
		cond_resched();
	}

	*biop = bio;
out:
	return ret;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @discard: whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range. If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded. Should
 *    the discard request fail, if the discard flag is not set, or if
 *    discard_zeroes_data is not supported, this function will resort to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them. If the block device supports WRITE ZEROES or WRITE
 *    SAME commands, blkdev_issue_zeroout() will use them to optimize the
 *    process of clearing the block range. Otherwise the zeroing will be
 *    performed using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	int ret;
	struct bio *bio = NULL;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
			&bio, discard);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
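
/*
 * Illustrative sketch (not part of this file): zeroing a range and letting
 * the helper pick discard, WRITE ZEROES, WRITE SAME or plain writes. The
 * helper name, device and range are hypothetical.
 */
#if 0
static int example_zero_range(struct block_device *bdev)
{
	/* Zero 4 MiB starting at sector 0; allow a discard-based fast path. */
	return blkdev_issue_zeroout(bdev, 0, 8192, GFP_KERNEL, true);
}
#endif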