// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block device helper functions (discard, write same, zeroout)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

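/*
 * Allocate a new bio; if @bio is non-NULL, chain it to the new one and
 * submit it. A chained parent bio completes only after its child, so a
 * caller building a long chain needs to wait only on the last bio.
 */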
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
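		/*
		 * Worked example (illustrative numbers): with granularity = 8
		 * and alignment = 0, a provisional end_sect of 1000003 is
		 * rounded down to 1000000 so that the next chunk starts on a
		 * discard_granularity boundary.
		 */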
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

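/*
 * Example (hypothetical helper, illustrative ranges): the *biop interface
 * lets a caller batch several discards into one bio chain and wait once.
 */
static inline int blkdev_example_discard_two(struct block_device *bdev,
					     gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_discard(bdev, 0, 2048, gfp_mask, 0, &bio);
	if (!ret)
		ret = __blkdev_issue_discard(bdev, 4096, 2048, gfp_mask, 0,
					     &bio);
	if (!ret && bio) {
		/* Waiting on the last bio waits for the whole chain. */
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}
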
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

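/*
 * Example (hypothetical helper): a synchronous caller such as an ioctl
 * handler can discard a byte range by converting it to 512B sectors.
 */
static inline int blkdev_example_discard_bytes(struct block_device *bdev,
					       loff_t start, loff_t len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
}
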
/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same
 *    page as payload.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

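/*
 * Example (hypothetical helper): zero a range by repeating the shared zero
 * page across every logical block with WRITE SAME.
 */
static inline int blkdev_example_write_same_zero(struct block_device *bdev,
						 sector_t sector,
						 sector_t nr_sects)
{
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}

/*
 * Generate REQ_OP_WRITE_ZEROES bios for the given range, chaining them onto
 * *biop and splitting at the device's write_zeroes_sectors limit. Returns
 * -EOPNOTSUPP if the device advertises no WRITE ZEROES support.
 */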
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
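/*
 * Worked example (illustrative numbers): with 4K pages there are eight
 * 512B sectors per page, so nr_sects = 17 yields DIV_ROUND_UP(17, 8) = 3
 * pages, and anything above BIO_MAX_PAGES * 8 sectors is capped at
 * BIO_MAX_PAGES.
 */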
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}

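/*
 * Explicitly zero a range by adding ZERO_PAGE(0) payloads to plain
 * REQ_OP_WRITE bios; this is the fallback used when a device offers no
 * zeroing offload.
 */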
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
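
/*
 * Example (hypothetical helper): zero a range using only the hardware
 * offload, failing with -EOPNOTSUPP rather than falling back to writing
 * zero pages.
 */
static inline int blkdev_example_fast_zero(struct block_device *bdev,
					   sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}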