// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block device helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

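/*
 * Allocate a bio with room for @nr_pages vecs and, if @bio is non-NULL,
 * chain the old bio to the new one and submit it. The helpers below use
 * this to build long bio chains while only ever holding the tail; waiting
 * on the last bio in the chain waits for the whole chain to complete.
 */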
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		unsigned int req_sects = nr_sects;
		sector_t end_sect;

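		/*
		 * nr_sects is sector_t while req_sects is only unsigned int,
		 * so the assignment above can truncate to zero when the low
		 * 32 bits of nr_sects are all zero; bail out instead of
		 * looping forever on zero-sized bios.
		 */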
		if (!req_sects)
			goto fail;
		req_sects = min(req_sects, bio_allowed_max_sectors(q));

		end_sect = sector + req_sects;

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;

fail:
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	*biop = NULL;
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
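
/*
 * Example (hypothetical caller, not part of this file): discard a 1 MiB
 * range starting at 512-byte sector 2048 on an already-opened bdev:
 *
 *	int err = blkdev_issue_discard(bdev, 2048, 2048, GFP_KERNEL, 0);
 *
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 *
 * Passing BLKDEV_DISCARD_SECURE in @flags requests a secure erase instead.
 */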

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) that all
 *    write the same @page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
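
/*
 * Example (hypothetical caller): replicate one logical block of data,
 * taken from the start of a freshly allocated page, across the first 8
 * sectors of a device that advertises WRITE SAME support:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int err;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	memset(page_address(page), 0x5a, PAGE_SIZE);
 *	err = blkdev_issue_write_same(bdev, 0, 8, GFP_KERNEL, page);
 *	__free_page(page);
 */

/*
 * Generate REQ_OP_WRITE_ZEROES bios for a sector range. These carry no
 * payload; the device zeroes the range itself, so this path requires a
 * non-zero write_zeroes_sectors queue limit.
 */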
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 * E.g. with 4KB pages (8 sectors per page), nr_sects = 7 yields
 * DIV_ROUND_UP(7, 8) = 1 page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
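
/*
 * Zero a sector range by writing the shared zero page with regular
 * REQ_OP_WRITE bios. This is the fallback used when no zeroing offload
 * is available; it works on any writable device, at the cost of
 * transferring the zeroes over the bus.
 */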
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
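
/*
 * Example (hypothetical caller): code that wants to batch the generated
 * bios itself uses the same two-step pattern as blkdev_issue_zeroout()
 * below:
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int ret;
 *
 *	blk_start_plug(&plug);
 *	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				     &bio, 0);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */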

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
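
/*
 * Example (hypothetical caller): zero the first 1 MiB of a device,
 * preferring the WRITE ZEROES offload but falling back to writing zero
 * pages when the offload is unavailable:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, 0);
 *
 * With BLKDEV_ZERO_NOFALLBACK in @flags the call instead fails with
 * -EOPNOTSUPP when no offload is available.
 */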