// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block layer helpers for issuing discard, write same and zeroout bios
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		unsigned int req_sects = nr_sects;
		sector_t end_sect;

		if (!req_sects)
			goto fail;
		if (req_sects > UINT_MAX >> 9)
			req_sects = UINT_MAX >> 9;

		end_sect = sector + req_sects;

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;

fail:
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	*biop = NULL;
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
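
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * (example_discard_range is an invented name) that discards an aligned
 * sector range from process context and treats a missing discard capability
 * as a no-op. GFP_KERNEL is assumed to be acceptable because the call may
 * sleep in submit_bio_wait().
 */
static int __maybe_unused example_discard_range(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	int ret;

	ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
	if (ret == -EOPNOTSUPP)
		ret = 0;	/* the queue cannot discard; nothing to do */

	return ret;
}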

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing the data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) that all write
 *  the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask,
		struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
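
/*
 * Illustrative sketch, not part of the original file: a hypothetical helper
 * (example_write_same_zero is an invented name) that zeroes an aligned range
 * by replicating the shared zero page with WRITE SAME. Real callers usually
 * prefer blkdev_issue_zeroout(), which picks the best available mechanism.
 */
static int __maybe_unused example_write_same_zero(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
			ZERO_PAGE(0));
}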

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
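
/*
 * Worked example (editorial note, not in the original source): with 4 KiB
 * pages, PAGE_SIZE / 512 == 8, so 24 sectors round up to 3 pages and a
 * single sector still maps to 1 page. The BIO_MAX_PAGES cap of 256 pages
 * therefore limits each bio built below to 256 * 8 = 2048 sectors, i.e.
 * 1 MiB of zeroes per bio.
 */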

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

Dmitry Monakhov3f14d792010-04-28 17:55:09 +0400316/**
317 * __blkdev_issue_zeroout - generate number of zero filed write bios
318 * @bdev: blockdev to issue
319 * @sector: start sector
320 * @nr_sects: number of sectors to write
321 * @gfp_mask: memory allocation flags (for bio_alloc)
322 * @biop: pointer to anchor bio
323 * @flags: controls detailed behavior
324 *
325 * Description:
326 * Zero-fill a block range, either using hardware offload or by explicitly
327 * writing zeroes to the device.
328 *
Dmitry Monakhov3f14d792010-04-28 17:55:09 +0400329 * If a device is using logical block provisioning, the underlying space will
330 * not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
331 *
332 * If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
333 * -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
334 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
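
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * (example_zeroout_batched is an invented name) that chains the zeroing of
 * two ranges onto one bio chain behind a single plug and waits once,
 * mirroring the pattern blkdev_issue_zeroout() uses for a single range.
 */
static int __maybe_unused example_zeroout_batched(struct block_device *bdev,
		sector_t s1, sector_t n1, sector_t s2, sector_t n2)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, s1, n1, GFP_KERNEL, &bio, 0);
	if (!ret)
		ret = __blkdev_issue_zeroout(bdev, s2, n2, GFP_KERNEL, &bio, 0);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}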

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
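
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * (example_zero_keep_provisioned is an invented name) that zeroes a range on
 * a thinly provisioned device while keeping the blocks allocated, by passing
 * BLKDEV_ZERO_NOUNMAP. The fallback to writing explicit zero pages stays
 * enabled, so the call can still succeed without a zeroing offload.
 */
static int __maybe_unused example_zero_keep_provisioned(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
			BLKDEV_ZERO_NOUNMAP);
}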