/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

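/*
 * Allocate a new bio.  If @bio is non-NULL, chain the new bio's completion
 * to it and submit @bio, so callers can build an arbitrarily long chain of
 * bios while only ever having to wait on the last one.
 */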
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
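
		/*
		 * Illustrative example (values hypothetical): with
		 * granularity = 8 and alignment = 2, a split ending at
		 * end_sect = 37 is misaligned (37 % 8 == 5), so it is
		 * rounded down to ((37 - 2) / 8) * 8 + 2 = 34, which
		 * again satisfies end_sect % granularity == alignment.
		 */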

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
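
/*
 * Example usage (a minimal sketch, not part of this file; the caller and
 * the way it obtains @bdev are hypothetical).  Discarding a whole device
 * from module code might look like:
 *
 *	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;
 *	int err = blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
 *
 * A nonzero return is a negative errno, e.g. -EOPNOTSUPP when the queue
 * does not support discard.
 */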

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all carry
 *    the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
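
/*
 * Example (a minimal sketch; @bdev, @sector and @nr_sects come from a
 * hypothetical caller).  Since WRITE SAME replicates the first logical
 * block of @page across the range, passing ZERO_PAGE(0) writes zeroes:
 *
 *	int err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					  GFP_KERNEL, ZERO_PAGE(0));
 */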
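/*
 * Issue REQ_OP_WRITE_ZEROES bios for the whole range, splitting at the
 * device's write-zeroes limit.  Returns -EOPNOTSUPP when the device does
 * not advertise a zeroing offload.
 */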
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
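
/*
 * For example, with a (typical) 4K PAGE_SIZE there are 8 sectors per page:
 * nr_sects = 1 yields 1 page, nr_sects = 24 yields 3 pages, and anything
 * larger than BIO_MAX_PAGES * 8 sectors is clamped to BIO_MAX_PAGES.
 */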

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  Note that this function may fail with -EOPNOTSUPP if the driver signals
 *  zeroing offload support, but the device fails to process the command (for
 *  some devices there is no non-destructive way to verify whether this
 *  operation is actually supported). In this case the caller should retry
 *  the call to blkdev_issue_zeroout() and the fallback path will be used.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	int bi_size = 0;
	struct bio *bio = *biop;
	unsigned int sz;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		goto out;

	ret = 0;
	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
out:
	return ret;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;
	struct bio *bio = NULL;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
			&bio, flags);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
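
/*
 * Example (a minimal sketch; the caller and its error handling are
 * hypothetical).  Prefer the hardware offload, falling back to explicit
 * zero writes only when the caller decides to:
 *
 *	int err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOFALLBACK);
 *	if (err == -EOPNOTSUPP)
 *		err = blkdev_issue_zeroout(bdev, sector, nr_sects,
 *					   GFP_KERNEL, 0);
 */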