/*
 * Functions related to generic block device helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

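/*
 * Usage sketch (illustrative, mirrors the callers below; work_left and
 * bdev stand in for the caller's own state): start with a NULL bio and
 * let next_bio() chain each new bio behind the previous one, submitting
 * the old bio as it goes:
 *
 *	struct bio *bio = NULL;
 *
 *	while (work_left) {
 *		bio = next_bio(bio, 0, GFP_KERNEL);
 *		bio->bi_bdev = bdev;	(fill in sector, size and op too)
 *	}
 *	if (bio)
 *		ret = submit_bio_wait(bio);
 *
 * bio_chain() propagates completion along the chain, so a single
 * submit_bio_wait() on the last bio waits for all of them.
 */
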
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (flags & BLKDEV_DISCARD_ZERO)
			return -EOPNOTSUPP;
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		if ((flags & BLKDEV_DISCARD_ZERO) &&
		    !q->limits.discard_zeroes_data)
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
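
	/*
	 * Example (illustrative numbers): with a 4096-byte logical block
	 * size, bs_mask = (4096 >> 9) - 1 = 7, so both the start sector and
	 * the length must be multiples of eight 512-byte sectors or the
	 * request is rejected with -EINVAL before any bio is built.
	 */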

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would
		 * be misaligned, stop the discard at the previous aligned
		 * sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
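
		/*
		 * Worked example (illustrative numbers): with granularity = 8
		 * and alignment = 2, aligned boundaries sit at sectors
		 * 8 * k + 2.  If sector = 10 and req_sects was clipped to 53,
		 * end_sect starts at 63; it is rounded down to the previous
		 * aligned boundary, (63 - 2) / 8 * 8 + 2 = 58, so req_sects
		 * becomes 48 and the next chunk starts at the aligned
		 * sector 58.
		 */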

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO))
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

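/*
 * Example caller (illustrative sketch, not part of this file): a
 * filesystem might discard a freed extent like this, treating "device
 * cannot discard" as a soft failure:
 *
 *	int err = blkdev_issue_discard(sb->s_bdev, start_sector, nr_sectors,
 *				       GFP_NOFS, 0);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;
 *
 * where sb, start_sector and nr_sectors stand in for the caller's state.
 */
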
/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same
 *    page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask,
		struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

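/*
 * Example caller (illustrative sketch, not part of this file): replicate
 * one logical block of pattern data across a range.  The device repeats
 * the first bdev_logical_block_size(bdev) bytes of the page over every
 * logical block in the range:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	memset(page_address(page), 0x5a, PAGE_SIZE);
 *	err = blkdev_issue_write_same(bdev, sector, nr_sectors,
 *				      GFP_KERNEL, page);
 *	__free_page(page);
 */
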
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

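/*
 * Usage sketch (illustrative): __blkdev_issue_zeroout() below tries this
 * hardware offload first and only falls back to explicit zero writes on
 * -EOPNOTSUPP.  Passing BLKDEV_ZERO_NOUNMAP sets REQ_NOUNMAP on each bio,
 * asking a thinly provisioned device to keep the blocks allocated instead
 * of unmapping them:
 *
 *	struct bio *bio = NULL;
 *	int ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
 *					      GFP_KERNEL, &bio,
 *					      BLKDEV_ZERO_NOUNMAP);
 */
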
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by
 *    explicitly writing zeroes to the device.
 *
 *    If a device is using logical block provisioning, the underlying space
 *    will not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	int bi_size = 0;
	struct bio *bio = *biop;
	unsigned int sz;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret == 0 || (ret && ret != -EOPNOTSUPP))
		goto out;

	ret = 0;
	while (nr_sects != 0) {
		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < (sz << 9))
				break;
		}
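
		/*
		 * Worked example (illustrative): with 4 KiB pages each
		 * iteration adds at most PAGE_SIZE >> 9 = 8 sectors.  A full
		 * bio_add_page() returns sz << 9 = 4096, advancing nr_sects
		 * and sector by 8; a short return means this bio is full and
		 * the outer loop chains a fresh one via next_bio().
		 */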
		cond_resched();
	}

	*biop = bio;
out:
	return ret;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by
 *    explicitly writing zeroes to the device.  See __blkdev_issue_zeroout()
 *    for the valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;
	struct bio *bio = NULL;
	struct blk_plug plug;

	if (!(flags & BLKDEV_ZERO_NOUNMAP)) {
		/* Cheap path: discard, if the device zeroes discarded blocks */
		if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
				BLKDEV_DISCARD_ZERO))
			return 0;
	}

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
			&bio, flags);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);