// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

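/*
 * Allocate a new bio; if @bio is non-NULL, chain it to the new bio and
 * submit it.  Each submitted bio completes into its successor in the
 * chain, so a caller building up a long chain only needs to wait on
 * the final bio it is handed back (typically with submit_bio_wait()).
 */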
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		unsigned int req_sects = nr_sects;
		sector_t end_sect;

		/*
		 * If nr_sects is a nonzero multiple of 4G sectors, the
		 * assignment above truncates req_sects to zero; bail out
		 * rather than issue a zero-length bio.
		 */
		if (!req_sects)
			goto fail;
		req_sects = min(req_sects, bio_allowed_max_sectors(q));

		end_sect = sector + req_sects;

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;

fail:
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	*biop = NULL;
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
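
/*
 * Usage sketch (illustrative only; the helper name and the GFP_NOFS
 * choice are assumptions, not part of this file): a filesystem
 * discarding a freed extent.  A return of -EOPNOTSUPP means the device
 * cannot discard at all and is usually safe to ignore.
 */
static inline int example_discard_extent(struct block_device *bdev,
					 sector_t start, sector_t nr_sects)
{
	/* flags == 0 requests a plain (non-secure) discard */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
}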

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the
 *    same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
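
/*
 * Usage sketch (illustrative only; the helper is hypothetical):
 * replicate one logical block of data from @page across a range,
 * checking up front that the device advertises WRITE SAME support.
 */
static inline int example_write_same_range(struct block_device *bdev,
					   sector_t start, sector_t nr_sects,
					   struct page *page)
{
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;
	return blkdev_issue_write_same(bdev, start, nr_sects, GFP_KERNEL,
				       page);
}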

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
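
/*
 * Worked example, assuming 4 KiB pages (8 sectors per page): 1 sector
 * maps to 1 page, 8 sectors to 1 page, 9 sectors to 2 pages, and
 * anything beyond BIO_MAX_PAGES * 8 sectors is clamped to
 * BIO_MAX_PAGES pages.
 */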

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
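
/*
 * Usage sketch (illustrative only; the helper is hypothetical): zero a
 * range while insisting on a hardware offload.  With
 * BLKDEV_ZERO_NOFALLBACK set, the call returns -EOPNOTSUPP instead of
 * falling back to writing ZERO_PAGE-backed bios.
 */
static inline int example_zero_range_offload_only(struct block_device *bdev,
						  sector_t start,
						  sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}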