/*
 * Functions related to generic block device helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

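/*
 * Allocate a new bio with room for @nr_pages vecs.  If @bio is non-NULL,
 * chain it to the new bio and submit it, so that callers can keep
 * extending a chain of bios and need only wait on the last one.
 */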
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	enum req_op op;
	int alignment;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (flags & BLKDEV_DISCARD_ZERO)
			return -EOPNOTSUPP;
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		if ((flags & BLKDEV_DISCARD_ZERO) &&
		    !q->limits.discard_zeroes_data)
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

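/*
 * Example (an illustrative sketch, not a caller in this file): batching
 * two hypothetical discard ranges through the *biop chaining contract
 * and waiting once, mirroring what blkdev_issue_discard() does below:
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int ret;
 *
 *	blk_start_plug(&plug);
 *	ret = __blkdev_issue_discard(bdev, 0, 1024, GFP_KERNEL, 0, &bio);
 *	if (!ret)
 *		ret = __blkdev_issue_discard(bdev, 2048, 1024, GFP_KERNEL,
 *					     0, &bio);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */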
/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO))
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

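/*
 * Example (illustrative): a one-shot discard of a hypothetical range,
 * with no special flags, as a filesystem might issue from its own
 * context:
 *
 *	int err = blkdev_issue_discard(bdev, start_sector, nr_sectors,
 *				       GFP_NOFS, 0);
 */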
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

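/*
 * Example (illustrative sketch): zeroing a range via WRITE SAME with the
 * shared zero page, the same shortcut blkdev_issue_zeroout() takes below
 * when bdev_write_same() reports support:
 *
 *	if (bdev_write_same(bdev))
 *		err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					      GFP_NOFS, ZERO_PAGE(0));
 */
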
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write against
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio = NULL;
	unsigned int sz;

	while (nr_sects != 0) {
		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
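		/*
		 * Pack as many zero pages into the bio as it will take;
		 * bio_add_page() returns the number of bytes it accepted,
		 * so fall out and start a new bio once it comes up short.
		 */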
		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
		return ret;
	}
	return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @discard: whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range. If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded. Should
 *  the discard request fail, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function will resort to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them. If the block device supports the WRITE SAME command
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range. Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	if (discard) {
		if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
				BLKDEV_DISCARD_ZERO))
			return 0;
	}

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
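
/*
 * Example (illustrative): zero a hypothetical range, preferring discard
 * when the device guarantees discarded blocks read back as zeroes:
 *
 *	int err = blkdev_issue_zeroout(bdev, start_sector, nr_sectors,
 *				       GFP_NOFS, true);
 */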