/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

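/* Completion handler shared by all bios in a batch. */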
static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	sector_t max_discard_sectors;
	sector_t granularity, alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = bdev_discard_alignment(bdev) >> 9;
	alignment = sector_div(alignment, granularity);
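	/*
	 * alignment is now the discard alignment offset, in sectors,
	 * reduced modulo the granularity (sector_div() returns the
	 * remainder).
	 */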

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
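	 *
	 * E.g. (illustrative numbers): a granularity of 8 sectors and a
	 * device limit of 100 sectors yield max_discard_sectors = 96, so
	 * each full-size request ends on a granularity boundary.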
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	sector_div(max_discard_sectors, granularity);
	max_discard_sectors *= granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

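	/*
	 * bb.done starts at 1: the extra reference belongs to the
	 * submitter and is dropped once all bios have been issued.
	 */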
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
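		 *
		 * Worked example (illustrative numbers): with granularity 8
		 * and alignment 1, an end_sect of 21 is pulled back to
		 * ((21 - 1) / 8) * 8 + 1 = 17, so the next bio starts on an
		 * aligned sector.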
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
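
/*
 * Example usage (sketch, not part of this file): a hypothetical caller
 * discarding the first 1 MiB (2048 512-byte sectors) of an already
 * opened device.  The GFP mask and error handling are assumptions.
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 */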

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EOPNOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
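
/*
 * Example usage (sketch): a hypothetical caller replicating one
 * logical block of zeroes across 16 MiB (32768 sectors).  ZERO_PAGE(0)
 * is used here only as a convenient always-present payload page; a
 * real caller would typically pass its own page.
 *
 *	int err = blkdev_issue_write_same(bdev, 0, 32768, GFP_KERNEL,
 *					  ZERO_PAGE(0));
 */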

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

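		/*
		 * Pack as many zero pages into the bio as it will take;
		 * bio_add_page() may accept less than requested once the
		 * bio is full, in which case we submit and start a new one.
		 */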
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill the block range.  Try an efficient WRITE SAME of the zero
 *  page first if the device supports it, and fall back to manually
 *  issued zero-filled bios otherwise.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
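
/*
 * Example usage (sketch): zeroing the first 1 MiB of a device.  The
 * caller and GFP choice are assumptions; on a device with WRITE SAME
 * support this typically becomes a single command, otherwise it falls
 * back to zero-filled writes.
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL);
 */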