/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
struct bio_batch {
	atomic_t		done;		/* in-flight bios + the submitter's reference */
	unsigned long		flags;		/* carries the BIO_UPTODATE result bit */
	struct completion	*wait;		/* completed by whoever drops the last reference */
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}
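
/*
 * Illustrative sketch (disabled; not part of the original file) of the
 * bio_batch lifecycle used by every helper below.  @done starts at 1 so
 * the submitter holds its own reference while queueing; each submitted
 * bio takes another, and whoever performs the final atomic_dec_and_test()
 * completes the waiter.  @type and @bio stand in for whatever the caller
 * builds.
 */
#if 0
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio_batch bb;

	atomic_set(&bb.done, 1);	/* the submitter's own reference */
	bb.flags = 1 << BIO_UPTODATE;	/* assume success until an end_io clears it */
	bb.wait = &wait;

	/* For each bio to be issued: */
	atomic_inc(&bb.done);		/* one reference per in-flight bio */
	submit_bio(type, bio);

	/* After the last submission, drop the submitter's reference; if
	 * bios are still in flight, the last end_io completes the wait. */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);
#endif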

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		max_discard_sectors &= ~(disc_sects - 1);
	}
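
	/*
	 * Worked example (illustrative, not from the original source):
	 * with a 512 KiB discard granularity, disc_sects =
	 * (512 << 10) >> 9 = 1024, so &= ~(disc_sects - 1) rounds
	 * max_discard_sectors down to a multiple of 1024 sectors.  The
	 * mask trick only aligns correctly when the granularity is a
	 * power of two, which this code assumes.
	 */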

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
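
/*
 * Hypothetical caller (disabled illustrative sketch, not part of this
 * file): how a filesystem trim path might drive blkdev_issue_discard().
 * The function name and the policy of swallowing -EOPNOTSUPP are
 * assumptions for illustration only.
 */
#if 0
static int example_discard_extent(struct block_device *bdev,
				  sector_t start, sector_t nr_sects,
				  bool secure)
{
	unsigned long flags = secure ? BLKDEV_DISCARD_SECURE : 0;
	int err;

	err = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, flags);
	if (err == -EOPNOTSUPP)
		err = 0;	/* device cannot discard: not fatal here */
	return err;
}
#endif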

/**
 * blkdev_issue_sanitize - queue a sanitize request
 * @bdev:	blockdev to issue sanitize for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a sanitize request for the specified block device.
 */
int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_SANITIZE;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_sanitize(q)) {
		pr_err("%s - device doesn't support sanitize\n", __func__);
		return -EOPNOTSUPP;
	}

	bio = bio_alloc(gfp_mask, 1);
	if (!bio)
		return -ENOMEM;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	bio->bi_end_io = bio_batch_end_io;
	bio->bi_bdev = bdev;
	bio->bi_private = &bb;

	atomic_inc(&bb.done);
	submit_bio(type, bio);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_sanitize);
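
/*
 * Hypothetical caller (disabled illustrative sketch): sanitize takes no
 * sector range because it purges the device as a whole, so an ioctl
 * backend could be as small as this.  The function name is an
 * assumption.
 */
#if 0
static int example_sanitize_device(struct block_device *bdev)
{
	/* Blocks until the device has completed the sanitize. */
	return blkdev_issue_sanitize(bdev, GFP_KERNEL);
}
#endif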

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue zeroout for
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
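
/*
 * Hypothetical caller (disabled illustrative sketch): explicitly zero a
 * range of sectors.  Unlike a discard, this writes real ZERO_PAGE-backed
 * bios and so always costs write bandwidth; the function name is an
 * assumption.
 */
#if 0
static int example_zero_range(struct block_device *bdev,
			      sector_t start, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_NOFS);
}
#endif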