/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Completion handler for discard bios: record any error in the bio's
 * flags and wake a waiting submitter, if there is one.
 */
static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper granularity,
	 * and capped so that bi_size, which is in bytes, cannot overflow
	 * an unsigned int.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		max_discard_sectors &= ~(disc_sects - 1);
	}
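	/*
	 * Worked example with hypothetical numbers: a 64KiB discard
	 * granularity gives disc_sects = 65536 >> 9 = 128, so the masking
	 * above rounds max_discard_sectors down to a multiple of 128
	 * sectors.  The bit trick only rounds correctly when disc_sects
	 * is a power of two, which holds for typical granularities.
	 */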

	if (flags & BLKDEV_IFL_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	while (nr_sects && !ret) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		/* Carve off at most max_discard_sectors per bio. */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		/*
		 * Take an extra reference so the bio's flags can still be
		 * inspected after the endio handler has dropped its own.
		 */
		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
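
/*
 * A minimal usage sketch, not part of the original file: synchronously
 * discard the first 1MiB of a device.  The function name is illustrative
 * only; the caller is assumed to already hold a reference on *bdev.
 */
static int __maybe_unused example_discard_first_mib(struct block_device *bdev)
{
	/* 1MiB == 2048 sectors of 512 bytes, starting at sector 0 */
	return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL,
				    BLKDEV_IFL_WAIT);
}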

struct bio_batch {
	atomic_t		done;		/* bios completed so far */
	unsigned long		flags;		/* shared BIO_* status bits */
	struct completion	*wait;		/* completed once per finished bio */
	bio_end_io_t		*end_io;	/* optional per-bio callback */
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/*
	 * bi_private is only set when the submitter waits, so bb may be
	 * NULL here; check it before touching bb->flags.
	 */
	if (bb) {
		if (err) {
			if (err == -EOPNOTSUPP)
				set_bit(BIO_EOPNOTSUPP, &bb->flags);
			else
				clear_bit(BIO_UPTODATE, &bb->flags);
		}
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
	bio_put(bio);
}

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.  Send a
 *    barrier at the beginning and at the end if requested; this guarantees
 *    correct request ordering.  An empty barrier allows us to avoid a
 *    post-queue flush.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

submit:
	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;

		/* Pack as many zero pages into the bio as it will take. */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		issued++;
		submit_bio(WRITE, bio);
	}

	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for bios in-flight */
		while (issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
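
/*
 * A minimal usage sketch, not part of the original file: zero the first
 * eight sectors of a device and wait for all writes to finish before
 * returning.  The function name is illustrative only.
 */
static int __maybe_unused example_zero_first_sectors(struct block_device *bdev)
{
	return blkdev_issue_zeroout(bdev, 0, 8, GFP_KERNEL, BLKDEV_IFL_WAIT);
}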