/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

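/*
 * Completion callback for the discard path: record -EOPNOTSUPP or a
 * generic I/O error in the bio flags, wake a synchronous submitter if
 * one is waiting on bi_private, and drop the submission reference.
 */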
static void blkdev_discard_end_io(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }

        if (bio->bi_private)
                complete(bio->bi_private);

        bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = flags & BLKDEV_IFL_BARRIER ?
                DISCARD_BARRIER : DISCARD_NOBARRIER;
        unsigned int max_discard_sectors;
        struct bio *bio;
        int ret = 0;

        if (!q)
                return -ENXIO;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;

        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        if (q->limits.discard_granularity) {
                unsigned int disc_sects = q->limits.discard_granularity >> 9;

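                /*
                 * The discard granularity is expected to be a power of
                 * two here, so masking off the low bits rounds
                 * max_discard_sectors down to a whole number of
                 * granules.
                 */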
                max_discard_sectors &= ~(disc_sects - 1);
        }

        while (nr_sects && !ret) {
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_sector = sector;
                bio->bi_end_io = blkdev_discard_end_io;
                bio->bi_bdev = bdev;
                if (flags & BLKDEV_IFL_WAIT)
                        bio->bi_private = &wait;

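                /*
                 * Issue at most max_discard_sectors per bio; a discard
                 * bio carries no data pages, so only bi_size describes
                 * the range being discarded.
                 */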
                if (nr_sects > max_discard_sectors) {
                        bio->bi_size = max_discard_sectors << 9;
                        nr_sects -= max_discard_sectors;
                        sector += max_discard_sectors;
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;
                }

                bio_get(bio);
                submit_bio(type, bio);

                if (flags & BLKDEV_IFL_WAIT)
                        wait_for_completion(&wait);

                if (bio_flagged(bio, BIO_EOPNOTSUPP))
                        ret = -EOPNOTSUPP;
                else if (!bio_flagged(bio, BIO_UPTODATE))
                        ret = -EIO;
                bio_put(bio);
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
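
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * would discard a range and wait for it synchronously as below.  The
 * helper name and the 1 MiB range are assumptions made for the example.
 */
static int __maybe_unused example_discard_first_mib(struct block_device *bdev)
{
        /* 2048 sectors of 512 bytes = 1 MiB, starting at sector 0 */
        return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL,
                                    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}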
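
/*
 * Book-keeping shared by all bios of one blkdev_issue_zeroout() batch:
 * @done counts completed bios, @flags carries the accumulated
 * BIO_UPTODATE/BIO_EOPNOTSUPP state, @wait is the completion the
 * submitter sleeps on, and @end_io is an optional callback chained on
 * each bio completion.
 */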
struct bio_batch
{
        atomic_t                done;
        unsigned long           flags;
        struct completion       *wait;
        bio_end_io_t            *end_io;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
        struct bio_batch *bb = bio->bi_private;

        if (bb) {
                if (err) {
                        if (err == -EOPNOTSUPP)
                                set_bit(BIO_EOPNOTSUPP, &bb->flags);
                        else
                                clear_bit(BIO_UPTODATE, &bb->flags);
                }
                if (bb->end_io)
                        bb->end_io(bio, err);
                atomic_inc(&bb->done);
                complete(bb->wait);
        }
        bio_put(bio);
}

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue the writes for
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 *  Send a barrier at the beginning and at the end if requested; this
 *  guarantees correct request ordering.  An empty barrier allows us to
 *  avoid a post-queue flush.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        int ret;
        struct bio *bio;
        struct bio_batch bb;
        unsigned int sz, issued = 0;
        DECLARE_COMPLETION_ONSTACK(wait);

        atomic_set(&bb.done, 0);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;
        bb.end_io = NULL;

        if (flags & BLKDEV_IFL_BARRIER) {
                /* issue async barrier before the data */
                ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
                if (ret)
                        return ret;
        }
submit:
        ret = 0;
        while (nr_sects != 0) {
                bio = bio_alloc(gfp_mask,
                                min(nr_sects, (sector_t)BIO_MAX_PAGES));
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_sector = sector;
                bio->bi_bdev   = bdev;
                bio->bi_end_io = bio_batch_end_io;
                if (flags & BLKDEV_IFL_WAIT)
                        bio->bi_private = &bb;

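                /*
                 * Pack as many zero pages into the bio as it will
                 * take; if bio_add_page() accepts less than a full
                 * page the bio is full, so stop and submit it.
                 */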
                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
                        if (sz == 0)
                                /* bio has maximum size possible */
                                break;
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        if (ret < (sz << 9))
                                break;
                }
                ret = 0;
                issued++;
                submit_bio(WRITE, bio);
        }
        /*
         * When all data bios are in flight, send the final barrier
         * if requested.
         */
        if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
                ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
                                        flags & BLKDEV_IFL_WAIT);

        if (flags & BLKDEV_IFL_WAIT)
                /* Wait for bios in-flight */
                while (issued != atomic_read(&bb.done))
                        wait_for_completion(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                /* One of the bios in the batch completed with an error. */
                ret = -EIO;

        if (ret)
                goto out;

        if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
                ret = -EOPNOTSUPP;
                goto out;
        }
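        /*
         * If the whole range has not been covered yet, go around again
         * and issue more bios for the remaining sectors.
         */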
        if (nr_sects != 0)
                goto submit;
out:
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
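
/*
 * Illustrative sketch, not part of the original file: zero a range of a
 * block device and wait for the zero-filled write bios to complete, with
 * barrier ordering as described in the kerneldoc above.  The helper name
 * is an assumption made for the example.
 */
static int __maybe_unused example_zero_range(struct block_device *bdev,
                                             sector_t start, sector_t nr_sects)
{
        return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_NOIO,
                                    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}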