/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - configure ordered write support for a queue
 * @q: the request queue
 * @ordered: one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win.  Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn)
{
	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
					     QUEUE_ORDERED_DO_POSTFLUSH))) {
		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
		return -EINVAL;
	}

	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;
	q->prepare_flush_fn = prepare_flush_fn;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
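
/*
 * Example (illustrative only, not part of this file): a driver for a
 * write-back caching device would typically register drain + flush
 * ordering from its probe/init path and supply a prepare_flush_fn that
 * turns the internal pre/post flush requests into a cache flush command
 * for the hardware.  The names below are hypothetical:
 *
 *	static void mydrv_prepare_flush(struct request_queue *q,
 *					struct request *rq)
 *	{
 *		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *		rq->cmd_len = 1;
 *		rq->cmd[0] = MYDRV_OP_FLUSH_CACHE;	(hypothetical opcode)
 *	}
 *
 *	blk_queue_ordered(mydrv->queue, QUEUE_ORDERED_DRAIN_FLUSH,
 *			  mydrv_prepare_flush);
 */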

/*
 * Cache flushing for ordered writes handling
 */
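/*
 * q->ordseq accumulates QUEUE_ORDSEQ_* bits as the stages of an ordered
 * sequence complete.  blk_ordered_cur_seq() reports the current stage as
 * the lowest bit not yet set (1 << ffz()); e.g. once the STARTED, DRAIN
 * and PREFLUSH bits are all set, the lowest clear bit is QUEUE_ORDSEQ_BAR,
 * meaning the barrier request itself is the stage in flight.
 */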
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;

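	/*
	 * Ordinary fs requests carry an ordered-color bit that alternates
	 * across barriers: a request with the same color as the original
	 * barrier was issued before it and still belongs to the drain
	 * stage, while a different color means it was issued after the
	 * barrier and the whole sequence is already done as far as it is
	 * concerned.
	 */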
	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}

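/*
 * Per-stage completion callbacks.  Each one tells the elevator that its
 * proxy request is finished and then marks the corresponding stage of
 * the ordered sequence complete, which may finish the whole sequence.
 */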
static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

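/*
 * Set up one of the internal flush requests (pre or post flush), let the
 * driver's prepare_flush_fn turn it into an actual cache flush command,
 * and push it to the front of the dispatch queue.
 */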
static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_flags = REQ_HARDBARRIER;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

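/*
 * Kick off an ordered sequence for the barrier request *rqp: stash the
 * original request away, queue the needed PREFLUSH/BAR/POSTFLUSH proxy
 * requests at the front of the queue, and point *rqp at the first of
 * them to be dispatched (or NULL if in-flight requests must drain
 * first).  Returns false if the whole sequence completed immediately,
 * telling the caller the request is gone.
 */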
static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * An empty barrier on a write-through device with ordered
		 * tag has no command to issue, and with no command to
		 * issue, ordering by tag can't be used.  Drain instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue the ordered sequence.  As we stack the requests at the
	 * head of the queue, we need to queue them in reverse order.
	 * Note that we rely on the fact that no fs request uses
	 * ELEVATOR_INSERT_FRONT, so no fs request can get in between the
	 * ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_RW;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If whole sequence is complete,
	 * return false to tell elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}

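/*
 * Called from the dispatch path for each request at the head of the
 * queue.  Starts an ordered sequence when a barrier shows up, and while
 * a sequence is in progress decides whether the request may be handed to
 * the driver now (*rqp left alone), must wait for its turn (*rqp set to
 * NULL), or is gone altogether (return false).
 */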
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
		(rq->cmd_flags & REQ_HARDBARRIER);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	if non-NULL, storage for the error sector on failure
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error.
 *    If the BLKDEV_IFL_WAIT flag is not passed, completion is not waited
 *    for; the caller only knows that the flush has been queued for later
 *    handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it.  For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
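
/*
 * Example (illustrative only): a filesystem that wants its data on
 * stable storage before acknowledging an fsync might issue a synchronous
 * flush like this; "sb" is a hypothetical superblock pointer:
 *
 *	int err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL,
 *				     BLKDEV_IFL_WAIT);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;	(flushes not supported: nothing to do)
 */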