/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - indicate what kind of ordered writes this queue supports
 * @q: the request queue
 * @ordered: one of QUEUE_ORDERED_*
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win.  Block drivers supporting this
 *   feature should call this function and indicate so.
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered)
{
        if (ordered != QUEUE_ORDERED_NONE &&
            ordered != QUEUE_ORDERED_DRAIN &&
            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
            ordered != QUEUE_ORDERED_DRAIN_FUA &&
            ordered != QUEUE_ORDERED_TAG &&
            ordered != QUEUE_ORDERED_TAG_FLUSH &&
            ordered != QUEUE_ORDERED_TAG_FUA) {
                printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
                return -EINVAL;
        }

        q->ordered = ordered;
        q->next_ordered = ordered;

        return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
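
/*
 * Editor's usage sketch (not part of the original file; the function
 * name below is hypothetical): a driver with a volatile write-back
 * cache would typically advertise drain + flush ordering from its
 * queue setup path, or QUEUE_ORDERED_DRAIN_FUA if its hardware honours
 * the FUA bit and therefore needs no post-flush.
 */
#if 0	/* illustrative only */
static int example_driver_setup_ordering(struct request_queue *q)
{
        /* drain the queue and flush the cache around barrier requests */
        return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
}
#endif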

/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
        if (!q->ordseq)
                return 0;
        return 1 << ffz(q->ordseq);
}
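
/*
 * Editor's note: q->ordseq accumulates the QUEUE_ORDSEQ_* bits of the
 * stages that have already completed (or were skipped), so the lowest
 * clear bit identifies the stage currently in progress.  Assuming the
 * values from blkdev.h (STARTED=0x01, DRAIN=0x02, PREFLUSH=0x04,
 * BAR=0x08, POSTFLUSH=0x10, DONE=0x20): with ordseq == 0x07 (started,
 * drained, pre-flushed), ffz() returns 3 and blk_ordered_cur_seq()
 * yields 1 << 3 == QUEUE_ORDSEQ_BAR.
 */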

unsigned blk_ordered_req_seq(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(q->ordseq == 0);

        if (rq == &q->pre_flush_rq)
                return QUEUE_ORDSEQ_PREFLUSH;
        if (rq == &q->bar_rq)
                return QUEUE_ORDSEQ_BAR;
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;

        /*
         * !fs requests don't need to follow barrier ordering.  Always
         * put them at the front.  This fixes the following deadlock.
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
        if (rq->cmd_type != REQ_TYPE_FS)
                return QUEUE_ORDSEQ_DRAIN;

        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
        struct request *rq;

        if (error && !q->orderr)
                q->orderr = error;

        BUG_ON(q->ordseq & seq);
        q->ordseq |= seq;

        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
                return false;

        /*
         * Okay, sequence complete.
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;
        __blk_end_request_all(rq, q->orderr);
        return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
        struct request *rq;
        rq_end_io_fn *end_io;

        if (which == QUEUE_ORDERED_DO_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
                rq = &q->post_flush_rq;
                end_io = post_flush_end_io;
        }

        blk_rq_init(q, rq);
        rq->cmd_type = REQ_TYPE_FS;
        rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
        rq->rq_disk = q->orig_bar_rq->rq_disk;
        rq->end_io = end_io;

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        unsigned skip = 0;

        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;

        /*
         * For an empty barrier, there's no actual BAR request, which
         * in turn makes POSTFLUSH unnecessary.  Mask them off.
         */
        if (!blk_rq_sectors(rq)) {
                q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
                                QUEUE_ORDERED_DO_POSTFLUSH);
                /*
                 * An empty barrier on a write-through device with
                 * ordered tag has no command to issue, and without any
                 * command to issue, ordering by tag can't be used.
                 * Drain instead.
                 */
                if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
                    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
                        q->ordered &= ~QUEUE_ORDERED_BY_TAG;
                        q->ordered |= QUEUE_ORDERED_BY_DRAIN;
                }
        }

        /* stash away the original request */
        blk_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = NULL;

        /*
         * Queue the ordered sequence.  As we stack the requests at the
         * head, we need to queue them in reverse order.  Note that we
         * rely on the fact that no fs request uses ELEVATOR_INSERT_FRONT
         * and thus no fs request gets in between the ordered sequence.
         */
        if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
                rq = &q->post_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_POSTFLUSH;

        if (q->ordered & QUEUE_ORDERED_DO_BAR) {
                rq = &q->bar_rq;

                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                        rq->cmd_flags |= REQ_WRITE;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                init_request_from_bio(rq, q->orig_bar_rq->bio);
                rq->end_io = bar_end_io;

                elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
        } else
                skip |= QUEUE_ORDSEQ_BAR;

        if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_PREFLUSH;

        if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
                rq = NULL;
        else
                skip |= QUEUE_ORDSEQ_DRAIN;

        *rqp = rq;

        /*
         * Complete skipped sequences.  If the whole sequence is complete,
         * return false to tell the elevator that this request is gone.
         */
        return !blk_ordered_complete_seq(q, skip, 0);
}
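
/*
 * Editor's sketch of the result for a sequence that uses all three
 * steps: each proxy request is inserted with ELEVATOR_INSERT_FRONT, so
 * queueing post-flush, then bar, then pre-flush leaves the head of the
 * dispatch queue as
 *
 *	pre_flush_rq -> bar_rq -> post_flush_rq -> (rest of the queue)
 *
 * with *rqp pointing at the first step that must actually be issued,
 * or NULL while in-flight requests are still draining.
 */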

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
                               (rq->cmd_flags & REQ_HARDBARRIER);

        if (!q->ordseq) {
                if (!is_barrier)
                        return true;

                if (q->next_ordered != QUEUE_ORDERED_NONE)
                        return start_ordered(q, rqp);
                else {
                        /*
                         * Queue ordering not supported.  Terminate
                         * with prejudice.
                         */
                        blk_dequeue_request(rq);
                        __blk_end_request_all(rq, -EOPNOTSUPP);
                        *rqp = NULL;
                        return false;
                }
        }

        /*
         * Ordered sequence in progress
         */

        /* Special requests are not subject to ordering rules. */
        if (rq->cmd_type != REQ_TYPE_FS &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return true;

        if (q->ordered & QUEUE_ORDERED_BY_TAG) {
                /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
        } else {
                /* Ordered by draining.  Wait for turn. */
                WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
                if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
                        *rqp = NULL;
        }

        return true;
}
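
/*
 * Editor's note on the contract (hedged; in block layers of this
 * vintage the caller is the dispatch path, __elv_next_request() in
 * block/blk.h): a false return tells the elevator the request is gone,
 * either because the barrier completed immediately or because it was
 * failed with -EOPNOTSUPP.  On a true return, *rqp may have been
 * replaced with the next step of the ordered sequence, or set to NULL
 * to stall the queue until the current stage completes; otherwise the
 * request may be dispatched normally.
 */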

static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
        if (bio->bi_private)
                complete(bio->bi_private);
        bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 * @flags: BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can
 *    supply room for storing the error offset in case of a flush error,
 *    if they wish to.  If the WAIT flag is not passed, the caller can
 *    only assume that the flush was queued internally for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * Some block devices may not have their queue correctly set up
         * here (e.g. a loop device without a backing file), and issuing
         * a flush in that case will panic.  Ensure there is a request
         * function before issuing the barrier.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_bdev = bdev;
        if (test_bit(BLKDEV_WAIT, &flags))
                bio->bi_private = &wait;

        bio_get(bio);
        submit_bio(WRITE_BARRIER, bio);
        if (test_bit(BLKDEV_WAIT, &flags)) {
                wait_for_completion(&wait);
                /*
                 * The driver must store the error location in ->bi_sector,
                 * if it supports it.  For non-stacked drivers, this should
                 * be copied from blk_rq_pos(rq).
                 */
                if (error_sector)
                        *error_sector = bio->bi_sector;
        }

        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
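
/*
 * Editor's usage sketch (not part of the original file; the helper
 * name is hypothetical): a filesystem making already completed writes
 * durable, e.g. at fsync time, would typically issue a synchronous
 * flush and treat "no cache to flush" as success.
 */
#if 0	/* illustrative only */
static int example_flush_on_fsync(struct block_device *bdev)
{
        int err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL,
                                     BLKDEV_IFL_WAIT);

        /* a device without a volatile write cache has nothing to flush */
        if (err == -EOPNOTSUPP)
                err = 0;
        return err;
}
#endif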