/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011  Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011  Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is written, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of a flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending advance to the next step.
 * This allows arbitrary merging of different types of PREFLUSH/FUA requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. A flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
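
/*
 * Illustrative example of the rules above: on a device with a writeback
 * cache but no FUA support, a REQ_PREFLUSH|REQ_FUA write with data runs
 * the full PREFLUSH -> DATA -> POSTFLUSH sequence; on a FUA-capable
 * device the same request runs PREFLUSH -> DATA with REQ_FUA passed
 * down; without a writeback cache it is executed as a plain write.
 */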

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If a flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, unsigned int flags);

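/*
 * Compute which REQ_FSEQ_* steps @rq needs based on its flags and the
 * queue's cache capabilities.  For example, a REQ_PREFLUSH|REQ_FUA write
 * with data on a queue with QUEUE_FLAG_WC set but QUEUE_FLAG_FUA clear
 * yields REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH.
 */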
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

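/*
 * The current step is the lowest REQ_FSEQ_* bit not yet set in
 * rq->flush.seq; e.g. once PREFLUSH and DATA are marked done, ffz()
 * selects REQ_FSEQ_POSTFLUSH.
 */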
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

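/*
 * Queue @rq for dispatch.  Returns %true if the caller should kick the
 * queue afterwards (legacy path only); on blk-mq the requeue list kicks
 * itself.
 */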
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
        if (rq->q->mq_ops) {
                blk_mq_add_to_requeue_list(rq, add_front, true);
                return false;
        } else {
                if (add_front)
                        list_add(&rq->queuelist, &rq->q->queue_head);
                else
                        list_add_tail(&rq->queuelist, &rq->q->queue_head);
                return true;
        }
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: status of the part that just completed
 *
 * @rq just completed the @seq part of its flush sequence; record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        bool queued = false, kicked;
        unsigned int cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->flush.list, pending);
                break;

        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
                queued = blk_flush_queue_rq(rq, true);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_flush_issue() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                BUG_ON(!list_empty(&rq->queuelist));
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
                if (q->mq_ops)
                        blk_mq_end_request(rq, error);
                else
                        __blk_end_request_all(rq, error);
                break;

        default:
                BUG();
        }

        kicked = blk_kick_flush(q, fq, cmd_flags);
        return kicked | queued;
}

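/*
 * Completion handler for the flush request itself: release the borrowed
 * tag, account the completed flush by toggling flush_running_idx, and
 * advance every request that was pending on this flush to its next
 * sequence step.
 */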
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        bool queued = false;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

        if (q->mq_ops) {
                struct blk_mq_hw_ctx *hctx;

                /* release the tag's ownership to the req cloned from */
                spin_lock_irqsave(&fq->mq_flush_lock, flags);
                hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
                if (!q->elevator) {
                        blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                        flush_rq->tag = -1;
                } else {
                        blk_mq_put_driver_tag_hctx(hctx, flush_rq);
                        flush_rq->internal_tag = -1;
                }
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        if (!q->mq_ops)
                elv_completed_request(q, flush_rq);

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, flush.list) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                queued |= blk_flush_complete_seq(rq, fq, seq, error);
        }

        /*
         * Kick the queue to avoid a stall in two cases:
         * 1. Moving a request silently to an empty queue_head may stall
         *    the queue.
         * 2. When a flush request is running in a non-queueable queue,
         *    the queue is held.  Restart the queue after the flush
         *    request is finished to avoid a stall.
         * This function is called from the request completion path and
         * calling directly into request_fn may confuse the driver.
         * Always use kblockd.
         */
        if (queued || fq->flush_queue_delayed) {
                WARN_ON(q->mq_ops);
                blk_run_queue_async(q);
        }
        fq->flush_queue_delayed = 0;
        if (q->mq_ops)
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed; consider issuing a flush
 * request.  Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           unsigned int flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return false;

        /* C2 and C3
         *
         * For blk-mq + scheduling, we can risk having all driver tags
         * assigned to empty flushes, and we deadlock if we are expecting
         * other requests to make progress.  Don't defer for that case.
         */
        if (!list_empty(&fq->flush_data_in_flight) &&
            !(q->mq_ops && q->elevator) &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return false;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * When no I/O scheduler is in use, borrow the tag from the first
         * request since the two can't be in flight at the same time, and
         * take over the tag's ownership for the flush request.
         *
         * When an I/O scheduler is in use, the flush request needs to
         * borrow the scheduler tag just to balance the driver tag put/get.
         */
        if (q->mq_ops) {
                struct blk_mq_hw_ctx *hctx;

                flush_rq->mq_ctx = first_rq->mq_ctx;

                if (!q->elevator) {
                        fq->orig_rq = first_rq;
                        flush_rq->tag = first_rq->tag;
                        hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
                        blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
                } else {
                        flush_rq->internal_tag = first_rq->internal_tag;
                }
        }

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;

        return blk_flush_queue_rq(flush_rq, false);
}

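/*
 * Completion handler for the DATA step on the legacy (non-mq) path;
 * advances @rq to the next step of its flush sequence.
 */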
static void flush_data_end_io(struct request *rq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

        lockdep_assert_held(q->queue_lock);

        /*
         * Update q->in_flight[] here to make this tag usable early,
         * because in blk_queue_start_tag(), q->in_flight[BLK_RW_ASYNC]
         * is used to limit async I/O and reserve tags for sync I/O.
         *
         * More importantly, this avoids the following I/O deadlock:
         *
         * - suppose there are 40 FUA requests coming to the flush queue
         *   and the queue depth is 31
         * - 30 rqs are scheduled, then blk_queue_start_tag() can't alloc
         *   a tag for async I/O any more
         * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
         *   and flush_data_end_io() is called
         * - the other rqs still can't go ahead without updating
         *   q->in_flight[BLK_RW_ASYNC] here; meanwhile these rqs are
         *   held in the flush data queue and make no progress on
         *   handling the post flush rq
         * - only after the post flush rq is handled can all these rqs
         *   be completed
         */

        elv_completed_request(q, rq);

        /* for avoiding double accounting */
        rq->rq_flags &= ~RQF_STARTED;

        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
        if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
                blk_run_queue_async(q);
}

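/*
 * Completion handler for the DATA step on the blk-mq path; releases the
 * driver tag when a scheduler is in use and advances @rq under
 * fq->mq_flush_lock.
 */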
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        hctx = blk_mq_map_queue(q, ctx->cpu);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag_hctx(hctx, rq);
        }

        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_run_hw_queue(hctx, true);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions or from __blk_mq_run_hw_queue() to dispatch requests.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;	/* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

        if (!q->mq_ops)
                lockdep_assert_held(q->queue_lock);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        /*
         * An empty flush handed down from a stacking driver may
         * translate into nothing if the underlying device does not
         * advertise a write-back cache.  In this case, simply
         * complete the request.
         */
        if (!policy) {
                if (q->mq_ops)
                        blk_mq_end_request(rq, 0);
                else
                        __blk_end_request(rq, 0, 0);
                return;
        }

        BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

        /*
         * If there's data but flush is not necessary, the request can be
         * processed directly without going through flush machinery.  Queue
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                if (q->mq_ops)
                        blk_mq_request_bypass_insert(rq, false);
                else
                        list_add_tail(&rq->queuelist, &q->queue_head);
                return;
        }

        /*
         * @rq should go through flush machinery.  Mark it part of flush
         * sequence and submit for further processing.
         */
        memset(&rq->flush, 0, sizeof(rq->flush));
        INIT_LIST_HEAD(&rq->flush.list);
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        if (q->mq_ops) {
                rq->end_io = mq_flush_data_end_io;

                spin_lock_irq(&fq->mq_flush_lock);
                blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
                return;
        }
        rq->end_io = flush_data_end_io;

        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                       sector_t *error_sector)
{
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * Some block devices may not have their queue correctly set up
         * here (e.g. a loop device without a backing file), and so issuing
         * a flush here will panic.  Ensure there is a request function
         * before issuing the flush.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        ret = submit_bio_wait(bio);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it.  For non-stacked drivers, this should be
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
                *error_sector = bio->bi_iter.bi_sector;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

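/*
 * Allocate the flush machinery for a queue: the two pending/running
 * lists used for double buffering and the preallocated flush request,
 * sized to include the driver's per-request payload (@cmd_size).
 */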
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
        if (!fq)
                goto fail;

        if (q->mq_ops)
                spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);
        INIT_LIST_HEAD(&fq->flush_data_in_flight);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio based request queues don't have a flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}