/*
 * Functions related to executing requests on behalf of drivers
 * (blk_execute_rq and friends).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	bool is_pm_resume;

	WARN_ON(irqs_disabled());

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	if (q->mq_ops) {
		blk_mq_insert_request(q, rq, true);
		return;
	}

	/*
	 * need to check this before __blk_run_queue(), because rq can
	 * be freed before that returns.
	 */
	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		rq->cmd_flags |= REQ_QUIET;
		rq->errors = -ENXIO;
		__blk_end_request_all(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	/*
	 * For a PM resume request the queue is still stopped, so the
	 * __blk_run_queue() above did not actually run it; kick it
	 * unconditionally.
	 */
	if (is_pm_resume)
		__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
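
/*
 * Illustrative sketch (kept compiled out): one way a caller might drive
 * blk_execute_rq_nowait() asynchronously.  The helper names below are
 * invented for the example; it also assumes the legacy (non-mq) path,
 * where ->end_io is invoked with the queue lock held, hence
 * __blk_put_request() rather than blk_put_request().
 */
#if 0
static void example_end_io(struct request *rq, int error)
{
	/* inspect rq->errors / rq->sense here if needed, then release */
	__blk_put_request(rq->q, rq);
}

static void example_issue_nowait(struct request_queue *q,
				 struct gendisk *disk, struct request *rq)
{
	/*
	 * Queue @rq at the tail and return immediately; example_end_io()
	 * runs on completion (or straight away if the queue is dying),
	 * so @rq must not be touched after this call.
	 */
	blk_execute_rq_nowait(q, disk, rq, 0, example_end_io);
}
#endif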

/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/* supply a zeroed stack sense buffer if the caller didn't set one */
	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);
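
/*
 * Illustrative sketch (kept compiled out): a typical synchronous caller,
 * loosely modeled on SCSI passthrough users of this interface.  The helper
 * name and the hand-rolled TEST UNIT READY setup are assumptions for the
 * example, not part of this file.
 */
#if 0
static int example_test_unit_ready(struct request_queue *q,
				   struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = 0x00;	/* TEST UNIT READY opcode */
	rq->cmd_len = 6;
	rq->timeout = 30 * HZ;

	/* waits for completion; a stack sense buffer is supplied for us */
	err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}
#endif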