/*
 * Functions related to synchronous and asynchronous request execution
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
        struct completion *waiting = rq->end_io_data;

        rq->end_io_data = NULL;

        /*
         * complete last, if this is a stack request the process (and thus
         * the rq pointer) could be invalid right after this complete()
         */
        complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
                           struct request *rq, int at_head,
                           rq_end_io_fn *done)
{
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
        bool is_pm_resume;

        WARN_ON(irqs_disabled());

        rq->rq_disk = bd_disk;
        rq->end_io = done;

        /*
         * don't check the dying flag for MQ because the request won't
         * be reused after the dying flag is set
         */
        if (q->mq_ops) {
                blk_mq_insert_request(rq, at_head, true, false);
                return;
        }

        /*
         * need to check this before __blk_run_queue(), because rq can
         * be freed before that returns
         */
        is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

        spin_lock_irq(q->queue_lock);

        if (unlikely(blk_queue_dying(q))) {
                rq->cmd_flags |= REQ_QUIET;
                rq->errors = -ENXIO;
                __blk_end_request_all(rq, rq->errors);
                spin_unlock_irq(q->queue_lock);
                return;
        }

        __elv_add_request(q, rq, where);
        __blk_run_queue(q);
        /*
         * during PM resume the queue is stopped, so __blk_run_queue()
         * above won't actually run it; run it unconditionally
         */
        if (is_pm_resume)
                __blk_run_queue_uncond(q);
        spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
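
/*
 * Illustrative sketch: one plausible way a legacy (non-MQ) driver of this
 * era might use blk_execute_rq_nowait() to fire a prepared request
 * asynchronously.  The my_* names are hypothetical, the NULL-on-failure
 * return convention of blk_get_request() is assumed for this kernel
 * generation, and the sketch is compiled out with #if 0 so it cannot be
 * mistaken for code that belongs to this file.
 */
#if 0
struct my_driver_ctx {
        struct completion done;
};

/* runs from completion context, with the queue lock held for non-MQ */
static void my_end_io(struct request *rq, int error)
{
        struct my_driver_ctx *ctx = rq->end_io_data;

        /* rq->errors holds the low-level status at this point */
        complete(&ctx->done);
        __blk_put_request(rq->q, rq);
}

static int my_submit_async(struct request_queue *q, struct gendisk *disk,
                           struct my_driver_ctx *ctx)
{
        struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_SPECIAL;        /* driver-private command */
        init_completion(&ctx->done);
        rq->end_io_data = ctx;

        /* queue at the tail and return; my_end_io() runs on completion */
        blk_execute_rq_nowait(q, disk, rq, 0, my_end_io);
        return 0;
}
#endif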

/**
 * blk_execute_rq - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
                   struct request *rq, int at_head)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        char sense[SCSI_SENSE_BUFFERSIZE];
        int err = 0;
        unsigned long hang_check;

        if (!rq->sense) {
                memset(sense, 0, sizeof(sense));
                rq->sense = sense;
                rq->sense_len = 0;
        }

        rq->end_io_data = &wait;
        blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

        /* Prevent hang_check timer from firing at us during very long I/O */
        hang_check = sysctl_hung_task_timeout_secs;
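        /*
         * hang_check is in seconds and the hung task watchdog fires after
         * hang_check seconds of uninterruptible sleep; waking up every
         * hang_check * (HZ/2) jiffies (i.e. every hang_check/2 seconds)
         * keeps this task off its radar while still sleeping most of
         * the time.
         */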
        if (hang_check)
                while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
        else
                wait_for_completion_io(&wait);

        if (rq->errors)
                err = -EIO;

        if (rq->sense == sense) {
                rq->sense = NULL;
                rq->sense_len = 0;
        }

        return err;
}
EXPORT_SYMBOL(blk_execute_rq);
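
/*
 * Illustrative sketch: one plausible synchronous caller, roughly the shape
 * of an SG_IO-style passthrough path.  The my_* names are hypothetical,
 * the CDB is assumed to fit in rq->cmd (BLK_MAX_CDB bytes), the command
 * payload is elided, and the sketch is compiled out with #if 0 so it
 * cannot be mistaken for code that belongs to this file.
 */
#if 0
static int my_sync_cmd(struct request_queue *q, struct gendisk *disk,
                       unsigned char *cdb, unsigned int cdb_len)
{
        struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
        int err;

        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;       /* SCSI passthrough */
        memcpy(rq->cmd, cdb, cdb_len);
        rq->cmd_len = cdb_len;
        rq->timeout = 60 * HZ;

        /*
         * blk_execute_rq() supplies an on-stack sense buffer and a
         * completion for us, so this call sleeps until the command
         * finishes and returns -EIO if rq->errors was set.
         */
        err = blk_execute_rq(q, disk, rq, 0);

        blk_put_request(rq);
        return err;
}
#endif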