/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

int blk_should_fake_timeout(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return 0;

	return should_fail(&fail_io_timeout, 1);
}

static int __init fail_io_timeout_debugfs(void)
{
	return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
}

late_initcall(fail_io_timeout_debugfs);
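
/*
 * With CONFIG_FAULT_INJECTION_DEBUG_FS enabled, the fault attributes
 * registered above can also be tuned at runtime via debugfs; they should
 * appear under <debugfs>/fail_io_timeout/ (typically mounted at
 * /sys/kernel/debug). The exact knobs come from the generic
 * fault-injection core, not from this file.
 */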
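/*
 * part_timeout_show()/part_timeout_store() back the per-disk sysfs
 * attribute that gates fake timeouts: reading reports whether
 * QUEUE_FLAG_FAIL_IO is set on the disk's queue; writing a non-zero
 * value sets the flag and writing zero clears it. The attribute itself
 * is registered elsewhere (in the genhd code).
 */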
ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}
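
/*
 * Illustrative use from userspace, assuming the attribute is exposed as
 * a file in the disk's sysfs directory (the exact name and path may vary
 * by kernel version):
 *
 *	# echo 1 > /sys/block/sdb/io-timeout-fail
 */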

#endif /* CONFIG_FAIL_IO_TIMEOUT */

/*
 * blk_delete_timer - Delete/cancel the timeout timer for a given request.
 * @req:	request that we are canceling the timer for
 */
void blk_delete_timer(struct request *req)
{
	list_del_init(&req->timeout_list);
}

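/*
 * Invoke the driver's timeout handler and act on its verdict: complete
 * the request, rearm its timer, or leave it to the LLD's own error
 * handling. Called with the request already off the timeout list and
 * marked complete, so normal completion cannot race with us.
 */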
static void blk_rq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	enum blk_eh_timer_return ret;

	ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_clear_rq_complete(req);
		blk_add_timer(req);
		break;
	case BLK_EH_NOT_HANDLED:
		/*
		 * The LLD handles this for now, but in the future we
		 * could send a request message to abort the command and
		 * move more of the generic SCSI EH code into the block
		 * layer.
		 */
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
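/*
 * Per-queue timer callback: scan the queue's timeout list, time out each
 * request whose deadline has passed (unless normal completion won the
 * race first), and rearm the timer for the earliest remaining deadline.
 */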
void blk_rq_timed_out_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	unsigned long flags, next = 0;
	struct request *rq, *tmp;

	spin_lock_irqsave(q->queue_lock, flags);

	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
		if (time_after_eq(jiffies, rq->deadline)) {
			list_del_init(&rq->timeout_list);

			/*
			 * Check if we raced with end io completion
			 */
			if (blk_mark_rq_complete(rq))
				continue;
			blk_rq_timed_out(rq);
		} else {
			if (!next || time_after(next, rq->deadline))
				next = rq->deadline;
		}
	}

	/*
	 * next can never be 0 here with the list non-empty, since we always
	 * bump ->deadline to 1 so we can detect if the timer was ever added
	 * or not. See comment in blk_add_timer()
	 */
	if (next)
		mod_timer(&q->timeout, round_jiffies_up(next));

	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * blk_abort_request - Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they initiated the abort via blk_abort_request(). The queue
 * lock must be held when calling this function.
 */
void blk_abort_request(struct request *req)
{
	if (blk_mark_rq_complete(req))
		return;
	blk_delete_timer(req);
	blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
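
/*
 * A minimal usage sketch for an LLD abort path (illustrative only; the
 * lock context and request lookup are hypothetical):
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_abort_request(rq);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */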

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	if (!q->rq_timed_out_fn)
		return;

	BUG_ON(!list_empty(&req->timeout_list));
	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));

	if (req->timeout) {
		req->deadline = jiffies + req->timeout;
	} else {
		req->deadline = jiffies + q->rq_timeout;
		/*
		 * Some LLDs, like scsi, peek at the timeout to prevent
		 * a command from being retried forever.
		 */
		req->timeout = q->rq_timeout;
	}
	list_add_tail(&req->timeout_list, &q->timeout_list);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
	expiry = round_jiffies_up(req->deadline);

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires))
		mod_timer(&q->timeout, expiry);
}
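
/*
 * Wiring sketch, assuming the blk_queue_rq_timed_out() and
 * blk_queue_rq_timeout() setters from blk-settings.c: a request-based
 * driver installs a queue-wide handler and default timeout once at init
 * time, and the block layer then arms a timer for each started request.
 * The handler returns one of BLK_EH_HANDLED, BLK_EH_RESET_TIMER or
 * BLK_EH_NOT_HANDLED. Names below are hypothetical:
 *
 *	static enum blk_eh_timer_return my_timed_out(struct request *rq)
 *	{
 *		return BLK_EH_RESET_TIMER;
 *	}
 *
 *	blk_queue_rq_timed_out(q, my_timed_out);
 *	blk_queue_rq_timeout(q, 30 * HZ);
 */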

/**
 * blk_abort_queue - Abort all requests on the given queue
 * @q:	pointer to the queue of interest
 */
void blk_abort_queue(struct request_queue *q)
{
	unsigned long flags;
	struct request *rq, *tmp;
	LIST_HEAD(list);

	/*
	 * Not a request based block device, nothing to abort
	 */
	if (!q->request_fn)
		return;

	spin_lock_irqsave(q->queue_lock, flags);

	elv_abort_queue(q);

	/*
	 * Splice entries to a local list to avoid deadlocking if entries
	 * get re-added to the timeout list by error handling
	 */
	list_splice_init(&q->timeout_list, &list);

	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
		blk_abort_request(rq);

	/*
	 * Occasionally, blk_abort_request() will return without
	 * deleting the element from the list. Make sure we add those back
	 * instead of leaving them on the local stack list.
	 */
	list_splice(&list, &q->timeout_list);

	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_abort_queue);
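
/*
 * Usage sketch (hypothetical error path): a transport that has lost its
 * link can fail all outstanding I/O at once instead of waiting for each
 * request to time out individually, e.g. for a SCSI device:
 *
 *	blk_abort_queue(sdev->request_queue);
 */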