/*
 * Functions related to executing fully prepared requests from drivers,
 * with and without waiting for completion.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	__blk_put_request(rq->q, rq);

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution. Don't wait for completion.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	rq->rq_disk = bd_disk;
	rq->end_io = done;
	WARN_ON(irqs_disabled());
	spin_lock_irq(q->queue_lock);
	__elv_add_request(q, rq, where, 1);
	__generic_unplug_device(q);
	/* the queue is stopped so it won't be plugged+unplugged */
	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
		q->request_fn(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
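
/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * wants to fire off an already prepared, no-data command without blocking
 * might pair blk_get_request() with blk_execute_rq_nowait() roughly as
 * below.  The names my_cmd_done() and my_send_cmd(), the REQ_TYPE_SPECIAL
 * command type and the timeout value are assumptions for the example, not
 * requirements of this API.
 *
 *	static void my_cmd_done(struct request *rq, int error)
 *	{
 *		// inspect rq->errors / rq->sense here if needed; the queue
 *		// lock is held when ->end_io runs, so use __blk_put_request()
 *		__blk_put_request(rq->q, rq);
 *	}
 *
 *	static int my_send_cmd(struct request_queue *q, struct gendisk *disk)
 *	{
 *		struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *		if (!rq)
 *			return -ENOMEM;
 *		rq->cmd_type = REQ_TYPE_SPECIAL;
 *		rq->timeout = 60 * HZ;
 *		blk_execute_rq_nowait(q, disk, rq, 0, my_cmd_done);
 *		return 0;
 *	}
 */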

/**
 * blk_execute_rq - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/*
	 * we need an extra reference to the request, so we can look at
	 * it after io completion
	 */
	rq->ref_count++;

	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);
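
/*
 * Illustrative usage sketch (not part of the original file): a synchronous
 * caller typically allocates a request, maps a kernel buffer into it, lets
 * blk_execute_rq() block until the device has finished, and then drops the
 * caller's reference.  The function name my_sync_read(), the
 * REQ_TYPE_SPECIAL command type and the timeout are assumptions for the
 * example; blk_get_request(), blk_rq_map_kern(), blk_execute_rq() and
 * blk_put_request() are the interfaces actually used.
 *
 *	static int my_sync_read(struct request_queue *q, struct gendisk *disk,
 *				void *buf, unsigned int len)
 *	{
 *		struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *		int err;
 *
 *		if (!rq)
 *			return -ENOMEM;
 *		rq->cmd_type = REQ_TYPE_SPECIAL;
 *		rq->timeout = 60 * HZ;
 *
 *		err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *		if (!err)
 *			err = blk_execute_rq(q, disk, rq, 0);
 *
 *		blk_put_request(rq);
 *		return err;
 *	}
 */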