/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq);
	return 0;
}

static void scsi_run_queue(struct request_queue *q);
static void scsi_release_buffers(struct scsi_cmnd *cmd);

/*
 * Function:    scsi_unprep_request()
 *
 * Purpose:     Remove all preparation done for a request, including its
 *              associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:   req	- request to unprepare
 *
 * Lock status: Assumed that no locks are held upon entry.
 *
 * Returns:     Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->flags &= ~REQ_DONTPREP;
	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;

	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 * Notes:       Upon return, cmd is a stale pointer.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
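/*
 * Illustrative sketch (not code from this file): the usual path into
 * scsi_queue_insert() is a low-level driver refusing a command, e.g. a
 * hypothetical mid-layer caller doing
 *
 *	rtn = host->hostt->queuecommand(cmd, scsi_done);
 *	if (rtn == SCSI_MLQUEUE_HOST_BUSY)
 *		scsi_queue_insert(cmd, SCSI_MLQUEUE_HOST_BUSY);
 *
 * Note that cmd must not be dereferenced after the call returns.
 */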

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:	This function is only used for queueing requests for things
 *		like ioctls and character device requests - this is because
 *		we essentially just inject a request into the queue for the
 *		device.
 *
 *		In order to support the scsi_device_quiesce function, we
 *		now inject requests on the *head* of the device queue
 *		rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called by us or by the
	 * scsi_hosts[host].queuecommand() function; it in turn needs to
	 * call the completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);
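/*
 * Illustrative sketch, assuming a caller that already owns a struct
 * scsi_request (all names hypothetical): issue a TEST UNIT READY with
 * an asynchronous completion callback.
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *
 *	sreq->sr_data_direction = DMA_NONE;
 *	scsi_do_req(sreq, cmd, NULL, 0, my_done, 30 * HZ, 3);
 *
 * where my_done() is the caller's scsi_cmnd completion routine.
 */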

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
	BUG_ON(!req->waiting);

	complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);
	int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
	struct request *req;

	req = blk_get_request(sreq->sr_device->request_queue, write,
			      __GFP_WAIT);
	if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
				       buffer, bufflen, __GFP_WAIT)) {
		sreq->sr_result = DRIVER_ERROR << 24;
		blk_put_request(req);
		return;
	}

	req->flags |= REQ_NOMERGE;
	req->waiting = &wait;
	req->end_io = scsi_wait_req_end_io;
	req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
	req->sense = sreq->sr_sense_buffer;
	req->sense_len = 0;
	memcpy(req->cmd, cmnd, req->cmd_len);
	req->timeout = timeout;
	req->flags |= REQ_BLOCK_PC;
	req->rq_disk = NULL;
	blk_insert_request(sreq->sr_device->request_queue, req,
			   sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	sreq->sr_result = req->errors;
	if (req->errors)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	blk_put_request(req);
}

EXPORT_SYMBOL(scsi_wait_req);
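/*
 * Illustrative sketch (hypothetical caller and buffer): a synchronous
 * six-byte INQUIRY that sleeps until the command completes.
 *
 *	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *
 *	sreq->sr_data_direction = DMA_FROM_DEVICE;
 *	scsi_wait_req(sreq, cmd, buffer, 36, 30 * HZ, 3);
 *	if (sreq->sr_result)
 *		handle_error(sreq);
 *
 * On failure, sr_result carries DRIVER_ERROR << 24 in the driver byte.
 */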

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request flags
 *
 * returns the req->errors value, which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
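/*
 * Illustrative sketch (hypothetical caller): a blocking TEST UNIT READY
 * with no data transfer and no sense collection; the timeout and retry
 * values are illustrative only.
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result;
 *
 *	result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0,
 *			      NULL, 30 * HZ, 3, 0);
 *
 * A result of zero means GOOD status; anything else can be decoded
 * with the status_byte()/host_byte()/driver_byte() macros.
 */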


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
		memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
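/*
 * Illustrative sketch (hypothetical caller): as above, but asking for
 * decoded sense data so UNIT ATTENTION can be told apart from other
 * failures.
 *
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *				  &sshdr, 30 * HZ, 3);
 *	if (result && scsi_sense_valid(&sshdr) &&
 *	    sshdr.sense_key == UNIT_ATTENTION)
 *		retry_or_rescan(sdev);
 */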

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
					  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
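/*
 * For reference, a sketch of the sizing logic implemented by
 * scsi_alloc_sgtable() above: use_sg is rounded up to the smallest
 * mempool that can hold it.
 *
 *	use_sg   1..8    -> sgpool-8    (sglist_len 0)
 *	use_sg   9..16   -> sgpool-16   (sglist_len 1)
 *	use_sg  17..32   -> sgpool-32   (sglist_len 2)
 *	use_sg  33..64   -> sgpool-64   (sglist_len 3, if configured)
 *	use_sg  65..128  -> sgpool-128  (sglist_len 4, if configured)
 *	use_sg 129..256  -> sgpool-256  (sglist_len 5, if configured)
 *
 * Anything larger makes scsi_alloc_sgtable() return NULL.
 */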

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
		return;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */

		/*
		 * If the command completed without error, then either
		 * finish off the rest of the command, or start a new one.
		 */
		if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
			return;
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0,
						this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if (cmd->device->use_10_for_rw &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			if (!(req->flags & REQ_QUIET))
				dev_printk(KERN_INFO,
					   &cmd->device->sdev_gendev,
					   "Device not ready.\n");
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->flags & REQ_QUIET)) {
				dev_printk(KERN_INFO,
					   &cmd->device->sdev_gendev,
					   "Volume overflow, CDB: ");
				__scsi_print_command(cmd->data_cmnd);
				scsi_print_sense("", cmd);
			}
			scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	} /* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->flags & REQ_QUIET)) {
			dev_printk(KERN_INFO, &cmd->device->sdev_gendev,
				   "SCSI error: return code = 0x%x\n", result);

			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt))
		return BLKPREP_DEFER;

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;

		if (drv->prepare_flush)
			return drv->prepare_flush(q, rq);
	}

	return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *flush_rq = rq->end_io_data;
	struct scsi_driver *drv;

	if (flush_rq->errors) {
		printk("scsi: barrier error, disabling flush support\n");
		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
	}

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
		drv->end_flush(q, rq);
	}
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static void scsi_generic_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
		       sdev->host->host_no, sdev->id, sdev->lun);
		return BLKPREP_KILL;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state; don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL && req->special) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
			if(specials_only == SDEV_QUIESCE ||
					specials_only == SDEV_BLOCK)
				return BLKPREP_DEFER;

			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}


		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		return BLKPREP_KILL;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 * (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).   This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		if (ret)	/* BLKPREP_KILL return also releases the command */
			return ret;

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (req->rq_disk) {
			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				return BLKPREP_KILL;
			}
		} else {
			memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
			if (rq_data_dir(req) == WRITE)
				cmd->sc_data_direction = DMA_TO_DEVICE;
			else if (req->data_len)
				cmd->sc_data_direction = DMA_FROM_DEVICE;
			else
				cmd->sc_data_direction = DMA_NONE;

			cmd->transfersize = req->data_len;
			cmd->allowed = 3;
			cmd->timeout_per_command = req->timeout;
			cmd->done = scsi_generic_done;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d (%d:%d) unblocking device at"
				       " zero depth\n", sdev->host->host_no,
				       sdev->id, sdev->lun));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (shost->shost_state == SHOST_RECOVERY)
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, request_queue_t *q)
{
	struct scsi_cmnd *cmd = req->special;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
				 __FUNCTION__);
		BUG();
	}

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);
	__scsi_done(cmd);
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if(rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if(sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	scsi_unprep_request(req);
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if(sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
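/*
 * Illustrative sketch (hypothetical caller outside this file): a driver
 * that builds its own request queue could apply the same bounce limit
 * that scsi_alloc_queue() applies below:
 *
 *	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 */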

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/*
	 * ordered tags are superior to flush ordering
	 */
	if (shost->ordered_tag)
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	else if (shost->ordered_flush) {
		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
		q->prepare_flush_fn = scsi_prepare_flush_fn;
		q->end_flush_fn = scsi_end_flush_fn;
	}

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
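/*
 * Illustrative sketch (hypothetical low-level driver code): the
 * block/unblock pair is typically wrapped around an operation that must
 * quiesce the host, such as a firmware download:
 *
 *	scsi_block_requests(shost);
 *	my_hba_download_firmware(shost);	// hypothetical helper
 *	scsi_unblock_requests(shost);
 */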

int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
/**
 *	scsi_mode_sense - issue a mode sense, falling back from a ten-byte
 *		to a six-byte command if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
James Bottomley1cf72692005-08-28 11:27:01 -05001681 * @sense: place to put sense data (or NULL if no sense to be collected).
1682 * must be SCSI_SENSE_BUFFERSIZE big.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 *
1684 * Returns zero if unsuccessful, or the header offset (either 4
1685 * or 8 depending on whether a six or ten byte command was
1686 * issued) if successful.
1687 **/
1688int
1689scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1690		unsigned char *buffer, int len, int timeout, int retries,
1691		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
1692	unsigned char cmd[12];
1693	int use_10_for_ms;
1694	int header_length;
1695	int result;
1696	struct scsi_sense_hdr my_sshdr;
1697
1698	memset(data, 0, sizeof(*data));
1699	memset(&cmd[0], 0, 12);
1700	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
1701	cmd[2] = modepage;
1702
1703	/* caller might not be interested in sense, but we need it */
1704	if (!sshdr)
1705		sshdr = &my_sshdr;
1706
1707 retry:
1708	use_10_for_ms = sdev->use_10_for_ms;
1709
1710	if (use_10_for_ms) {
1711		if (len < 8)
1712			len = 8;
1713
1714		cmd[0] = MODE_SENSE_10;
1715		cmd[8] = len;
1716		header_length = 8;
1717	} else {
1718		if (len < 4)
1719			len = 4;
1720
1721		cmd[0] = MODE_SENSE;
1722		cmd[4] = len;
1723		header_length = 4;
1724	}
1725
1726	memset(buffer, 0, len);
1727
1728	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1729				  sshdr, timeout, retries);
1730
1731	/* This code looks awful: what it's doing is making sure an
1732	 * ILLEGAL REQUEST sense return identifies the actual command
1733	 * byte as the problem.  MODE_SENSE commands can return
1734	 * ILLEGAL REQUEST if the code page isn't supported */
1735
1736	if (use_10_for_ms && !scsi_status_is_good(result) &&
1737	    (driver_byte(result) & DRIVER_SENSE)) {
1738		if (scsi_sense_valid(sshdr)) {
1739			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1740			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1741				/*
1742				 * Invalid command operation code
1743				 */
1744				sdev->use_10_for_ms = 0;
1745				goto retry;
1746			}
1747		}
1748	}
1749
1750	if (scsi_status_is_good(result)) {
1751		data->header_length = header_length;
1752		if (use_10_for_ms) {
1753			data->length = buffer[0]*256 + buffer[1] + 2;
1754			data->medium_type = buffer[2];
1755			data->device_specific = buffer[3];
1756			data->longlba = buffer[4] & 0x01;
1757			data->block_descriptor_length = buffer[6]*256
1758				+ buffer[7];
1759		} else {
1760			data->length = buffer[0] + 1;
1761			data->medium_type = buffer[1];
1762			data->device_specific = buffer[2];
1763			data->block_descriptor_length = buffer[3];
1764		}
1765	}
1766
1767	return result;
1768}
1769EXPORT_SYMBOL(scsi_mode_sense);
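
/*
 * A minimal usage sketch, in the style of sd: fetch the caching mode
 * page (0x08) and test its WCE bit.  example_read_wce(), the on-stack
 * 512-byte buffer and the 30-second/3-retry figures are hypothetical
 * choices, not values taken from this file.
 */
static int example_read_wce(struct scsi_device *sdev)
{
	unsigned char buffer[512];
	struct scsi_mode_data data;
	int res;

	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
			      30 * HZ, 3, &data, NULL);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* the page itself follows the header and block descriptors */
	return (buffer[data.header_length +
		       data.block_descriptor_length + 2] & 0x04) ? 1 : 0;
}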
1770
1771int
1772scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1773{
1774	char cmd[] = {
1775		TEST_UNIT_READY, 0, 0, 0, 0, 0,
1776	};
1777	struct scsi_sense_hdr sshdr;
1778	int result;
1779
1780	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1781				  timeout, retries);
1782
1783	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1784
1785		if ((scsi_sense_valid(&sshdr)) &&
1786		    ((sshdr.sense_key == UNIT_ATTENTION) ||
1787		     (sshdr.sense_key == NOT_READY))) {
1788			sdev->changed = 1;
1789			result = 0;
1790		}
1791	}
1792	return result;
1793}
1794EXPORT_SYMBOL(scsi_test_unit_ready);
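
/*
 * A minimal usage sketch: poll a (possibly spinning-up) unit until it
 * answers TEST UNIT READY.  example_wait_until_ready() and the
 * ten-try/one-second/30*HZ figures are hypothetical choices.
 */
static int example_wait_until_ready(struct scsi_device *sdev)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (scsi_test_unit_ready(sdev, 30 * HZ, 3) == 0)
			return 0;	/* device reports ready */
		msleep(1000);		/* give the unit time to come up */
	}
	return -ENODEV;
}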
1795
1796/**
1797 * scsi_device_set_state - Take the given device through the device
1798 * state model.
1799 * @sdev: scsi device to change the state of.
1800 * @state: state to change to.
1801 *
1802 *	Returns zero if successful or an error if the requested
1803 * transition is illegal.
1804 **/
1805int
1806scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1807{
1808 enum scsi_device_state oldstate = sdev->sdev_state;
1809
1810 if (state == oldstate)
1811 return 0;
1812
1813 switch (state) {
1814 case SDEV_CREATED:
1815 /* There are no legal states that come back to
1816 * created. This is the manually initialised start
1817 * state */
1818 goto illegal;
1819
1820 case SDEV_RUNNING:
1821 switch (oldstate) {
1822 case SDEV_CREATED:
1823 case SDEV_OFFLINE:
1824 case SDEV_QUIESCE:
1825 case SDEV_BLOCK:
1826 break;
1827 default:
1828 goto illegal;
1829 }
1830 break;
1831
1832 case SDEV_QUIESCE:
1833 switch (oldstate) {
1834 case SDEV_RUNNING:
1835 case SDEV_OFFLINE:
1836 break;
1837 default:
1838 goto illegal;
1839 }
1840 break;
1841
1842 case SDEV_OFFLINE:
1843 switch (oldstate) {
1844 case SDEV_CREATED:
1845 case SDEV_RUNNING:
1846 case SDEV_QUIESCE:
1847 case SDEV_BLOCK:
1848 break;
1849 default:
1850 goto illegal;
1851 }
1852 break;
1853
1854 case SDEV_BLOCK:
1855 switch (oldstate) {
1856 case SDEV_CREATED:
1857 case SDEV_RUNNING:
1858 break;
1859 default:
1860 goto illegal;
1861 }
1862 break;
1863
1864 case SDEV_CANCEL:
1865 switch (oldstate) {
1866 case SDEV_CREATED:
1867 case SDEV_RUNNING:
1868 case SDEV_OFFLINE:
1869 case SDEV_BLOCK:
1870 break;
1871 default:
1872 goto illegal;
1873 }
1874 break;
1875
1876 case SDEV_DEL:
1877 switch (oldstate) {
1878 case SDEV_CANCEL:
1879 break;
1880 default:
1881 goto illegal;
1882 }
1883 break;
1884
1885 }
1886 sdev->sdev_state = state;
1887 return 0;
1888
1889 illegal:
1890 SCSI_LOG_ERROR_RECOVERY(1,
1891 dev_printk(KERN_ERR, &sdev->sdev_gendev,
1892 "Illegal state transition %s->%s\n",
1893 scsi_device_state_name(oldstate),
1894 scsi_device_state_name(state))
1895 );
1896 return -EINVAL;
1897}
1898EXPORT_SYMBOL(scsi_device_set_state);
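
/*
 * A minimal usage sketch: drive the state model above, here by taking
 * a dead device offline.  example_mark_offline() is a hypothetical
 * name; per the table above, the transition is legal from CREATED,
 * RUNNING, QUIESCE and BLOCK, and rejected with -EINVAL otherwise.
 */
static void example_mark_offline(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		dev_printk(KERN_ERR, &sdev->sdev_gendev,
			   "offline transition rejected\n");
}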
1899
1900/**
1901 * scsi_device_quiesce - Block user issued commands.
1902 * @sdev: scsi device to quiesce.
1903 *
1904 * This works by trying to transition to the SDEV_QUIESCE state
1905 * (which must be a legal transition). When the device is in this
1906 * state, only special requests will be accepted, all others will
1907 * be deferred. Since special requests may also be requeued requests,
1908 * a successful return doesn't guarantee the device will be
1909 * totally quiescent.
1910 *
1911 * Must be called with user context, may sleep.
1912 *
1913 *	Returns zero if successful or an error if not.
1914 **/
1915int
1916scsi_device_quiesce(struct scsi_device *sdev)
1917{
1918 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1919 if (err)
1920 return err;
1921
1922 scsi_run_queue(sdev->request_queue);
1923 while (sdev->device_busy) {
1924 msleep_interruptible(200);
1925 scsi_run_queue(sdev->request_queue);
1926 }
1927 return 0;
1928}
1929EXPORT_SYMBOL(scsi_device_quiesce);
1930
1931/**
1932 * scsi_device_resume - Restart user issued commands to a quiesced device.
1933 * @sdev: scsi device to resume.
1934 *
1935 * Moves the device from quiesced back to running and restarts the
1936 * queues.
1937 *
1938 * Must be called with user context, may sleep.
1939 **/
1940void
1941scsi_device_resume(struct scsi_device *sdev)
1942{
1943	if (scsi_device_set_state(sdev, SDEV_RUNNING))
1944 return;
1945 scsi_run_queue(sdev->request_queue);
1946}
1947EXPORT_SYMBOL(scsi_device_resume);
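
/*
 * A minimal sketch of the intended quiesce/resume pairing.
 * example_with_device_quiesced() is a hypothetical name; the body in
 * the middle is whatever maintenance work motivated the quiesce.
 */
static int example_with_device_quiesced(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);	/* may sleep */
	if (err)
		return err;
	/*
	 * Only special requests reach the device now; do the
	 * maintenance work here.
	 */
	scsi_device_resume(sdev);
	return 0;
}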
1948
1949static void
1950device_quiesce_fn(struct scsi_device *sdev, void *data)
1951{
1952 scsi_device_quiesce(sdev);
1953}
1954
1955void
1956scsi_target_quiesce(struct scsi_target *starget)
1957{
1958 starget_for_each_device(starget, NULL, device_quiesce_fn);
1959}
1960EXPORT_SYMBOL(scsi_target_quiesce);
1961
1962static void
1963device_resume_fn(struct scsi_device *sdev, void *data)
1964{
1965 scsi_device_resume(sdev);
1966}
1967
1968void
1969scsi_target_resume(struct scsi_target *starget)
1970{
1971 starget_for_each_device(starget, NULL, device_resume_fn);
1972}
1973EXPORT_SYMBOL(scsi_target_resume);
1974
1975/**
1976 * scsi_internal_device_block - internal function to put a device
1977 * temporarily into the SDEV_BLOCK state
1978 * @sdev: device to block
1979 *
1980 * Block request made by scsi lld's to temporarily stop all
1981 * scsi commands on the specified device. Called from interrupt
1982 * or normal process context.
1983 *
1984 * Returns zero if successful or error if not
1985 *
1986 * Notes:
1987 * This routine transitions the device to the SDEV_BLOCK state
1988 * (which must be a legal transition). When the device is in this
1989 * state, all commands are deferred until the scsi lld reenables
1990 *	the device with scsi_internal_device_unblock() or device_block_tmo fires.
1991 * This routine assumes the host_lock is held on entry.
1992 **/
1993int
1994scsi_internal_device_block(struct scsi_device *sdev)
1995{
1996 request_queue_t *q = sdev->request_queue;
1997 unsigned long flags;
1998 int err = 0;
1999
2000 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2001 if (err)
2002 return err;
2003
2004 /*
2005 * The device has transitioned to SDEV_BLOCK. Stop the
2006 * block layer from calling the midlayer with this device's
2007 * request queue.
2008 */
2009 spin_lock_irqsave(q->queue_lock, flags);
2010 blk_stop_queue(q);
2011 spin_unlock_irqrestore(q->queue_lock, flags);
2012
2013 return 0;
2014}
2015EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2016
2017/**
2018 * scsi_internal_device_unblock - resume a device after a block request
2019 * @sdev: device to resume
2020 *
2021 * Called by scsi lld's or the midlayer to restart the device queue
2022 * for the previously suspended scsi device. Called from interrupt or
2023 * normal process context.
2024 *
2025 * Returns zero if successful or error if not.
2026 *
2027 * Notes:
2028 * This routine transitions the device to the SDEV_RUNNING state
2029 * (which must be a legal transition) allowing the midlayer to
2030 * goose the queue for this device. This routine assumes the
2031 * host_lock is held upon entry.
2032 **/
2033int
2034scsi_internal_device_unblock(struct scsi_device *sdev)
2035{
2036 request_queue_t *q = sdev->request_queue;
2037 int err;
2038 unsigned long flags;
2039
2040 /*
2041 * Try to transition the scsi device to SDEV_RUNNING
2042 * and goose the device queue if successful.
2043 */
2044 err = scsi_device_set_state(sdev, SDEV_RUNNING);
2045 if (err)
2046 return err;
2047
2048 spin_lock_irqsave(q->queue_lock, flags);
2049 blk_start_queue(q);
2050 spin_unlock_irqrestore(q->queue_lock, flags);
2051
2052 return 0;
2053}
2054EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
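
/*
 * A minimal usage sketch: both helpers above document that they assume
 * the host_lock is held, so an LLD pausing one device (say, around a
 * firmware event) would wrap them like this; the unblock path mirrors
 * it with scsi_internal_device_unblock().  example_pause_device() is a
 * hypothetical name.
 */
static int example_pause_device(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;
	int err;

	spin_lock_irqsave(shost->host_lock, flags);
	err = scsi_internal_device_block(sdev);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return err;
}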
2055
2056static void
2057device_block(struct scsi_device *sdev, void *data)
2058{
2059 scsi_internal_device_block(sdev);
2060}
2061
2062static int
2063target_block(struct device *dev, void *data)
2064{
2065 if (scsi_is_target_device(dev))
2066 starget_for_each_device(to_scsi_target(dev), NULL,
2067 device_block);
2068 return 0;
2069}
2070
2071void
2072scsi_target_block(struct device *dev)
2073{
2074 if (scsi_is_target_device(dev))
2075 starget_for_each_device(to_scsi_target(dev), NULL,
2076 device_block);
2077 else
2078 device_for_each_child(dev, NULL, target_block);
2079}
2080EXPORT_SYMBOL_GPL(scsi_target_block);
2081
2082static void
2083device_unblock(struct scsi_device *sdev, void *data)
2084{
2085 scsi_internal_device_unblock(sdev);
2086}
2087
2088static int
2089target_unblock(struct device *dev, void *data)
2090{
2091 if (scsi_is_target_device(dev))
2092 starget_for_each_device(to_scsi_target(dev), NULL,
2093 device_unblock);
2094 return 0;
2095}
2096
2097void
2098scsi_target_unblock(struct device *dev)
2099{
2100 if (scsi_is_target_device(dev))
2101 starget_for_each_device(to_scsi_target(dev), NULL,
2102 device_unblock);
2103 else
2104 device_for_each_child(dev, NULL, target_unblock);
2105}
2106EXPORT_SYMBOL_GPL(scsi_target_unblock);
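
/*
 * A minimal usage sketch: a transport class that loses contact with a
 * remote port might block the whole target and unblock it when the
 * port returns.  example_port_event() and its 'online' flag are
 * hypothetical.
 */
static void example_port_event(struct device *target_dev, int online)
{
	if (online)
		scsi_target_unblock(target_dev);	/* restart the queues */
	else
		scsi_target_block(target_dev);		/* defer all commands */
}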