/*
 * SCSI target lib functions
 *
 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <../drivers/md/dm-bio-list.h>

#include "scsi_tgt_priv.h"

static struct workqueue_struct *scsi_tgtd;
static struct kmem_cache *scsi_tgt_cmd_cache;

/*
 * TODO: this struct will be killed when the block layer supports large bios
 * and James's work struct code is in
 */
struct scsi_tgt_cmd {
	/* TODO replace work with James b's code */
	struct work_struct work;
	/* TODO replace the lists with a large bio */
	struct bio_list xfer_done_list;
	struct bio_list xfer_list;

	struct list_head hash_list;
	struct request *rq;
	u64 tag;

	void *buffer;
	unsigned bufflen;
};

#define TGT_HASH_ORDER	4
#define cmd_hashfn(tag)	hash_long((unsigned long) (tag), TGT_HASH_ORDER)

struct scsi_tgt_queuedata {
	struct Scsi_Host *shost;
	struct list_head cmd_hash[1 << TGT_HASH_ORDER];
	spinlock_t cmd_hash_lock;
};

/*
 * Function:	scsi_host_get_command()
 *
 * Purpose:	Allocate and setup a scsi command block and blk request
 *
 * Arguments:	shost	 - scsi host
 *		data_dir - dma data dir
 *		gfp_mask - allocator flags
 *
 * Returns:	The allocated scsi command structure.
 *
 * This should be called by target LLDs to get a command.
 */
struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
					enum dma_data_direction data_dir,
					gfp_t gfp_mask)
{
	int write = (data_dir == DMA_TO_DEVICE);
	struct request *rq;
	struct scsi_cmnd *cmd;
	struct scsi_tgt_cmd *tcmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&shost->shost_gendev))
		return NULL;

	tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
	if (!tcmd)
		goto put_dev;

	rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
	if (!rq)
		goto free_tcmd;

	cmd = __scsi_get_command(shost, gfp_mask);
	if (!cmd)
		goto release_rq;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc_data_direction = data_dir;
	cmd->jiffies_at_alloc = jiffies;
	cmd->request = rq;

	rq->special = cmd;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
	rq->end_io_data = tcmd;

	bio_list_init(&tcmd->xfer_list);
	bio_list_init(&tcmd->xfer_done_list);
	tcmd->rq = rq;

	return cmd;

release_rq:
	blk_put_request(rq);
free_tcmd:
	kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
put_dev:
	put_device(&shost->shost_gendev);
	return NULL;

}
EXPORT_SYMBOL_GPL(scsi_host_get_command);

/*
 * Function:	scsi_host_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	shost	- scsi host
 *		cmd	- command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct request_queue *q = shost->uspace_req_q;
	struct request *rq = cmd->request;
	struct scsi_tgt_cmd *tcmd = rq->end_io_data;
	unsigned long flags;

	kmem_cache_free(scsi_tgt_cmd_cache, tcmd);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	__scsi_put_command(shost, cmd, &shost->shost_gendev);
}
EXPORT_SYMBOL_GPL(scsi_host_put_command);

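/*
 * Illustrative pairing (a sketch added here for clarity, not part of the
 * original source): a target LLD typically allocates a command for an
 * incoming CDB and gives it back on the teardown or error path, e.g.
 *
 *	cmd = scsi_host_get_command(shost, DMA_FROM_DEVICE, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	...
 *	scsi_host_put_command(shost, cmd);
 *
 * scsi_host_put_command() must only be called once the command is off
 * all lists (see the Notes above).
 */
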
static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
{
	struct bio *bio;

	/* must call bio_endio in case bio was bounced */
	while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
		bio_endio(bio, bio->bi_size, 0);
		bio_unmap_user(bio);
	}

	while ((bio = bio_list_pop(&tcmd->xfer_list))) {
		bio_endio(bio, bio->bi_size, 0);
		bio_unmap_user(bio);
	}
}

static void cmd_hashlist_del(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->request->q;
	struct scsi_tgt_queuedata *qdata = q->queuedata;
	unsigned long flags;
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;

	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
	list_del(&tcmd->hash_list);
	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
}

static void scsi_tgt_cmd_destroy(struct work_struct *work)
{
	struct scsi_tgt_cmd *tcmd =
		container_of(work, struct scsi_tgt_cmd, work);
	struct scsi_cmnd *cmd = tcmd->rq->special;

	dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
		rq_data_dir(cmd->request));
	/*
	 * We fix rq->cmd_flags here because we told bio_map_user to
	 * write to the VM for WRITE commands, so blk_rq_bio_prep set
	 * the flags such that rq_data_dir() reports READ.
	 */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cmd->request->cmd_flags |= REQ_RW;
	else
		cmd->request->cmd_flags &= ~REQ_RW;

	scsi_unmap_user_pages(tcmd);
	scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
}

static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
			      u64 tag)
{
	struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
	unsigned long flags;
	struct list_head *head;

	tcmd->tag = tag;
	INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
	head = &qdata->cmd_hash[cmd_hashfn(tag)];
	list_add(&tcmd->hash_list, head);
	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
}

/*
 * scsi_tgt_alloc_queue - setup queue used for message passing
 * shost: scsi host
 *
 * This should be called by the LLD after host allocation.
 * The queue will be released when the host is released.
 */
int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
{
	struct scsi_tgt_queuedata *queuedata;
	struct request_queue *q;
	int err, i;

	/*
	 * Do we need to send a netlink event or should uspace
	 * just respond to the hotplug event?
	 */
	q = __scsi_alloc_queue(shost, NULL);
	if (!q)
		return -ENOMEM;

	queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
	if (!queuedata) {
		err = -ENOMEM;
		goto cleanup_queue;
	}
	queuedata->shost = shost;
	q->queuedata = queuedata;

	/*
	 * This is a silly hack. We should probably just queue as many
	 * commands as are received to userspace. uspace can then make
	 * sure we do not overload the HBA.
	 */
	q->nr_requests = shost->hostt->can_queue;
	/*
	 * We currently only support software LLDs so this does
	 * not matter for now. Do we need this for the cards we support?
	 * If so we should make it a host template value.
	 */
	blk_queue_dma_alignment(q, 0);
	shost->uspace_req_q = q;

	for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
		INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
	spin_lock_init(&queuedata->cmd_hash_lock);

	return 0;

cleanup_queue:
	blk_cleanup_queue(q);
	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);

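/*
 * Typical call site (a sketch added here for clarity, not part of the
 * original source; my_tgt_template and struct my_priv are hypothetical
 * LLD names):
 *
 *	shost = scsi_host_alloc(&my_tgt_template, sizeof(struct my_priv));
 *	if (!shost)
 *		return -ENOMEM;
 *	err = scsi_tgt_alloc_queue(shost);
 *	if (err)
 *		goto put_host;
 */
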
void scsi_tgt_free_queue(struct Scsi_Host *shost)
{
	int i;
	unsigned long flags;
	struct request_queue *q = shost->uspace_req_q;
	struct scsi_cmnd *cmd;
	struct scsi_tgt_queuedata *qdata = q->queuedata;
	struct scsi_tgt_cmd *tcmd, *n;
	LIST_HEAD(cmds);

	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);

	for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
		list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
					 hash_list) {
			list_del(&tcmd->hash_list);
			list_add(&tcmd->hash_list, &cmds);
		}
	}

	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);

	while (!list_empty(&cmds)) {
		tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
		list_del(&tcmd->hash_list);
		cmd = tcmd->rq->special;

		shost->hostt->eh_abort_handler(cmd);
		scsi_tgt_cmd_destroy(&tcmd->work);
	}
}
EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);

struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
	return queue->shost;
}
EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);

/*
 * scsi_tgt_queue_command - queue command for userspace processing
 * @cmd: scsi command
 * @scsilun: scsi lun
 * @tag: unique value to identify this command for tmf
 */
int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
			   u64 tag)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
	int err;

	init_scsi_tgt_cmd(cmd->request, tcmd, tag);
	err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
	if (err)
		cmd_hashlist_del(cmd);

	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);

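/*
 * Typical flow in a target LLD (a sketch added here for clarity, not part
 * of the original source; cdb, data_dir, lun and tag come from the
 * hypothetical transport code):
 *
 *	cmd = scsi_host_get_command(shost, data_dir, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	memcpy(cmd->cmnd, cdb, sizeof(cmd->cmnd));
 *	err = scsi_tgt_queue_command(cmd, lun, tag);
 *	if (err)
 *		scsi_host_put_command(shost, cmd);
 */
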
/*
 * This is normally run from an interrupt handler, and the unmap
 * needs process context, so we must queue the work.
 */
static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;

	dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));

	scsi_tgt_uspace_send_status(cmd, tcmd->tag);
	queue_work(scsi_tgtd, &tcmd->work);
}

static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
	int err;

	dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));

	err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
	switch (err) {
	case SCSI_MLQUEUE_HOST_BUSY:
	case SCSI_MLQUEUE_DEVICE_BUSY:
		return -EAGAIN;
	}

	return 0;
}

static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
	int err;

	err = __scsi_tgt_transfer_response(cmd);
	if (!err)
		return;

	cmd->result = DID_BUS_BUSY << 16;
	err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
	if (err <= 0)
		/* the eh will have to pick this up */
		printk(KERN_ERR "Could not send cmd %p status\n", cmd);
}

static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;
	struct scsi_tgt_cmd *tcmd = rq->end_io_data;
	int count;

	cmd->use_sg = rq->nr_phys_segments;
	cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
	if (!cmd->request_buffer)
		return -ENOMEM;

	cmd->request_bufflen = rq->data_len;

	dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
		rq_data_dir(rq));
	count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	return -EINVAL;
}

/* TODO: test this crap and replace bio_map_user with new interface maybe */
static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
			       int rw)
{
	struct request_queue *q = cmd->request->q;
	struct request *rq = cmd->request;
	void *uaddr = tcmd->buffer;
	unsigned int len = tcmd->bufflen;
	struct bio *bio;
	int err;

	while (len > 0) {
		dprintk("%lx %u\n", (unsigned long) uaddr, len);
		bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			dprintk("fail to map %lx %u %d %x\n",
				(unsigned long) uaddr, len, err, cmd->cmnd[0]);
			goto unmap_bios;
		}

		uaddr += bio->bi_size;
		len -= bio->bi_size;

		/*
		 * The first bio is added and merged. We could probably
		 * try to add others using scsi_merge_bio() but for now
		 * we keep it simple. The first bio should be pretty large
		 * (either hitting the 1 MB bio pages limit or a queue limit)
		 * already but for really large IO we may want to try and
		 * merge these.
		 */
		if (!rq->bio) {
			blk_rq_bio_prep(q, rq, bio);
			rq->data_len = bio->bi_size;
		} else
			/* put list of bios to transfer in next go around */
			bio_list_add(&tcmd->xfer_list, bio);
	}

	cmd->offset = 0;
	err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
	if (err)
		goto unmap_bios;

	return 0;

unmap_bios:
	if (rq->bio) {
		bio_unmap_user(rq->bio);
		while ((bio = bio_list_pop(&tcmd->xfer_list)))
			bio_unmap_user(bio);
	}

	return err;
}

static int scsi_tgt_transfer_data(struct scsi_cmnd *);

static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
	struct bio *bio;
	int err;

	/* should we free resources here on error ? */
	if (cmd->result) {
send_uspace_err:
		err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
		if (err <= 0)
			/* the tgt uspace eh will have to pick this up */
			printk(KERN_ERR "Could not send cmd %p status\n", cmd);
		return;
	}

	dprintk("cmd %p request_bufflen %u bufflen %u\n",
		cmd, cmd->request_bufflen, tcmd->bufflen);

	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);

	tcmd->buffer += cmd->request_bufflen;
	cmd->offset += cmd->request_bufflen;

	if (!tcmd->xfer_list.head) {
		scsi_tgt_transfer_response(cmd);
		return;
	}

	dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
		cmd, cmd->request_bufflen, tcmd->bufflen);

	bio = bio_list_pop(&tcmd->xfer_list);
	BUG_ON(!bio);

	blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
	cmd->request->data_len = bio->bi_size;
	err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
	if (err) {
		cmd->result = DID_ERROR << 16;
		goto send_uspace_err;
	}

	if (scsi_tgt_transfer_data(cmd)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto send_uspace_err;
	}
}

static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
{
	int err;
	struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);

	err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
	switch (err) {
	case SCSI_MLQUEUE_HOST_BUSY:
	case SCSI_MLQUEUE_DEVICE_BUSY:
		return -EAGAIN;
	default:
		return 0;
	}
}

static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
			       unsigned len)
{
	char __user *p = (char __user *) uaddr;

	if (copy_from_user(cmd->sense_buffer, p,
			   min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
		printk(KERN_ERR "Could not copy the sense buffer\n");
		return -EIO;
	}
	return 0;
}

static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd;
	int err;

	err = shost->hostt->eh_abort_handler(cmd);
	if (err)
		eprintk("fail to abort %p\n", cmd);

	tcmd = cmd->request->end_io_data;
	scsi_tgt_cmd_destroy(&tcmd->work);
	return err;
}

static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
{
	struct scsi_tgt_queuedata *qdata = q->queuedata;
	struct request *rq = NULL;
	struct list_head *head;
	struct scsi_tgt_cmd *tcmd;
	unsigned long flags;

	head = &qdata->cmd_hash[cmd_hashfn(tag)];
	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
	list_for_each_entry(tcmd, head, hash_list) {
		if (tcmd->tag == tag) {
			rq = tcmd->rq;
			list_del(&tcmd->hash_list);
			break;
		}
	}
	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);

	return rq;
}

int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
			 unsigned long uaddr, u8 rw)
{
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *rq;
	struct scsi_tgt_cmd *tcmd;
	int err = 0;

	dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
		result, len, uaddr, rw);

	/* TODO: replace with an O(1) algorithm */
	shost = scsi_host_lookup(host_no);
	if (IS_ERR(shost)) {
		printk(KERN_ERR "Could not find host no %d\n", host_no);
		return -EINVAL;
	}

	if (!shost->uspace_req_q) {
		printk(KERN_ERR "Not target scsi host %d\n", host_no);
		goto done;
	}

	rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
	if (!rq) {
		printk(KERN_ERR "Could not find tag %llu\n",
		       (unsigned long long) tag);
		err = -EINVAL;
		goto done;
	}
	cmd = rq->special;

	dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
		result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);

	if (result == TASK_ABORTED) {
		scsi_tgt_abort_cmd(shost, cmd);
		goto done;
	}
	/*
	 * store the userspace values here, the working values are
	 * in the request_* values
	 */
	tcmd = cmd->request->end_io_data;
	tcmd->buffer = (void *)uaddr;
	tcmd->bufflen = len;
	cmd->result = result;

	if (!tcmd->bufflen || cmd->request_buffer) {
		err = __scsi_tgt_transfer_response(cmd);
		goto done;
	}

	/*
	 * TODO: Do we need to handle the case where the request does not
	 * align with the LLD?
	 */
	err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
	if (err) {
		eprintk("%p %d\n", cmd, err);
		err = -EAGAIN;
		goto done;
	}

	/* userspace failure */
	if (cmd->result) {
		if (status_byte(cmd->result) == CHECK_CONDITION)
			scsi_tgt_copy_sense(cmd, uaddr, len);
		err = __scsi_tgt_transfer_response(cmd);
		goto done;
	}
	/* ask the target LLD to transfer the data to the buffer */
	err = scsi_tgt_transfer_data(cmd);

done:
	scsi_host_put(shost);
	return err;
}

int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, int function, u64 tag,
			      struct scsi_lun *scsilun, void *data)
{
	int err;

	/* TODO: need to retry if this fails. */
	err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, function,
					    tag, scsilun, data);
	if (err < 0)
		eprintk("The task management request was lost!\n");
	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);

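/*
 * Typical call site (a sketch added here for clarity, not part of the
 * original source; function, tag, lun and mtask come from the hypothetical
 * transport code):
 *
 *	err = scsi_tgt_tsk_mgmt_request(shost, function, tag, &lun, mtask);
 *
 * The outcome is later delivered back to the LLD through the host
 * template's tsk_mgmt_response() callback, driven by
 * scsi_tgt_kspace_tsk_mgmt() below.
 */
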
int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result)
{
	struct Scsi_Host *shost;
	int err = -EINVAL;

	dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);

	shost = scsi_host_lookup(host_no);
	if (IS_ERR(shost)) {
		printk(KERN_ERR "Could not find host no %d\n", host_no);
		return err;
	}

	if (!shost->uspace_req_q) {
		printk(KERN_ERR "Not target scsi host %d\n", host_no);
		goto done;
	}

	err = shost->hostt->tsk_mgmt_response(mid, result);
done:
	scsi_host_put(shost);
	return err;
}

static int __init scsi_tgt_init(void)
{
	int err;

	scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
					       sizeof(struct scsi_tgt_cmd),
					       0, 0, NULL, NULL);
	if (!scsi_tgt_cmd_cache)
		return -ENOMEM;

	scsi_tgtd = create_workqueue("scsi_tgtd");
	if (!scsi_tgtd) {
		err = -ENOMEM;
		goto free_kmemcache;
	}

	err = scsi_tgt_if_init();
	if (err)
		goto destroy_wq;

	return 0;

destroy_wq:
	destroy_workqueue(scsi_tgtd);
free_kmemcache:
	kmem_cache_destroy(scsi_tgt_cmd_cache);
	return err;
}

static void __exit scsi_tgt_exit(void)
{
	destroy_workqueue(scsi_tgtd);
	scsi_tgt_if_exit();
	kmem_cache_destroy(scsi_tgt_cmd_cache);
}

module_init(scsi_tgt_init);
module_exit(scsi_tgt_exit);

MODULE_DESCRIPTION("SCSI target core");
MODULE_LICENSE("GPL");