/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static int sub_api_initialized;

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
			WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_tg_pt_gp_mem_cache;

	return 0;

out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}
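
/*
 * Note the unwind order in init_se_kmem_caches() above: each out_free_*
 * label destroys only the caches created before the failing
 * kmem_cache_create(), in reverse order of allocation, so a partial
 * initialization never leaks a cache.
 */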

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
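
/*
 * Example: transport_add_device_to_core_hba() below mints its per-device
 * index with:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 */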

static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}

void transport_subsystem_check_init(void)
{
	int ret;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		pr_err("Unable to load target_core_stgt\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
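
/*
 * Illustrative sketch (not taken from a specific fabric module): a fabric
 * driver typically allocates its session during login and must check the
 * ERR_PTR() return:
 *
 *	struct se_session *se_sess = transport_init_session();
 *
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 */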

/*
 * Called with struct se_portal_group->session_lock held via spin_lock_irqsave().
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate an active
	 * se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * e.g.: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

int target_put_session(struct se_session *se_sess)
{
	return kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
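
/*
 * target_get_session()/target_put_session() must be paired by callers that
 * use a se_session outside its normal lifetime; dropping the final
 * reference invokes target_release_session(), which calls the fabric's
 * ->close_session() callback.
 */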

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
			core_tpg_wait_for_nacl_pr_ref(se_nacl);
			core_free_device_list_for_node(se_nacl, se_tpg);
			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);

			comp_nacl = false;
			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
		}
	}
	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If the last kref is being dropped now for an explicit NodeACL, wake
	 * the sleeping ->acl_free_comp caller so the configfs
	 * se_node_acl->acl_group removal context can proceed.
	 */
	if (se_nacl && comp_nacl)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
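
/*
 * Illustrative logout-path sketch (fabric-side, not from this file): a
 * fabric module is expected to drop the configfs linkage before releasing
 * the session itself:
 *
 *	transport_deregister_session_configfs(se_sess);
 *	transport_deregister_session(se_sess);
 */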

/*
 * Called with cmd->t_state_lock held.
 */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if the IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	if (cmd->transport_state & CMD_T_LUN_STOP) {
		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2)
			target_remove_from_state_list(cmd);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
			__func__, __LINE__,
			cmd->se_tfo->get_task_tag(cmd));

		if (transport_off == 2)
			target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete(&cmd->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		cmd->transport_state &= ~CMD_T_ACTIVE;
		if (transport_off == 2) {
			target_remove_from_state_list(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference and
			 * struct se_cmd now.
			 *
			 * Fabric modules are expected to return '1' here if the
			 * se_cmd being passed is released at this point,
			 * or zero if not being released.
			 */
			if (cmd->se_tfo->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&cmd->t_state_lock, flags);

				return cmd->se_tfo->check_stop_free(cmd);
			}
		}
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
		target_remove_from_state_list(cmd);
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (!list_empty(&cmd->se_lun_node))
		list_del_init(&cmd->se_lun_node);
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove) {
		transport_remove_cmd_from_queue(cmd);
		transport_put_cmd(cmd);
	}
}

static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
		bool at_head)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	unsigned long flags;

	if (t_state) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->t_state = t_state;
		cmd->transport_state |= CMD_T_ACTIVE;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);

	/* If the cmd is already on the list, remove it before we add it */
	if (!list_empty(&cmd->se_queue_node))
		list_del(&cmd->se_queue_node);
	else
		atomic_inc(&qobj->queue_cnt);

	if (at_head)
		list_add(&cmd->se_queue_node, &qobj->qobj_list);
	else
		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
	cmd->transport_state |= CMD_T_QUEUED;
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_interruptible(&qobj->thread_wq);
}

static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);

	cmd->transport_state &= ~CMD_T_QUEUED;
	list_del_init(&cmd->se_queue_node);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return cmd;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(cmd->transport_state & CMD_T_QUEUED)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}
	cmd->transport_state &= ~CMD_T_QUEUED;
	atomic_dec(&qobj->queue_cnt);
	list_del_init(&cmd->se_queue_node);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd);
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(cmd,
				cmd->t_data_sg) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			success = 1;
		}
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);
		return;
	}

	if (!success)
		cmd->transport_state |= CMD_T_FAILED;

	/*
	 * Check for the case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() is waiting for completion.
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->t_transport_stop_comp);
		return;
	} else if (cmd->transport_state & CMD_T_FAILED) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
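
/*
 * Illustrative sketch (backend-side, not from this file): a backend
 * completes I/O by handing SAM status back to the core, which then queues
 * target_complete_ok_work() or target_complete_failure_work() onto
 * target_completion_wq:
 *
 *	target_complete_cmd(cmd, GOOD);
 *	target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 */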

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static void __target_add_to_execute_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	bool head_of_queue = false;

	if (!list_empty(&cmd->execute_list))
		return;

	if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
	    cmd->sam_task_attr == MSG_HEAD_TAG)
		head_of_queue = true;

	if (head_of_queue)
		list_add(&cmd->execute_list, &dev->execute_list);
	else
		list_add_tail(&cmd->execute_list, &dev->execute_list);

	atomic_inc(&dev->execute_tasks);

	if (cmd->state_active)
		return;

	if (head_of_queue)
		list_add(&cmd->state_list, &dev->state_list);
	else
		list_add_tail(&cmd->state_list, &dev->state_list);

	cmd->state_active = true;
}

static void target_add_to_execute_list(struct se_cmd *cmd)
{
	unsigned long flags;
	struct se_device *dev = cmd->se_dev;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__target_add_to_execute_list(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

void __target_remove_from_execute_list(struct se_cmd *cmd)
{
	list_del_init(&cmd->execute_list);
	atomic_dec(&cmd->se_dev->execute_tasks);
}

static void target_remove_from_execute_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (WARN_ON(list_empty(&cmd->execute_list)))
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	__target_remove_from_execute_list(cmd);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
			qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
		atomic_read(&dev->execute_tasks), dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set.
	 *
	 * From spc3r23.pdf section 7.5.1.
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
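
/*
 * Illustrative sketch (not from this file): a caller that has fetched an
 * INQUIRY VPD page 0x83 identification descriptor into page_83[] would
 * typically populate a struct t10_vpd using the whole helper family:
 *
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 */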

static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", dev->transport->name,
		dev->transport->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug(" Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug(" Type: %s ", scsi_device_type(device_type));
	pr_debug(" ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}

struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!dev) {
		pr_err("Unable to allocate memory for struct se_device\n");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001205 return NULL;
1206 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001207
Andy Grovere3d6f902011-07-19 08:55:10 +00001208 transport_init_queue_obj(&dev->dev_queue_obj);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001209 dev->dev_flags = device_flags;
1210 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
Andy Grover5951146d2011-07-19 10:26:37 +00001211 dev->dev_ptr = transport_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001212 dev->se_hba = hba;
1213 dev->se_sub_dev = se_dev;
1214 dev->transport = transport;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001215 INIT_LIST_HEAD(&dev->dev_list);
1216 INIT_LIST_HEAD(&dev->dev_sep_list);
1217 INIT_LIST_HEAD(&dev->dev_tmr_list);
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001218 INIT_LIST_HEAD(&dev->execute_list);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001219 INIT_LIST_HEAD(&dev->delayed_cmd_list);
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001220 INIT_LIST_HEAD(&dev->state_list);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001221 INIT_LIST_HEAD(&dev->qf_cmd_list);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001222 spin_lock_init(&dev->execute_task_lock);
1223 spin_lock_init(&dev->delayed_cmd_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001224 spin_lock_init(&dev->dev_reservation_lock);
1225 spin_lock_init(&dev->dev_status_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001226 spin_lock_init(&dev->se_port_lock);
1227 spin_lock_init(&dev->se_tmr_lock);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001228 spin_lock_init(&dev->qf_cmd_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001229 atomic_set(&dev->dev_ordered_id, 0);
1230
1231 se_dev_set_default_attribs(dev, dev_limits);
1232
1233 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1234 dev->creation_time = get_jiffies_64();
1235 spin_lock_init(&dev->stats_lock);
1236
1237 spin_lock(&hba->device_lock);
1238 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1239 hba->dev_count++;
1240 spin_unlock(&hba->device_lock);
1241 /*
1242 * Setup the SAM Task Attribute emulation for struct se_device
1243 */
1244 core_setup_task_attr_emulation(dev);
1245 /*
1246 * Force PR and ALUA passthrough emulation with internal object use.
1247 */
1248 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1249 /*
1250 * Setup the Reservations infrastructure for struct se_device
1251 */
1252 core_setup_reservations(dev, force_pt);
1253 /*
1254 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1255 */
1256 if (core_setup_alua(dev, force_pt) < 0)
1257 goto out;
1258
1259 /*
1260 * Startup the struct se_device processing thread
1261 */
1262 dev->process_thread = kthread_run(transport_processing_thread, dev,
Andy Grovere3d6f902011-07-19 08:55:10 +00001263 "LIO_%s", dev->transport->name);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001264 if (IS_ERR(dev->process_thread)) {
Andy Grover6708bb22011-06-08 10:36:43 -07001265 pr_err("Unable to create kthread: LIO_%s\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00001266 dev->transport->name);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001267 goto out;
1268 }
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001269 /*
1270 * Setup work_queue for QUEUE_FULL
1271 */
1272 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001273 /*
1274 * Preload the initial INQUIRY const values if we are doing
1275 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1276 * passthrough because this is being provided by the backend LLD.
1277 * This is required so that transport_get_inquiry() copies these
1278 * originals once back into DEV_T10_WWN(dev) for the virtual device
1279 * setup.
1280 */
Andy Grovere3d6f902011-07-19 08:55:10 +00001281 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
Roland Dreierf22c1192011-05-02 22:15:37 -07001282 if (!inquiry_prod || !inquiry_rev) {
Andy Grover6708bb22011-06-08 10:36:43 -07001283 pr_err("All non TCM/pSCSI plugins require"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001284 " INQUIRY consts\n");
1285 goto out;
1286 }
1287
Andy Grovere3d6f902011-07-19 08:55:10 +00001288 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1289 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1290 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001291 }
1292 scsi_dump_inquiry(dev);
1293
Nicholas Bellinger12a18bd2011-03-14 04:06:06 -07001294 return dev;
out:
	/*
	 * The early failure paths above can reach here before the processing
	 * thread was (successfully) started, so guard kthread_stop() against
	 * a NULL or ERR_PTR() value.
	 */
	if (!IS_ERR_OR_NULL(dev->process_thread))
		kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

/* transport_generic_prepare_cdb():
 *
 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 * Because we map iSCSI LUNs to SCSI Target IDs, a non-zero LUN in the
 * CDB would confuse the underlying devices and HBAs, so clear it here.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}
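
/*
 * Illustrative sketch only (compiled out): how the byte-1 masking above
 * behaves for a legacy initiator that encoded LUN 2 in bits 7-5 of byte 1
 * of a READ_6 CDB. The CDB byte values below are hypothetical examples.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* READ_6 (0x08): LUN 2 (0x40) ORed over LBA MSB bits (0x15) in byte 1 */
	unsigned char cdb[6] = { 0x08, 0x40 | 0x15, 0x00, 0x00, 0x08, 0x00 };

	cdb[1] &= 0x1f;	/* same mask as transport_generic_prepare_cdb() */
	printf("byte 1 after masking: 0x%02x\n", cdb[1]);	/* prints 0x15 */
	return 0;
}
#endif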

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_node);
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_queue_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->execute_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->transport_lun_fe_stop_comp);
	init_completion(&cmd->transport_lun_stop_comp);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	init_completion(&cmd->task_stop_comp);
	spin_lock_init(&cmd->t_state_lock);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			cmd->se_dev->transport->name);
	return 0;
}

/* target_setup_cmd_from_cdb():
 *
 * Called from fabric RX Thread.
 */
int target_setup_cmd_from_cdb(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends. The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_table
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list. A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}
	/*
	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
	 * allocate the necessary tasks to complete the received CDB+data
	 */
	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
	 */
	transport_handle_cdb_direct(se_cmd);
	return;
}
EXPORT_SYMBOL(target_submit_cmd);
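
/*
 * Schematic usage sketch only (compiled out): how a fabric module might
 * hand a newly received SCSI command to target-core via the helper above.
 * The my_fabric_* type and fields are hypothetical; only target_submit_cmd()
 * and its flag/attribute constants are real.
 */
#if 0
static void my_fabric_handle_scsi_cmd(struct my_fabric_cmd *fcmd)
{
	/* fcmd->cdb, sense, LUN, length and direction come from the wire */
	target_submit_cmd(&fcmd->se_cmd, fcmd->sess->se_sess, fcmd->cdb,
			fcmd->sense_buf, fcmd->unpacked_lun,
			fcmd->expected_data_length, MSG_SIMPLE_TAG,
			fcmd->data_direction, TARGET_SCF_ACK_KREF);
}
#endif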

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
	transport_generic_free_cmd(se_cmd, 0);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 * for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u32 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
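
/*
 * Schematic usage sketch only (compiled out): submitting an ABORT_TASK
 * TMR for a referenced tag with the helper above. The my_fabric_* names
 * are hypothetical; target_submit_tmr() and TMR_ABORT_TASK are real.
 */
#if 0
static int my_fabric_handle_abort(struct my_fabric_tmr *ftmr, u32 ref_tag)
{
	return target_submit_tmr(&ftmr->se_cmd, ftmr->sess->se_sess,
				ftmr->sense_buf, ftmr->unpacked_lun,
				ftmr->fabric_tmr_ptr, TMR_ABORT_TASK,
				GFP_KERNEL, ref_tag, TARGET_SCF_ACK_KREF);
}
#endif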

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/* transport_generic_handle_data():
 *
 *
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure. For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/* transport_generic_handle_tmr():
 *
 *
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/*
 * If the cmd is active, request it to be stopped and sleep until it
 * has completed.
 */
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
{
	bool was_active = false;

	if (cmd->transport_state & CMD_T_BUSY) {
		cmd->transport_state |= CMD_T_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

		pr_debug("cmd %p waiting to complete\n", cmd);
		wait_for_completion(&cmd->task_stop_comp);
		pr_debug("cmd %p stopped successfully\n", cmd);

		spin_lock_irqsave(&cmd->t_state_lock, *flags);
		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
		cmd->transport_state &= ~CMD_T_BUSY;
		was_active = true;
	}

	return was_active;
}
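
/*
 * Schematic caller sketch only (compiled out): target_stop_cmd() expects
 * cmd->t_state_lock to already be held, and drops/reacquires it around the
 * wait, which is why the caller's IRQ flags are passed by pointer. The
 * my_quiesce_cmd() helper below is hypothetical.
 */
#if 0
static void my_quiesce_cmd(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (target_stop_cmd(cmd, &flags))
		pr_debug("cmd %p was active and has now stopped\n", cmd);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
#endif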

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd)
{
	int ret = 0;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
		cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->scsi_sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	switch (cmd->scsi_sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}
	/*
	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
	 * make the call to transport_send_check_condition_and_sense()
	 * directly. Otherwise expect the fabric to make the call to
	 * transport_send_check_condition_and_sense() after handling
	 * possible unsolicited write data payloads.
	 */
	ret = transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	if (!transport_cmd_check_stop_to_fabric(cmd))
		;
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
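
/*
 * Worked example only (compiled out): decoding the big-endian 32-bit LBA
 * from a READ_10 CDB with the same shifts as transport_lba_32() above.
 * The CDB byte values are hypothetical examples.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* READ_10 (0x28) with LBA 0x00012345 in bytes 2-5 */
	unsigned char cdb[10] = { 0x28, 0, 0x00, 0x01, 0x23, 0x45, 0, 0, 8, 0 };
	unsigned int lba = (cdb[2] << 24) | (cdb[3] << 16) |
			   (cdb[4] << 8) | cdb[5];

	printf("LBA: 0x%08x\n", lba);	/* prints LBA: 0x00012345 */
	return 0;
}
#endif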
1883
1884static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1885{
1886 unsigned long flags;
1887
Andy Grovera1d8b492011-05-02 17:12:10 -07001888 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001889 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
Andy Grovera1d8b492011-05-02 17:12:10 -07001890 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001891}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * so the tasks for the passed struct se_cmd go to the front of
	 * the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		pr_debug("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		atomic_inc(&cmd->se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
			" list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0],
			cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!atomic_read(&cmd->se_dev->simple_cmds))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&cmd->se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * If one or more outstanding ORDERED task attributes exist, add the
	 * dormant task(s) built for the passed struct se_cmd to the delayed
	 * cmd queue until this struct se_device can become Active again.
	 */
	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
		/*
		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
		 * will be drained upon completion of HEAD_OF_QUEUE task.
		 */
		spin_lock(&cmd->se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_node,
				&cmd->se_dev->delayed_cmd_list);
		spin_unlock(&cmd->se_dev->delayed_cmd_lock);

		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			cmd->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist..
	 */
	return 1;
}

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;
	struct se_device *se_dev = cmd->se_dev;
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (!add_tasks)
			goto execute_tasks;

		__transport_execute_tasks(se_dev, cmd);
		return 0;
	}

execute_tasks:
	__transport_execute_tasks(se_dev, NULL);
	return 0;
}

static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
	int error;
	struct se_cmd *cmd = NULL;
	unsigned long flags;

check_depth:
	spin_lock_irq(&dev->execute_task_lock);
	if (new_cmd != NULL)
		__target_add_to_execute_list(new_cmd);

	if (list_empty(&dev->execute_list)) {
		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
	cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
	__target_remove_from_execute_list(cmd);
	spin_unlock_irq(&dev->execute_task_lock);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state |= CMD_T_BUSY;
	cmd->transport_state |= CMD_T_SENT;

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->execute_cmd)
		error = cmd->execute_cmd(cmd);
	else {
		error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
				cmd->t_data_nents, cmd->data_direction);
	}

	if (error != 0) {
		spin_lock_irqsave(&cmd->t_state_lock, flags);
		cmd->transport_state &= ~CMD_T_BUSY;
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_generic_request_failure(cmd);
	}

	new_cmd = NULL;
	goto check_depth;

	return 0;
}

static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value. SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
type_disk:
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		*ret = -EINVAL;
		return 0;
	}

	/*
	 * Everything else assume TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;

	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}

	pr_debug("Returning block_size: %u, sectors: %u == %u for"
		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
		sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
		dev->transport->name);

	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
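
/*
 * Worked example only (compiled out): for a TYPE_DISK backend with a
 * hypothetical 512-byte block size, an 8-sector transfer maps to
 * 8 * 512 = 4096 bytes, the same block_size * sectors math as above.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int block_size = 512, sectors = 8;

	printf("%u bytes\n", block_size * sectors);	/* prints: 4096 bytes */
	return 0;
}
#endif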

static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_mem_bidi_list
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
}
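
/*
 * Worked example only (compiled out): step 3 of the XDWRITEREAD sequence
 * above, XORing a data-out buffer into previously read blocks, shown on a
 * flat 4-byte buffer instead of scatterlists. Values are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned char data_out[4] = { 0xff, 0x0f, 0xaa, 0x00 };	/* data-out buffer */
	unsigned char read_buf[4] = { 0xf0, 0x0f, 0x55, 0x00 };	/* blocks read in step 1 */
	int i;

	for (i = 0; i < 4; i++)
		read_buf[i] ^= data_out[i];	/* XOR result goes to the data-in buffer */

	for (i = 0; i < 4; i++)
		printf("%02x ", read_buf[i]);	/* prints: 0f 00 ff 00 */
	printf("\n");
	return 0;
}
#endif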

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;
	u32 offset = 0;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
		goto out;

	if (!dev->transport->get_sense_buffer) {
		pr_err("dev->transport->get_sense_buffer is NULL\n");
		goto out;
	}

	sense_buffer = dev->transport->get_sense_buffer(cmd);
	if (!sense_buffer) {
		pr_err("ITT 0x%08x cmd %p: Unable to locate"
			" sense buffer for task with sense\n",
			cmd->se_tfo->get_task_tag(cmd), cmd);
		goto out;
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);

	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);

	/* Automatically padded */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

	pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return 0;

out:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return -1;
}

static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors;

	if (dev->transport->get_device_type(dev) != TYPE_DISK)
		return 0;

	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
		pr_err("LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			cmd->t_task_lba, sectors,
			transport_dev_end_lba(dev));
		return -EINVAL;
	}

	return 0;
}

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
	/*
	 * Determine if the received WRITE_SAME is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}
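
/*
 * Worked example only (compiled out): the WRITE_SAME flag-byte checks used
 * above, where flags[0] is CDB byte 1: 0x08 = UNMAP, 0x04 = PBDATA,
 * 0x02 = LBDATA. Only UNMAP=1 with PBDATA/LBDATA clear passes emulation.
 */
#if 0
#include <stdio.h>

static int emulated_write_same_ok(unsigned char flags0)
{
	if ((flags0 & 0x04) || (flags0 & 0x02))
		return 0;			/* PBDATA/LBDATA rejected */
	return (flags0 & 0x08) ? 1 : 0;		/* require UNMAP=1 */
}

int main(void)
{
	/* prints: 1 0 0 */
	printf("%d %d %d\n", emulated_write_same_ok(0x08),
			emulated_write_same_ok(0x00),
			emulated_write_same_ok(0x0c));
	return 0;
}
#endif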
2368
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002369/* transport_generic_cmd_sequencer():
2370 *
2371 * Generic Command Sequencer that should work for most DAS transport
2372 * drivers.
2373 *
Andy Grovera12f41f2012-04-03 15:51:20 -07002374 * Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002375 * RX Thread.
2376 *
2377 * FIXME: Need to support other SCSI OPCODES where as well.
2378 */
2379static int transport_generic_cmd_sequencer(
2380 struct se_cmd *cmd,
2381 unsigned char *cdb)
2382{
Andy Grover5951146d2011-07-19 10:26:37 +00002383 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002384 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2385 int ret = 0, sector_ret = 0, passthrough;
2386 u32 sectors = 0, size = 0, pr_reg_type = 0;
2387 u16 service_action;
2388 u8 alua_ascq = 0;
2389 /*
2390 * Check for an existing UNIT ATTENTION condition
2391 */
2392 if (core_scsi3_ua_check(cmd, cdb) < 0) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002393 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2394 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
Andy Grover5951146d2011-07-19 10:26:37 +00002395 return -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002396 }
2397 /*
2398 * Check status of Asymmetric Logical Unit Assignment port
2399 */
Andy Grovere3d6f902011-07-19 08:55:10 +00002400 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002401 if (ret != 0) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002402 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002403 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002404 * The ALUA additional sense code qualifier (ASCQ) is determined
2405 * by the ALUA primary or secondary access state..
2406 */
2407 if (ret > 0) {
Andy Grover6708bb22011-06-08 10:36:43 -07002408 pr_debug("[%s]: ALUA TG Port not available,"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002409 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00002410 cmd->se_tfo->get_fabric_name(), alua_ascq);
Andy Grover8b1e1242012-04-03 15:51:12 -07002411
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002412 transport_set_sense_codes(cmd, 0x04, alua_ascq);
2413 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2414 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
Andy Grover5951146d2011-07-19 10:26:37 +00002415 return -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002416 }
2417 goto out_invalid_cdb_field;
2418 }
2419 /*
2420 * Check status for SPC-3 Persistent Reservations
2421 */
Andy Grovere3d6f902011-07-19 08:55:10 +00002422 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2423 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07002424 cmd, cdb, pr_reg_type) != 0) {
2425 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2426 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
Nicholas Bellinger087a03b2012-03-13 21:29:06 -07002427 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07002428 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2429 return -EBUSY;
2430 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002431 /*
2432 * This means the CDB is allowed for the SCSI Initiator port
2433 * when said port is *NOT* holding the legacy SPC-2 or
2434 * SPC-3 Persistent Reservation.
2435 */
2436 }
2437
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002438 /*
2439 * If we operate in passthrough mode we skip most CDB emulation and
2440 * instead hand the commands down to the physical SCSI device.
2441 */
2442 passthrough =
2443 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2444
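	/*
	 * Note (editorial sketch, grounded in the check above): backends
	 * that front a real SCSI device, e.g. pscsi in this tree, report
	 * TRANSPORT_PLUGIN_PHBA_PDEV, so for them the CDB is handed
	 * through and only the length/LBA decode below is performed.
	 */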
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002445 switch (cdb[0]) {
2446 case READ_6:
2447 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2448 if (sector_ret)
2449 goto out_unsupported_cdb;
2450 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002451 cmd->t_task_lba = transport_lba_21(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002452 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2453 break;
2454 case READ_10:
2455 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2456 if (sector_ret)
2457 goto out_unsupported_cdb;
2458 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002459 cmd->t_task_lba = transport_lba_32(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002460 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2461 break;
2462 case READ_12:
2463 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2464 if (sector_ret)
2465 goto out_unsupported_cdb;
2466 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002467 cmd->t_task_lba = transport_lba_32(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002468 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2469 break;
2470 case READ_16:
2471 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2472 if (sector_ret)
2473 goto out_unsupported_cdb;
2474 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002475 cmd->t_task_lba = transport_lba_64(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002476 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2477 break;
2478 case WRITE_6:
2479 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2480 if (sector_ret)
2481 goto out_unsupported_cdb;
2482 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002483 cmd->t_task_lba = transport_lba_21(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002484 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2485 break;
2486 case WRITE_10:
2487 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2488 if (sector_ret)
2489 goto out_unsupported_cdb;
2490 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002491 cmd->t_task_lba = transport_lba_32(cdb);
Christoph Hellwig2d3a4b52011-11-14 11:36:29 -05002492 if (cdb[1] & 0x8)
2493 cmd->se_cmd_flags |= SCF_FUA;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002494 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2495 break;
2496 case WRITE_12:
2497 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2498 if (sector_ret)
2499 goto out_unsupported_cdb;
2500 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002501 cmd->t_task_lba = transport_lba_32(cdb);
Christoph Hellwig2d3a4b52011-11-14 11:36:29 -05002502 if (cdb[1] & 0x8)
2503 cmd->se_cmd_flags |= SCF_FUA;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002504 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2505 break;
2506 case WRITE_16:
2507 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2508 if (sector_ret)
2509 goto out_unsupported_cdb;
2510 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002511 cmd->t_task_lba = transport_lba_64(cdb);
Christoph Hellwig2d3a4b52011-11-14 11:36:29 -05002512 if (cdb[1] & 0x8)
2513 cmd->se_cmd_flags |= SCF_FUA;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002514 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2515 break;
2516 case XDWRITEREAD_10:
2517 if ((cmd->data_direction != DMA_TO_DEVICE) ||
Christoph Hellwig33c3faf2011-11-14 11:36:30 -05002518 !(cmd->se_cmd_flags & SCF_BIDI))
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002519 goto out_invalid_cdb_field;
2520 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2521 if (sector_ret)
2522 goto out_unsupported_cdb;
2523 size = transport_get_size(sectors, cdb, cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07002524 cmd->t_task_lba = transport_lba_32(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002525 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
Christoph Hellwig7c1c6af2011-10-18 06:57:00 -04002526
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002527 /*
2528 * Do not allow BIDI commands for passthrough mode.
2529 */
2530 if (passthrough)
Christoph Hellwig7c1c6af2011-10-18 06:57:00 -04002531 goto out_unsupported_cdb;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002532
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002533 /*
Christoph Hellwig35e0e752011-10-17 13:56:53 -04002534 * Set up BIDI XOR callback to be run after I/O completion.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002535 */
2536 cmd->transport_complete_callback = &transport_xor_callback;
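		/*
		 * Sketch of the intent (per the SBC XDWRITEREAD definition,
		 * stated here as an assumption rather than taken from this
		 * file): the callback runs from I/O completion and XORs the
		 * just-read payload with the WRITE payload before status is
		 * returned to the fabric.
		 */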
Christoph Hellwig2d3a4b52011-11-14 11:36:29 -05002537 if (cdb[1] & 0x8)
2538 cmd->se_cmd_flags |= SCF_FUA;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002539 break;
2540 case VARIABLE_LENGTH_CMD:
2541 service_action = get_unaligned_be16(&cdb[8]);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002542 switch (service_action) {
2543 case XDWRITEREAD_32:
2544 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2545 if (sector_ret)
2546 goto out_unsupported_cdb;
2547 size = transport_get_size(sectors, cdb, cmd);
2548 /*
2549 * Use WRITE_32 and READ_32 opcodes for the emulated
2550 * XDWRITE_READ_32 logic.
2551 */
Andy Grovera1d8b492011-05-02 17:12:10 -07002552 cmd->t_task_lba = transport_lba_64_ext(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002553 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2554
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002555 /*
2556 * Do not allow BIDI commands for passthrough mode.
2557 */
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002558 if (passthrough)
Christoph Hellwig7c1c6af2011-10-18 06:57:00 -04002559 goto out_unsupported_cdb;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002560
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002561 /*
Christoph Hellwig35e0e752011-10-17 13:56:53 -04002562 * Set up BIDI XOR callback to be run after I/O
2563 * completion.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002564 */
2565 cmd->transport_complete_callback = &transport_xor_callback;
Christoph Hellwig2d3a4b52011-11-14 11:36:29 -05002566 if (cdb[1] & 0x8)
2567 cmd->se_cmd_flags |= SCF_FUA;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002568 break;
2569 case WRITE_SAME_32:
2570 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2571 if (sector_ret)
2572 goto out_unsupported_cdb;
Nicholas Bellingerdd3a5ad2011-05-06 17:55:35 -07002573
Andy Grover6708bb22011-06-08 10:36:43 -07002574 if (sectors)
Nicholas Bellinger12850622011-08-08 19:08:23 -07002575 size = transport_get_size(1, cdb, cmd);
Andy Grover6708bb22011-06-08 10:36:43 -07002576 else {
2577 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2578 " supported\n");
2579 goto out_invalid_cdb_field;
2580 }
Nicholas Bellingerdd3a5ad2011-05-06 17:55:35 -07002581
Andy Grovera1d8b492011-05-02 17:12:10 -07002582 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002583 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2584
Nicholas Bellinger706d5862011-07-28 00:07:03 -07002585 if (target_check_write_same_discard(&cdb[10], dev) < 0)
Martin Svec67236c42012-02-06 22:13:25 -08002586 goto out_unsupported_cdb;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002587 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002588 cmd->execute_cmd = target_emulate_write_same;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002589 break;
2590 default:
Andy Grover6708bb22011-06-08 10:36:43 -07002591 pr_err("VARIABLE_LENGTH_CMD service action"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002592 " 0x%04x not supported\n", service_action);
2593 goto out_unsupported_cdb;
2594 }
2595 break;
Nicholas Bellingere434f1f12011-02-21 07:49:26 +00002596 case MAINTENANCE_IN:
Andy Grovere3d6f902011-07-19 08:55:10 +00002597 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002598 /* MAINTENANCE_IN from SCC-2 */
2599 /*
2600 * Check for emulated MI_REPORT_TARGET_PGS.
2601 */
Christoph Hellwige76a35d2011-11-03 17:50:42 -04002602 if (cdb[1] == MI_REPORT_TARGET_PGS &&
2603 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002604 cmd->execute_cmd =
Christoph Hellwige76a35d2011-11-03 17:50:42 -04002605 target_emulate_report_target_port_groups;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002606 }
2607 size = (cdb[6] << 24) | (cdb[7] << 16) |
2608 (cdb[8] << 8) | cdb[9];
2609 } else {
2610 /* GPCMD_SEND_KEY from multi media commands */
2611 size = (cdb[8] << 8) + cdb[9];
2612 }
Andy Grover05d1c7c2011-07-20 19:13:28 +00002613 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002614 break;
2615 case MODE_SELECT:
2616 size = cdb[4];
2617 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2618 break;
2619 case MODE_SELECT_10:
2620 size = (cdb[7] << 8) + cdb[8];
2621 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2622 break;
2623 case MODE_SENSE:
2624 size = cdb[4];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002625 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002626 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002627 cmd->execute_cmd = target_emulate_modesense;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002628 break;
2629 case MODE_SENSE_10:
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002630 size = (cdb[7] << 8) + cdb[8];
2631 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2632 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002633 cmd->execute_cmd = target_emulate_modesense;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002634 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002635 case GPCMD_READ_BUFFER_CAPACITY:
2636 case GPCMD_SEND_OPC:
2637 case LOG_SELECT:
2638 case LOG_SENSE:
2639 size = (cdb[7] << 8) + cdb[8];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002640 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002641 break;
2642 case READ_BLOCK_LIMITS:
2643 size = READ_BLOCK_LEN;
Andy Grover05d1c7c2011-07-20 19:13:28 +00002644 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002645 break;
2646 case GPCMD_GET_CONFIGURATION:
2647 case GPCMD_READ_FORMAT_CAPACITIES:
2648 case GPCMD_READ_DISC_INFO:
2649 case GPCMD_READ_TRACK_RZONE_INFO:
2650 size = (cdb[7] << 8) + cdb[8];
2651 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2652 break;
2653 case PERSISTENT_RESERVE_IN:
Christoph Hellwig617c0e02011-11-03 17:50:41 -04002654 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002655 cmd->execute_cmd = target_scsi3_emulate_pr_in;
Christoph Hellwig617c0e02011-11-03 17:50:41 -04002656 size = (cdb[7] << 8) + cdb[8];
2657 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2658 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002659 case PERSISTENT_RESERVE_OUT:
Christoph Hellwig617c0e02011-11-03 17:50:41 -04002660 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002661 cmd->execute_cmd = target_scsi3_emulate_pr_out;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002662 size = (cdb[7] << 8) + cdb[8];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002663 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002664 break;
2665 case GPCMD_MECHANISM_STATUS:
2666 case GPCMD_READ_DVD_STRUCTURE:
2667 size = (cdb[8] << 8) + cdb[9];
2668 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2669 break;
2670 case READ_POSITION:
2671 size = READ_POSITION_LEN;
Andy Grover05d1c7c2011-07-20 19:13:28 +00002672 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002673 break;
Nicholas Bellingere434f1f12011-02-21 07:49:26 +00002674 case MAINTENANCE_OUT:
Andy Grovere3d6f902011-07-19 08:55:10 +00002675 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002676 /* MAINTENANCE_OUT from SCC-2
2677 *
2678 * Check for emulated MO_SET_TARGET_PGS.
2679 */
Christoph Hellwige76a35d2011-11-03 17:50:42 -04002680 if (cdb[1] == MO_SET_TARGET_PGS &&
2681 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002682 cmd->execute_cmd =
Christoph Hellwige76a35d2011-11-03 17:50:42 -04002683 target_emulate_set_target_port_groups;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002684 }
2685
2686 size = (cdb[6] << 24) | (cdb[7] << 16) |
2687 (cdb[8] << 8) | cdb[9];
2688 } else {
2689 /* GPCMD_REPORT_KEY from multi media commands */
2690 size = (cdb[8] << 8) + cdb[9];
2691 }
Andy Grover05d1c7c2011-07-20 19:13:28 +00002692 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002693 break;
2694 case INQUIRY:
2695 size = (cdb[3] << 8) + cdb[4];
2696 /*
2697 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
2698 * See spc4r17 section 5.3
2699 */
Andy Grover5951146d2011-07-19 10:26:37 +00002700 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
Nicholas Bellingere66ecd52011-05-19 20:19:14 -07002701 cmd->sam_task_attr = MSG_HEAD_TAG;
Andy Grover05d1c7c2011-07-20 19:13:28 +00002702 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002703 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002704 cmd->execute_cmd = target_emulate_inquiry;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002705 break;
2706 case READ_BUFFER:
2707 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002708 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002709 break;
2710 case READ_CAPACITY:
2711 size = READ_CAP_LEN;
Andy Grover05d1c7c2011-07-20 19:13:28 +00002712 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002713 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002714 cmd->execute_cmd = target_emulate_readcapacity;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002715 break;
2716 case READ_MEDIA_SERIAL_NUMBER:
2717 case SECURITY_PROTOCOL_IN:
2718 case SECURITY_PROTOCOL_OUT:
2719 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002720 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002721 break;
2722 case SERVICE_ACTION_IN:
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002723 switch (cmd->t_task_cdb[1] & 0x1f) {
2724 case SAI_READ_CAPACITY_16:
2725 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002726 cmd->execute_cmd =
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002727 target_emulate_readcapacity_16;
2728 break;
2729 default:
2730 if (passthrough)
2731 break;
2732
2733 pr_err("Unsupported SA: 0x%02x\n",
2734 cmd->t_task_cdb[1] & 0x1f);
Roland Dreierb168fe82012-03-14 10:40:43 -07002735 goto out_invalid_cdb_field;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002736 }
2737 /*FALLTHROUGH*/
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002738 case ACCESS_CONTROL_IN:
2739 case ACCESS_CONTROL_OUT:
2740 case EXTENDED_COPY:
2741 case READ_ATTRIBUTE:
2742 case RECEIVE_COPY_RESULTS:
2743 case WRITE_ATTRIBUTE:
2744 size = (cdb[10] << 24) | (cdb[11] << 16) |
2745 (cdb[12] << 8) | cdb[13];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002746 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002747 break;
2748 case RECEIVE_DIAGNOSTIC:
2749 case SEND_DIAGNOSTIC:
2750 size = (cdb[3] << 8) | cdb[4];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002751 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002752 break;
2753/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2754#if 0
2755 case GPCMD_READ_CD:
2756 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2757 size = (2336 * sectors);
Andy Grover05d1c7c2011-07-20 19:13:28 +00002758 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002759 break;
2760#endif
2761 case READ_TOC:
2762 size = cdb[8];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002763 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002764 break;
2765 case REQUEST_SENSE:
2766 size = cdb[4];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002767 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002768 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002769 cmd->execute_cmd = target_emulate_request_sense;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002770 break;
2771 case READ_ELEMENT_STATUS:
2772 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002773 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002774 break;
2775 case WRITE_BUFFER:
2776 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
Andy Grover05d1c7c2011-07-20 19:13:28 +00002777 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002778 break;
2779 case RESERVE:
2780 case RESERVE_10:
2781 /*
2782 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2783 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2784 */
2785 if (cdb[0] == RESERVE_10)
2786 size = (cdb[7] << 8) | cdb[8];
2787 else
2788 size = cmd->data_length;
2789
2790 /*
2791 * Set up the legacy emulated handler for SPC-2 and
2792 * >= SPC-3 compatible reservation handling (CRH=1).
2793 * Otherwise, we assume the underlying SCSI logic is
2794 * running in SPC_PASSTHROUGH, and wants reservation
2795 * emulation disabled.
2796 */
Christoph Hellwige76a35d2011-11-03 17:50:42 -04002797 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002798 cmd->execute_cmd = target_scsi2_reservation_reserve;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002799 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2800 break;
2801 case RELEASE:
2802 case RELEASE_10:
2803 /*
2804 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
2805 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2806 */
2807 if (cdb[0] == RELEASE_10)
2808 size = (cdb[7] << 8) | cdb[8];
2809 else
2810 size = cmd->data_length;
2811
Christoph Hellwige76a35d2011-11-03 17:50:42 -04002812 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002813 cmd->execute_cmd = target_scsi2_reservation_release;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002814 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2815 break;
2816 case SYNCHRONIZE_CACHE:
Andy Grover8e94b8d2012-01-16 16:57:07 -08002817 case SYNCHRONIZE_CACHE_16:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002818 /*
2819 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2820 */
2821 if (cdb[0] == SYNCHRONIZE_CACHE) {
2822 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
Andy Grovera1d8b492011-05-02 17:12:10 -07002823 cmd->t_task_lba = transport_lba_32(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002824 } else {
2825 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
Andy Grovera1d8b492011-05-02 17:12:10 -07002826 cmd->t_task_lba = transport_lba_64(cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002827 }
2828 if (sector_ret)
2829 goto out_unsupported_cdb;
2830
2831 size = transport_get_size(sectors, cdb, cmd);
2832 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2833
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002834 if (passthrough)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002835 break;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002836
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002837 /*
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002838 * Check to ensure that LBA + Range does not exceed past end of
Nicholas Bellinger7abbe7f32011-08-10 18:41:14 -07002839 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002840 */
Nicholas Bellinger7abbe7f32011-08-10 18:41:14 -07002841 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2842 if (transport_cmd_get_valid_sectors(cmd) < 0)
2843 goto out_invalid_cdb_field;
2844 }
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002845 cmd->execute_cmd = target_emulate_synchronize_cache;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002846 break;
2847 case UNMAP:
2848 size = get_unaligned_be16(&cdb[7]);
Andy Grover05d1c7c2011-07-20 19:13:28 +00002849 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002850 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002851 cmd->execute_cmd = target_emulate_unmap;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002852 break;
2853 case WRITE_SAME_16:
2854 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2855 if (sector_ret)
2856 goto out_unsupported_cdb;
Nicholas Bellingerdd3a5ad2011-05-06 17:55:35 -07002857
Andy Grover6708bb22011-06-08 10:36:43 -07002858 if (sectors)
Nicholas Bellinger12850622011-08-08 19:08:23 -07002859 size = transport_get_size(1, cdb, cmd);
Andy Grover6708bb22011-06-08 10:36:43 -07002860 else {
2861 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2862 goto out_invalid_cdb_field;
2863 }
Nicholas Bellingerdd3a5ad2011-05-06 17:55:35 -07002864
Nicholas Bellinger5db07532011-07-27 22:18:52 -07002865 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002866 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellinger706d5862011-07-28 00:07:03 -07002867
2868 if (target_check_write_same_discard(&cdb[1], dev) < 0)
Martin Svec67236c42012-02-06 22:13:25 -08002869 goto out_unsupported_cdb;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002870 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002871 cmd->execute_cmd = target_emulate_write_same;
Nicholas Bellinger706d5862011-07-28 00:07:03 -07002872 break;
2873 case WRITE_SAME:
2874 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2875 if (sector_ret)
2876 goto out_unsupported_cdb;
2877
2878 if (sectors)
Nicholas Bellinger12850622011-08-08 19:08:23 -07002879 size = transport_get_size(1, cdb, cmd);
Nicholas Bellinger706d5862011-07-28 00:07:03 -07002880 else {
2881 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2882 goto out_invalid_cdb_field;
2883 }
2884
2885 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
2886 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2887 /*
2888 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
2889 * of byte 1 bit 3 UNMAP instead of original reserved field
2890 */
2891 if (target_check_write_same_discard(&cdb[1], dev) < 0)
Martin Svec67236c42012-02-06 22:13:25 -08002892 goto out_unsupported_cdb;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002893 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002894 cmd->execute_cmd = target_emulate_write_same;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002895 break;
2896 case ALLOW_MEDIUM_REMOVAL:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002897 case ERASE:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002898 case REZERO_UNIT:
2899 case SEEK_10:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002900 case SPACE:
2901 case START_STOP:
2902 case TEST_UNIT_READY:
2903 case VERIFY:
2904 case WRITE_FILEMARKS:
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002905 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2906 if (!passthrough)
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002907 cmd->execute_cmd = target_emulate_noop;
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002908 break;
2909 case GPCMD_CLOSE_TRACK:
2910 case INITIALIZE_ELEMENT_STATUS:
2911 case GPCMD_LOAD_UNLOAD:
2912 case GPCMD_SET_SPEED:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002913 case MOVE_MEDIUM:
2914 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2915 break;
2916 case REPORT_LUNS:
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002917 cmd->execute_cmd = target_report_luns;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002918 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2919 /*
2920 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
2921 * See spc4r17 section 5.3
2922 */
Andy Grover5951146d2011-07-19 10:26:37 +00002923 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
Nicholas Bellingere66ecd52011-05-19 20:19:14 -07002924 cmd->sam_task_attr = MSG_HEAD_TAG;
Andy Grover05d1c7c2011-07-20 19:13:28 +00002925 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002926 break;
Stefan Hajnoczied0b2142012-04-30 16:35:13 +01002927 case GET_EVENT_STATUS_NOTIFICATION:
2928 size = (cdb[7] << 8) | cdb[8];
2929 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2930 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002931 default:
Andy Grover6708bb22011-06-08 10:36:43 -07002932 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002933 " 0x%02x, sending CHECK_CONDITION.\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00002934 cmd->se_tfo->get_fabric_name(), cdb[0]);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002935 goto out_unsupported_cdb;
2936 }
2937
Sebastian Andrzej Siewiorb0d79942012-01-10 14:16:59 +01002938 if (cmd->unknown_data_length)
2939 cmd->data_length = size;
2940
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002941 if (size != cmd->data_length) {
Andy Grover6708bb22011-06-08 10:36:43 -07002942 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002943 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
Andy Grovere3d6f902011-07-19 08:55:10 +00002944 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002945 cmd->data_length, size, cdb[0]);
2946
2947 cmd->cmd_spdtl = size;
2948
2949 if (cmd->data_direction == DMA_TO_DEVICE) {
Andy Grover6708bb22011-06-08 10:36:43 -07002950 pr_err("Rejecting underflow/overflow"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002951 " WRITE data\n");
2952 goto out_invalid_cdb_field;
2953 }
2954 /*
2955 * Reject READ_* or WRITE_* with overflow/underflow for
2956 * type SCF_SCSI_DATA_SG_IO_CDB.
2957 */
Andy Grover6708bb22011-06-08 10:36:43 -07002958 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
2959 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002960 " CDB on non 512-byte sector setup subsystem"
Andy Grovere3d6f902011-07-19 08:55:10 +00002961 " plugin: %s\n", dev->transport->name);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002962 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
2963 goto out_invalid_cdb_field;
2964 }
2965
2966 if (size > cmd->data_length) {
2967 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
2968 cmd->residual_count = (size - cmd->data_length);
2969 } else {
2970 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
2971 cmd->residual_count = (cmd->data_length - size);
2972 }
2973 cmd->data_length = size;
2974 }
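	/*
	 * Worked example of the reconciliation above: a CDB encoding
	 * size = 8192 bytes against a fabric-reported data_length = 4096
	 * is flagged SCF_OVERFLOW_BIT with residual_count = 4096 and
	 * data_length rewritten to 8192; the inverse mismatch becomes
	 * SCF_UNDERFLOW_BIT. Any mismatch on a WRITE is rejected above.
	 */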
2975
Roland Dreier015487b2012-02-13 16:18:17 -08002976 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
Christoph Hellwigbebe2fd2012-03-26 17:53:17 -04002977 (sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors ||
2978 sectors > dev->se_sub_dev->se_dev_attrib.max_sectors)) {
Roland Dreier015487b2012-02-13 16:18:17 -08002979 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
2980 cdb[0], sectors);
2981 goto out_invalid_cdb_field;
2982 }
2983
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002984 /* reject any command that we don't have a handler for */
Christoph Hellwig6bb35e02012-04-23 11:35:33 -04002985 if (!(passthrough || cmd->execute_cmd ||
Christoph Hellwig5bda90c2011-11-03 17:50:45 -04002986 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
2987 goto out_unsupported_cdb;
2988
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002989 transport_set_supported_SAM_opcode(cmd);
2990 return ret;
2991
2992out_unsupported_cdb:
2993 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2994 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
Andy Grover5951146d2011-07-19 10:26:37 +00002995 return -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002996out_invalid_cdb_field:
2997 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2998 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
Andy Grover5951146d2011-07-19 10:26:37 +00002999 return -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003000}
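/*
 * Rough caller sketch (illustrative, not verbatim from this file): the
 * fabric RX path reaches the sequencer via target_setup_cmd_from_cdb(),
 * roughly:
 *
 *	ret = transport_generic_cmd_sequencer(cmd, cdb);
 *	if (ret < 0)
 *		return ret;	(se_cmd_flags/scsi_sense_reason already set)
 *
 * so a CHECK_CONDITION can be generated without further decode here.
 */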
3001
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003002/*
Christoph Hellwig35e0e752011-10-17 13:56:53 -04003003 * Called from I/O completion to determine which dormant/delayed
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003004 * and ordered cmds need to have their tasks added to the execution queue.
3005 */
3006static void transport_complete_task_attr(struct se_cmd *cmd)
3007{
Andy Grover5951146d2011-07-19 10:26:37 +00003008 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003009 struct se_cmd *cmd_p, *cmd_tmp;
3010 int new_active_tasks = 0;
3011
Nicholas Bellingere66ecd52011-05-19 20:19:14 -07003012 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003013 atomic_dec(&dev->simple_cmds);
3014 smp_mb__after_atomic_dec();
3015 dev->dev_cur_ordered_id++;
Andy Grover6708bb22011-06-08 10:36:43 -07003016 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003017 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3018 cmd->se_ordered_id);
Nicholas Bellingere66ecd52011-05-19 20:19:14 -07003019 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003020 dev->dev_cur_ordered_id++;
Andy Grover6708bb22011-06-08 10:36:43 -07003021 pr_debug("Incremented dev_cur_ordered_id: %u for"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003022 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3023 cmd->se_ordered_id);
Nicholas Bellingere66ecd52011-05-19 20:19:14 -07003024 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003025 atomic_dec(&dev->dev_ordered_sync);
3026 smp_mb__after_atomic_dec();
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003027
3028 dev->dev_cur_ordered_id++;
Andy Grover6708bb22011-06-08 10:36:43 -07003029 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003030 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3031 }
3032 /*
3033 * Process all commands up to the last received
3034 * ORDERED task attribute which requires another blocking
3035 * boundary
3036 */
3037 spin_lock(&dev->delayed_cmd_lock);
3038 list_for_each_entry_safe(cmd_p, cmd_tmp,
Andy Grover5951146d2011-07-19 10:26:37 +00003039 &dev->delayed_cmd_list, se_delayed_node) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003040
Andy Grover5951146d2011-07-19 10:26:37 +00003041 list_del(&cmd_p->se_delayed_node);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003042 spin_unlock(&dev->delayed_cmd_lock);
3043
Andy Grover6708bb22011-06-08 10:36:43 -07003044 pr_debug("Calling target_add_to_execute_list() for"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003045 " cmd_p: 0x%02x Task Attr: 0x%02x"
3046 " Dormant -> Active, se_ordered_id: %u\n",
Andy Grover6708bb22011-06-08 10:36:43 -07003047 cmd_p->t_task_cdb[0],
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003048 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3049
Christoph Hellwigcf572a92012-04-24 00:25:05 -04003050 target_add_to_execute_list(cmd_p);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003051 new_active_tasks++;
3052
3053 spin_lock(&dev->delayed_cmd_lock);
Nicholas Bellingere66ecd52011-05-19 20:19:14 -07003054 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003055 break;
3056 }
3057 spin_unlock(&dev->delayed_cmd_lock);
3058 /*
3059 * If new tasks have become active, wake up the transport thread
3060 * to do the processing of the Active tasks.
3061 */
3062 if (new_active_tasks != 0)
Andy Grovere3d6f902011-07-19 08:55:10 +00003063 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003064}
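/*
 * Barrier illustration for the walk above: with SIMPLE commands A and B
 * delayed behind ORDERED command O1, and ORDERED command O2 queued after
 * them, O1's completion re-activates A, B and then O2, stopping there so
 * that O2 becomes the next blocking boundary.
 */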
3065
Christoph Hellwige057f532011-10-17 13:56:41 -04003066static void transport_complete_qf(struct se_cmd *cmd)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003067{
3068 int ret = 0;
3069
Christoph Hellwige057f532011-10-17 13:56:41 -04003070 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3071 transport_complete_task_attr(cmd);
3072
3073 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3074 ret = cmd->se_tfo->queue_status(cmd);
3075 if (ret)
3076 goto out;
3077 }
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003078
3079 switch (cmd->data_direction) {
3080 case DMA_FROM_DEVICE:
3081 ret = cmd->se_tfo->queue_data_in(cmd);
3082 break;
3083 case DMA_TO_DEVICE:
Andy Groverec98f782011-07-20 19:28:46 +00003084 if (cmd->t_bidi_data_sg) {
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003085 ret = cmd->se_tfo->queue_data_in(cmd);
3086 if (ret < 0)
Christoph Hellwige057f532011-10-17 13:56:41 -04003087 break;
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003088 }
3089 /* Fall through for DMA_TO_DEVICE */
3090 case DMA_NONE:
3091 ret = cmd->se_tfo->queue_status(cmd);
3092 break;
3093 default:
3094 break;
3095 }
3096
Christoph Hellwige057f532011-10-17 13:56:41 -04003097out:
3098 if (ret < 0) {
3099 transport_handle_queue_full(cmd, cmd->se_dev);
3100 return;
3101 }
3102 transport_lun_remove_cmd(cmd);
3103 transport_cmd_check_stop_to_fabric(cmd);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003104}
3105
3106static void transport_handle_queue_full(
3107 struct se_cmd *cmd,
Christoph Hellwige057f532011-10-17 13:56:41 -04003108 struct se_device *dev)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003109{
3110 spin_lock_irq(&dev->qf_cmd_lock);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003111 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3112 atomic_inc(&dev->dev_qf_count);
3113 smp_mb__after_atomic_inc();
3114 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3115
3116 schedule_work(&cmd->se_dev->qf_work_queue);
3117}
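/*
 * Queue-full recovery sketch: a command whose fabric callback returned
 * -EAGAIN/-ENOMEM is parked on the device's qf_cmd_list here, and the
 * scheduled qf_work_queue later replays delivery (see
 * transport_complete_qf() above, which re-issues queue_status /
 * queue_data_in as appropriate).
 */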
3118
Christoph Hellwig35e0e752011-10-17 13:56:53 -04003119static void target_complete_ok_work(struct work_struct *work)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003120{
Christoph Hellwig35e0e752011-10-17 13:56:53 -04003121 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003122 int reason = 0, ret;
Christoph Hellwig35e0e752011-10-17 13:56:53 -04003123
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003124 /*
3125 * Check if we need to move delayed/dormant tasks from cmds on the
3126 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3127 * Attribute.
3128 */
Andy Grover5951146d2011-07-19 10:26:37 +00003129 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003130 transport_complete_task_attr(cmd);
3131 /*
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003132 * Check if we need to schedule QUEUE_FULL work to retry
3133 * commands parked on this device's qf_cmd_list.
3134 */
3135 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3136 schedule_work(&cmd->se_dev->qf_work_queue);
3137
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003138 /*
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003139 * Check if we need to retrieve a sense buffer from
3140 * the struct se_cmd in question.
3141 */
3142 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3143 if (transport_get_sense_data(cmd) < 0)
3144 reason = TCM_NON_EXISTENT_LUN;
3145
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003146 if (cmd->scsi_status) {
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003147 ret = transport_send_check_condition_and_sense(
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003148 cmd, reason, 1);
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07003149 if (ret == -EAGAIN || ret == -ENOMEM)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003150 goto queue_full;
3151
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003152 transport_lun_remove_cmd(cmd);
3153 transport_cmd_check_stop_to_fabric(cmd);
3154 return;
3155 }
3156 }
3157 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003158 * Check for a callback, used by, amongst other things,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003159 * XDWRITE_READ_10 emulation.
3160 */
3161 if (cmd->transport_complete_callback)
3162 cmd->transport_complete_callback(cmd);
3163
3164 switch (cmd->data_direction) {
3165 case DMA_FROM_DEVICE:
3166 spin_lock(&cmd->se_lun->lun_sep_lock);
Andy Grovere3d6f902011-07-19 08:55:10 +00003167 if (cmd->se_lun->lun_sep) {
3168 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003169 cmd->data_length;
3170 }
3171 spin_unlock(&cmd->se_lun->lun_sep_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003172
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003173 ret = cmd->se_tfo->queue_data_in(cmd);
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07003174 if (ret == -EAGAIN || ret == -ENOMEM)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003175 goto queue_full;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003176 break;
3177 case DMA_TO_DEVICE:
3178 spin_lock(&cmd->se_lun->lun_sep_lock);
Andy Grovere3d6f902011-07-19 08:55:10 +00003179 if (cmd->se_lun->lun_sep) {
3180 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003181 cmd->data_length;
3182 }
3183 spin_unlock(&cmd->se_lun->lun_sep_lock);
3184 /*
3185 * Check if we need to send READ payload for BIDI-COMMAND
3186 */
Andy Groverec98f782011-07-20 19:28:46 +00003187 if (cmd->t_bidi_data_sg) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003188 spin_lock(&cmd->se_lun->lun_sep_lock);
Andy Grovere3d6f902011-07-19 08:55:10 +00003189 if (cmd->se_lun->lun_sep) {
3190 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003191 cmd->data_length;
3192 }
3193 spin_unlock(&cmd->se_lun->lun_sep_lock);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003194 ret = cmd->se_tfo->queue_data_in(cmd);
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07003195 if (ret == -EAGAIN || ret == -ENOMEM)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003196 goto queue_full;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003197 break;
3198 }
3199 /* Fall through for DMA_TO_DEVICE */
3200 case DMA_NONE:
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003201 ret = cmd->se_tfo->queue_status(cmd);
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07003202 if (ret == -EAGAIN || ret == -ENOMEM)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003203 goto queue_full;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003204 break;
3205 default:
3206 break;
3207 }
3208
3209 transport_lun_remove_cmd(cmd);
3210 transport_cmd_check_stop_to_fabric(cmd);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003211 return;
3212
3213queue_full:
Andy Grover6708bb22011-06-08 10:36:43 -07003214 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003215 " data_direction: %d\n", cmd, cmd->data_direction);
Christoph Hellwige057f532011-10-17 13:56:41 -04003216 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3217 transport_handle_queue_full(cmd, cmd->se_dev);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003218}
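/*
 * Completion-path summary for the function above: task-attribute
 * bookkeeping, optional sense pickup, the per-command completion
 * callback (e.g. the XDWRITEREAD XOR), then queue_data_in and/or
 * queue_status toward the fabric, with -EAGAIN/-ENOMEM diverting to the
 * queue-full path.
 */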
3219
Andy Grover6708bb22011-06-08 10:36:43 -07003220static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003221{
Andy Groverec98f782011-07-20 19:28:46 +00003222 struct scatterlist *sg;
Andy Groverec98f782011-07-20 19:28:46 +00003223 int count;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003224
Andy Grover6708bb22011-06-08 10:36:43 -07003225 for_each_sg(sgl, sg, nents, count)
3226 __free_page(sg_page(sg));
3227
3228 kfree(sgl);
3229}
3230
3231static inline void transport_free_pages(struct se_cmd *cmd)
3232{
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003233 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
Andy Grover6708bb22011-06-08 10:36:43 -07003234 return;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003235
Andy Grover6708bb22011-06-08 10:36:43 -07003236 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
Andy Groverec98f782011-07-20 19:28:46 +00003237 cmd->t_data_sg = NULL;
3238 cmd->t_data_nents = 0;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003239
Andy Grover6708bb22011-06-08 10:36:43 -07003240 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
Andy Groverec98f782011-07-20 19:28:46 +00003241 cmd->t_bidi_data_sg = NULL;
3242 cmd->t_bidi_data_nents = 0;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003243}
3244
Christoph Hellwigd3df7822011-09-13 23:08:32 +02003245/**
Christoph Hellwige26d99a2011-11-14 12:30:30 -05003246 * transport_release_cmd - free a command
3247 * @cmd: command to free
3248 *
3249 * This routine unconditionally frees a command, and reference counting
3250 * or list removal must be done in the caller.
3251 */
3252static void transport_release_cmd(struct se_cmd *cmd)
3253{
3254 BUG_ON(!cmd->se_tfo);
3255
Andy Groverc8e31f22012-01-19 13:39:17 -08003256 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
Christoph Hellwige26d99a2011-11-14 12:30:30 -05003257 core_tmr_release_req(cmd->se_tmr_req);
3258 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3259 kfree(cmd->t_task_cdb);
3260 /*
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003261 * If this cmd has been setup with target_get_sess_cmd(), drop
3262 * the kref and call ->release_cmd() in kref callback.
Christoph Hellwige26d99a2011-11-14 12:30:30 -05003263 */
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003264 if (cmd->check_release != 0) {
3265 target_put_sess_cmd(cmd->se_sess, cmd);
3266 return;
3267 }
Christoph Hellwige26d99a2011-11-14 12:30:30 -05003268 cmd->se_tfo->release_cmd(cmd);
3269}
3270
3271/**
Christoph Hellwigd3df7822011-09-13 23:08:32 +02003272 * transport_put_cmd - release a reference to a command
3273 * @cmd: command to release
3274 *
3275 * This routine releases our reference to the command and frees it if possible.
3276 */
Nicholas Bellinger39c05f32011-10-08 13:59:52 -07003277static void transport_put_cmd(struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003278{
3279 unsigned long flags;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003280
Andy Grovera1d8b492011-05-02 17:12:10 -07003281 spin_lock_irqsave(&cmd->t_state_lock, flags);
Christoph Hellwig4911e3c2011-09-13 23:08:42 +02003282 if (atomic_read(&cmd->t_fe_count)) {
3283 if (!atomic_dec_and_test(&cmd->t_fe_count))
3284 goto out_busy;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003285 }
Christoph Hellwig4911e3c2011-09-13 23:08:42 +02003286
3287 if (atomic_read(&cmd->t_se_count)) {
3288 if (!atomic_dec_and_test(&cmd->t_se_count))
3289 goto out_busy;
3290 }
3291
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003292 if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
3293 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
Christoph Hellwigcf572a92012-04-24 00:25:05 -04003294 target_remove_from_state_list(cmd);
Christoph Hellwig4911e3c2011-09-13 23:08:42 +02003295 }
Andy Grovera1d8b492011-05-02 17:12:10 -07003296 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003297
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003298 transport_free_pages(cmd);
Christoph Hellwig31afc392011-09-13 23:08:11 +02003299 transport_release_cmd(cmd);
Nicholas Bellinger39c05f32011-10-08 13:59:52 -07003300 return;
Christoph Hellwig4911e3c2011-09-13 23:08:42 +02003301out_busy:
3302 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003303}
3304
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003305/*
Andy Groverec98f782011-07-20 19:28:46 +00003306 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3307 * allocating in the core.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003308 * @cmd: Associated se_cmd descriptor
3309 * @sgl: SGL style memory for TCM WRITE / READ
3310 * @sgl_count: Number of SGL elements
3311 * @sgl_bidi: SGL style memory for TCM BIDI READ
3312 * @sgl_bidi_count: Number of BIDI READ SGL elements
3313 *
3314 * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
3315 * of parameters.
3316 */
3317int transport_generic_map_mem_to_cmd(
3318 struct se_cmd *cmd,
Andy Grover5951146d2011-07-19 10:26:37 +00003319 struct scatterlist *sgl,
3320 u32 sgl_count,
3321 struct scatterlist *sgl_bidi,
3322 u32 sgl_bidi_count)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003323{
Andy Grover5951146d2011-07-19 10:26:37 +00003324 if (!sgl || !sgl_count)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003325 return 0;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003326
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003327 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3328 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
Nicholas Bellingerfef58a62011-11-15 22:13:24 -08003329 /*
3330 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
3331 * scatterlists already have been set to follow what the fabric
3332 * passes for the original expected data transfer length.
3333 */
3334 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3335 pr_warn("Rejecting SCSI DATA overflow for fabric using"
3336 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3337 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3338 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3339 return -EINVAL;
3340 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003341
Andy Groverec98f782011-07-20 19:28:46 +00003342 cmd->t_data_sg = sgl;
3343 cmd->t_data_nents = sgl_count;
3344
Andy Grover5951146d2011-07-19 10:26:37 +00003345 if (sgl_bidi && sgl_bidi_count) {
Andy Groverec98f782011-07-20 19:28:46 +00003346 cmd->t_bidi_data_sg = sgl_bidi;
3347 cmd->t_bidi_data_nents = sgl_bidi_count;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003348 }
3349 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003350 }
3351
3352 return 0;
3353}
3354EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
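/*
 * Hypothetical usage sketch for a fabric that already owns the SGLs
 * (the task->sgl names below are illustrative, not from this file):
 *
 *	rc = transport_generic_map_mem_to_cmd(se_cmd, task->sgl,
 *					      task->sgl_count, NULL, 0);
 *	if (rc < 0)
 *		return rc;
 *
 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC then makes
 * transport_generic_new_cmd() skip its own page allocation.
 */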
3355
Andy Grover49493142012-01-16 16:57:08 -08003356void *transport_kmap_data_sg(struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003357{
Andy Groverec98f782011-07-20 19:28:46 +00003358 struct scatterlist *sg = cmd->t_data_sg;
Andy Grover49493142012-01-16 16:57:08 -08003359 struct page **pages;
3360 int i;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003361
Andy Groverec98f782011-07-20 19:28:46 +00003362 BUG_ON(!sg);
Andy Grover05d1c7c2011-07-20 19:13:28 +00003363 /*
Andy Groverec98f782011-07-20 19:28:46 +00003364 * We need to take into account a possible offset here for fabrics like
3365 * tcm_loop, which may be using a contig buffer from the SCSI midlayer for
3366 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
Andy Grover05d1c7c2011-07-20 19:13:28 +00003367 */
Andy Grover49493142012-01-16 16:57:08 -08003368 if (!cmd->t_data_nents)
3369 return NULL;
3370 else if (cmd->t_data_nents == 1)
3371 return kmap(sg_page(sg)) + sg->offset;
Andy Grover05d1c7c2011-07-20 19:13:28 +00003372
Andy Grover49493142012-01-16 16:57:08 -08003373 /* >1 page. use vmap */
3374 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
3375 if (!pages)
3376 return NULL;
3377
3378 /* convert sg[] to pages[] */
3379 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
3380 pages[i] = sg_page(sg);
3381 }
3382
3383 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
3384 kfree(pages);
3385 if (!cmd->t_data_vmap)
3386 return NULL;
3387
3388 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
Andy Grover05d1c7c2011-07-20 19:13:28 +00003389}
Andy Grover49493142012-01-16 16:57:08 -08003390EXPORT_SYMBOL(transport_kmap_data_sg);
3391
3392void transport_kunmap_data_sg(struct se_cmd *cmd)
3393{
Andy Grovera1edf9c2012-02-09 12:18:06 -08003394 if (!cmd->t_data_nents) {
Andy Grover49493142012-01-16 16:57:08 -08003395 return;
Andy Grovera1edf9c2012-02-09 12:18:06 -08003396 } else if (cmd->t_data_nents == 1) {
Andy Grover49493142012-01-16 16:57:08 -08003397 kunmap(sg_page(cmd->t_data_sg));
Andy Grovera1edf9c2012-02-09 12:18:06 -08003398 return;
3399 }
Andy Grover49493142012-01-16 16:57:08 -08003400
3401 vunmap(cmd->t_data_vmap);
3402 cmd->t_data_vmap = NULL;
3403}
3404EXPORT_SYMBOL(transport_kunmap_data_sg);
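/*
 * Pairing sketch (illustrative): a caller needing a linear view of the
 * payload brackets its access with the two helpers above:
 *
 *	buf = transport_kmap_data_sg(cmd);
 *	if (!buf)
 *		return -ENOMEM;
 *	(read or fill up to cmd->data_length bytes at buf)
 *	transport_kunmap_data_sg(cmd);
 *
 * Single-entry SGLs take the kmap() fast path; anything larger is
 * stitched together with vmap().
 */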
Andy Grover05d1c7c2011-07-20 19:13:28 +00003405
3406static int
3407transport_generic_get_mem(struct se_cmd *cmd)
3408{
Andy Groverec98f782011-07-20 19:28:46 +00003409 u32 length = cmd->data_length;
3410 unsigned int nents;
3411 struct page *page;
roland@purestorage.com9db9da32012-01-04 15:59:58 -08003412 gfp_t zero_flag;
Andy Groverec98f782011-07-20 19:28:46 +00003413 int i = 0;
Andy Grover05d1c7c2011-07-20 19:13:28 +00003414
Andy Groverec98f782011-07-20 19:28:46 +00003415 nents = DIV_ROUND_UP(length, PAGE_SIZE);
3416 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3417 if (!cmd->t_data_sg)
Andy Grovere3d6f902011-07-19 08:55:10 +00003418 return -ENOMEM;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003419
Andy Groverec98f782011-07-20 19:28:46 +00003420 cmd->t_data_nents = nents;
3421 sg_init_table(cmd->t_data_sg, nents);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003422
roland@purestorage.com9db9da32012-01-04 15:59:58 -08003423 zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
3424
Andy Groverec98f782011-07-20 19:28:46 +00003425 while (length) {
3426 u32 page_len = min_t(u32, length, PAGE_SIZE);
roland@purestorage.com9db9da32012-01-04 15:59:58 -08003427 page = alloc_page(GFP_KERNEL | zero_flag);
Andy Groverec98f782011-07-20 19:28:46 +00003428 if (!page)
3429 goto out;
3430
3431 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3432 length -= page_len;
3433 i++;
3434 }
3435 return 0;
3436
3437out:
3438 while (i >= 0) {
3439 __free_page(sg_page(&cmd->t_data_sg[i]));
3440 i--;
3441 }
3442 kfree(cmd->t_data_sg);
3443 cmd->t_data_sg = NULL;
3444 return -ENOMEM;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003445}
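/*
 * Note on the allocator above: pages are taken one at a time and
 * assembled into a scatterlist, so no higher-order allocation is ever
 * needed regardless of cmd->data_length, and only non DATA_SG_IO
 * payloads are zeroed (__GFP_ZERO).
 */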
3446
Andy Grovera1d8b492011-05-02 17:12:10 -07003447/*
Andy Groverb16a35b2012-04-03 15:51:21 -07003448 * Allocate any required resources to execute the command. For writes we
3449 * might not have the payload yet, so notify the fabric via a call to
3450 * ->write_pending instead. Otherwise place it on the execution queue.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003451 */
Andy Grovera1d8b492011-05-02 17:12:10 -07003452int transport_generic_new_cmd(struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003453{
Christoph Hellwigda0f7612011-10-18 06:57:01 -04003454 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003455 int ret = 0;
3456
3457 /*
3458 * Determine if the TCM fabric module has already allocated physical
3459 * memory, and is directly calling transport_generic_map_mem_to_cmd()
Andy Groverec98f782011-07-20 19:28:46 +00003460 * beforehand.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003461 */
Andy Groverec98f782011-07-20 19:28:46 +00003462 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3463 cmd->data_length) {
Andy Grover05d1c7c2011-07-20 19:13:28 +00003464 ret = transport_generic_get_mem(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003465 if (ret < 0)
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07003466 goto out_fail;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003467 }
Christoph Hellwigda0f7612011-10-18 06:57:01 -04003468
Christoph Hellwig4101f0a2012-04-24 00:25:03 -04003469 /* Workaround for handling zero-length control CDBs */
3470 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3471 !cmd->data_length) {
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003472 spin_lock_irq(&cmd->t_state_lock);
Roland Dreier410f6702011-11-22 13:51:32 -08003473 cmd->t_state = TRANSPORT_COMPLETE;
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003474 cmd->transport_state |= CMD_T_ACTIVE;
3475 spin_unlock_irq(&cmd->t_state_lock);
Nicholas Bellinger91ec1d32012-01-13 12:01:34 -08003476
3477 if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
3478 u8 ua_asc = 0, ua_ascq = 0;
3479
3480 core_scsi3_ua_clear_for_request_sense(cmd,
3481 &ua_asc, &ua_ascq);
3482 }
3483
Roland Dreier410f6702011-11-22 13:51:32 -08003484 INIT_WORK(&cmd->work, target_complete_ok_work);
3485 queue_work(target_completion_wq, &cmd->work);
3486 return 0;
3487 }
Christoph Hellwigda0f7612011-10-18 06:57:01 -04003488
Christoph Hellwig4101f0a2012-04-24 00:25:03 -04003489 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3490 struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
3491
3492 if (transport_cmd_get_valid_sectors(cmd) < 0)
3493 return -EINVAL;
3494
3495 BUG_ON(cmd->data_length % attr->block_size);
3496 BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
3497 attr->max_sectors);
Christoph Hellwigda0f7612011-10-18 06:57:01 -04003498 }
3499
Christoph Hellwig4101f0a2012-04-24 00:25:03 -04003500 atomic_inc(&cmd->t_fe_count);
3501 atomic_inc(&cmd->t_se_count);
3502
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003503 /*
Christoph Hellwig5787cac2012-04-24 00:25:06 -04003504 * For WRITEs, let the fabric know its buffer is ready.
3505 *
3506 * The command will be added to the execution queue after its write
3507 * data has arrived.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003508 */
3509 if (cmd->data_direction == DMA_TO_DEVICE) {
Christoph Hellwigcf572a92012-04-24 00:25:05 -04003510 target_add_to_state_list(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003511 return transport_generic_write_pending(cmd);
3512 }
3513 /*
Christoph Hellwig5787cac2012-04-24 00:25:06 -04003514 * Everything else but a WRITE, add the command to the execution queue.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003515 */
3516 transport_execute_tasks(cmd);
3517 return 0;
Christoph Hellwigda0f7612011-10-18 06:57:01 -04003518
3519out_fail:
3520 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3521 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3522 return -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003523}
Andy Grovera1d8b492011-05-02 17:12:10 -07003524EXPORT_SYMBOL(transport_generic_new_cmd);
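/*
 * Dispatch sketch from a fabric's point of view (simplified): after
 * target_setup_cmd_from_cdb() succeeds,
 *
 *	ret = transport_generic_new_cmd(se_cmd);
 *
 * either queues the command for execution or, for a WRITE, returns once
 * ->write_pending() has asked the fabric for the payload;
 * transport_generic_process_write() below then pushes the command to
 * execution when the data arrives.
 */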
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003525
3526/* transport_generic_process_write():
3527 *
3528 *
3529 */
3530void transport_generic_process_write(struct se_cmd *cmd)
3531{
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003532 transport_execute_tasks(cmd);
3533}
3534EXPORT_SYMBOL(transport_generic_process_write);
3535
Christoph Hellwige057f532011-10-17 13:56:41 -04003536static void transport_write_pending_qf(struct se_cmd *cmd)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003537{
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07003538 int ret;
3539
3540 ret = cmd->se_tfo->write_pending(cmd);
3541 if (ret == -EAGAIN || ret == -ENOMEM) {
Christoph Hellwige057f532011-10-17 13:56:41 -04003542 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
3543 cmd);
3544 transport_handle_queue_full(cmd, cmd->se_dev);
3545 }
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003546}
3547
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003548static int transport_generic_write_pending(struct se_cmd *cmd)
3549{
3550 unsigned long flags;
3551 int ret;
3552
Andy Grovera1d8b492011-05-02 17:12:10 -07003553 spin_lock_irqsave(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003554 cmd->t_state = TRANSPORT_WRITE_PENDING;
Andy Grovera1d8b492011-05-02 17:12:10 -07003555 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003556
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003557 /*
3558 * Clear the se_cmd for WRITE_PENDING status in order to set
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003559 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
3560 * from HW target mode interrupt code. This is safe to call
3561 * with transport_off=1 before the cmd->se_tfo->write_pending call,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003562 * because the se_cmd->se_lun pointer is not being cleared.
3563 */
3564 transport_cmd_check_stop(cmd, 1, 0);
3565
3566 /*
3567 * Call the fabric write_pending function here to let the
3568 * frontend know that WRITE buffers are ready.
3569 */
Andy Grovere3d6f902011-07-19 08:55:10 +00003570 ret = cmd->se_tfo->write_pending(cmd);
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07003571 if (ret == -EAGAIN || ret == -ENOMEM)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003572 goto queue_full;
3573 else if (ret < 0)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003574 return ret;
3575
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07003576 return 1;
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003577
3578queue_full:
Andy Grover6708bb22011-06-08 10:36:43 -07003579	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07003580 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
Christoph Hellwige057f532011-10-17 13:56:41 -04003581 transport_handle_queue_full(cmd, cmd->se_dev);
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07003582 return 0;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003583}
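
/*
 * Fabric side of the ->write_pending() contract above, as a minimal
 * sketch for a hypothetical driver (the my_fabric_* names are
 * illustrative, not a real module): return 0 once buffers for the
 * incoming WRITE are posted, -EAGAIN or -ENOMEM to take the QUEUE_FULL
 * retry path, or another negative errno to fail the command outright.
 *
 *	static int my_fabric_write_pending(struct se_cmd *se_cmd)
 *	{
 *		struct my_fabric_cmd *cmd = container_of(se_cmd,
 *					struct my_fabric_cmd, se_cmd);
 *
 *		if (my_fabric_post_data_buffers(cmd) < 0)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * When the WRITE payload later arrives, the fabric kicks execution with
 * transport_generic_handle_data(se_cmd).
 */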
3584
Nicholas Bellinger39c05f32011-10-08 13:59:52 -07003585void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003586{
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003587 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
Andy Groverc8e31f22012-01-19 13:39:17 -08003588 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003589 transport_wait_for_tasks(cmd);
3590
Christoph Hellwig35462972011-05-31 23:56:57 -04003591 transport_release_cmd(cmd);
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003592 } else {
3593 if (wait_for_tasks)
3594 transport_wait_for_tasks(cmd);
3595
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003596 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3597
Christoph Hellwig82f1c8a2011-09-13 23:09:01 +02003598 if (cmd->se_lun)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003599 transport_lun_remove_cmd(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003600
Nicholas Bellinger39c05f32011-10-08 13:59:52 -07003601 transport_put_cmd(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003602 }
3603}
3604EXPORT_SYMBOL(transport_generic_free_cmd);
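
/*
 * Typical call sites, sketched (not taken from a real fabric driver):
 * pass wait_for_tasks = 0 on the normal completion path, and
 * wait_for_tasks = 1 from exception or reset handlers that must block
 * until the storage engine has quiesced the descriptor.
 *
 *	transport_generic_free_cmd(&cmd->se_cmd, 0);	normal completion
 *	transport_generic_free_cmd(&cmd->se_cmd, 1);	exception teardown
 */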
3605
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003606/* target_get_sess_cmd - Add command to active ->sess_cmd_list
3607 * @se_sess: session to reference
3608 * @se_cmd: command descriptor to add
Nicholas Bellingera6360782011-11-18 20:36:22 -08003609 * @ack_kref: Signal that the fabric will ack with an extra target_put_sess_cmd()
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003610 */
Nicholas Bellingera6360782011-11-18 20:36:22 -08003611void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3612 bool ack_kref)
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003613{
3614 unsigned long flags;
3615
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003616 kref_init(&se_cmd->cmd_kref);
Nicholas Bellingera6360782011-11-18 20:36:22 -08003617 /*
3618 * Add a second kref if the fabric caller is expecting to handle
3619 * fabric acknowledgement that requires two target_put_sess_cmd()
3620 * invocations before se_cmd descriptor release.
3621 */
Nicholas Bellinger86715562012-02-13 01:07:22 -08003622	if (ack_kref) {
Nicholas Bellingera6360782011-11-18 20:36:22 -08003623 kref_get(&se_cmd->cmd_kref);
Nicholas Bellinger86715562012-02-13 01:07:22 -08003624 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
3625 }
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003626
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003627 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3628 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3629 se_cmd->check_release = 1;
3630 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3631}
3632EXPORT_SYMBOL(target_get_sess_cmd);
3633
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003634static void target_release_cmd_kref(struct kref *kref)
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003635{
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003636 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
3637 struct se_session *se_sess = se_cmd->se_sess;
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003638 unsigned long flags;
3639
3640 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3641 if (list_empty(&se_cmd->se_cmd_list)) {
3642 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
Nicholas Bellingerffc32d52012-02-13 02:35:01 -08003643 se_cmd->se_tfo->release_cmd(se_cmd);
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003644 return;
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003645 }
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003646 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3647 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3648 complete(&se_cmd->cmd_wait_comp);
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003649 return;
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003650 }
3651 list_del(&se_cmd->se_cmd_list);
3652 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3653
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08003654 se_cmd->se_tfo->release_cmd(se_cmd);
3655}
3656
3657/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
3658 * @se_sess: session to reference
3659 * @se_cmd: command descriptor to drop
3660 */
3661int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3662{
3663 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003664}
3665EXPORT_SYMBOL(target_put_sess_cmd);
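
/*
 * Sketch of the kref pairing (hypothetical fabric flow): with
 * ack_kref = true the fabric owes two target_put_sess_cmd() calls, one
 * when target processing finishes and one when the initiator's
 * acknowledgement arrives; the final put runs target_release_cmd_kref()
 * and hence se_tfo->release_cmd().
 *
 *	target_get_sess_cmd(se_sess, se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	processing complete
 *	...
 *	target_put_sess_cmd(se_sess, se_cmd);	ack received, final put
 */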
3666
3667/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
3668 * @se_sess: session to split
3669 */
3670void target_splice_sess_cmd_list(struct se_session *se_sess)
3671{
3672 struct se_cmd *se_cmd;
3673 unsigned long flags;
3674
3675 WARN_ON(!list_empty(&se_sess->sess_wait_list));
3676 INIT_LIST_HEAD(&se_sess->sess_wait_list);
3677
3678 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3679 se_sess->sess_tearing_down = 1;
3680
3681 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
3682
3683 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
3684 se_cmd->cmd_wait_set = 1;
3685
3686 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3687}
3688EXPORT_SYMBOL(target_splice_sess_cmd_list);
3689
3690/* target_wait_for_sess_cmds - Wait for outstanding descriptors
3691 * @se_sess: session to wait for active I/O
3692 * @wait_for_tasks: Make extra transport_wait_for_tasks call
3693 */
3694void target_wait_for_sess_cmds(
3695 struct se_session *se_sess,
3696 int wait_for_tasks)
3697{
3698 struct se_cmd *se_cmd, *tmp_cmd;
3699 bool rc = false;
3700
3701 list_for_each_entry_safe(se_cmd, tmp_cmd,
3702 &se_sess->sess_wait_list, se_cmd_list) {
3703 list_del(&se_cmd->se_cmd_list);
3704
3705 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
3706 " %d\n", se_cmd, se_cmd->t_state,
3707 se_cmd->se_tfo->get_cmd_state(se_cmd));
3708
3709 if (wait_for_tasks) {
3710 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
3711 " fabric state: %d\n", se_cmd, se_cmd->t_state,
3712 se_cmd->se_tfo->get_cmd_state(se_cmd));
3713
3714 rc = transport_wait_for_tasks(se_cmd);
3715
3716 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
3717 " fabric state: %d\n", se_cmd, se_cmd->t_state,
3718 se_cmd->se_tfo->get_cmd_state(se_cmd));
3719 }
3720
3721 if (!rc) {
3722 wait_for_completion(&se_cmd->cmd_wait_comp);
3723 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
3724 " fabric state: %d\n", se_cmd, se_cmd->t_state,
3725 se_cmd->se_tfo->get_cmd_state(se_cmd));
3726 }
3727
3728 se_cmd->se_tfo->release_cmd(se_cmd);
3729 }
3730}
3731EXPORT_SYMBOL(target_wait_for_sess_cmds);
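
/*
 * Session teardown ordering, sketched (my_fabric_stop_new_io() is a
 * hypothetical helper, not part of this API): splice and mark the
 * session as tearing down first, stop new fabric I/O, then wait for the
 * spliced descriptors before deregistering the session.
 *
 *	target_splice_sess_cmd_list(se_sess);
 *	my_fabric_stop_new_io(conn);
 *	target_wait_for_sess_cmds(se_sess, 0);
 *	transport_deregister_session(se_sess);
 */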
3732
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003733/* transport_lun_wait_for_tasks():
3734 *
3735 * Called from ConfigFS context to stop the passed struct se_cmd to allow
3736 * a struct se_lun to be shut down successfully.
3737 */
3738static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
3739{
3740 unsigned long flags;
Christoph Hellwigcf572a92012-04-24 00:25:05 -04003741 int ret = 0;
3742
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003743 /*
3744 * If the frontend has already requested this struct se_cmd to
3745 * be stopped, we can safely ignore this struct se_cmd.
3746 */
Andy Grovera1d8b492011-05-02 17:12:10 -07003747 spin_lock_irqsave(&cmd->t_state_lock, flags);
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003748 if (cmd->transport_state & CMD_T_STOP) {
3749 cmd->transport_state &= ~CMD_T_LUN_STOP;
3750
3751 pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
3752 cmd->se_tfo->get_task_tag(cmd));
Andy Grovera1d8b492011-05-02 17:12:10 -07003753 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003754 transport_cmd_check_stop(cmd, 1, 0);
Andy Grovere3d6f902011-07-19 08:55:10 +00003755 return -EPERM;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003756 }
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003757 cmd->transport_state |= CMD_T_LUN_FE_STOP;
Andy Grovera1d8b492011-05-02 17:12:10 -07003758 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003759
Andy Grover5951146d2011-07-19 10:26:37 +00003760 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003761
Christoph Hellwigcf572a92012-04-24 00:25:05 -04003762	/* XXX: audit task_flags checks. */
3763 spin_lock_irqsave(&cmd->t_state_lock, flags);
3764 if ((cmd->transport_state & CMD_T_BUSY) &&
3765 (cmd->transport_state & CMD_T_SENT)) {
3766 if (!target_stop_cmd(cmd, &flags))
3767 ret++;
3768 spin_lock_irqsave(&cmd->t_state_lock, flags);
3769 } else {
3770		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3772 target_remove_from_execute_list(cmd);
3773 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003774
Christoph Hellwig785fdf72012-04-24 00:25:04 -04003775	pr_debug("ConfigFS: cmd: %p stop tasks ret: %d\n", cmd, ret);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003777 if (!ret) {
Andy Grover6708bb22011-06-08 10:36:43 -07003778 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00003779 cmd->se_tfo->get_task_tag(cmd));
Andy Grovera1d8b492011-05-02 17:12:10 -07003780 wait_for_completion(&cmd->transport_lun_stop_comp);
Andy Grover6708bb22011-06-08 10:36:43 -07003781 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00003782 cmd->se_tfo->get_task_tag(cmd));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003783 }
Christoph Hellwig3df8d402011-10-17 13:56:43 -04003784 transport_remove_cmd_from_queue(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003785
3786 return 0;
3787}
3788
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003789static void __transport_clear_lun_from_sessions(struct se_lun *lun)
3790{
3791 struct se_cmd *cmd = NULL;
3792 unsigned long lun_flags, cmd_flags;
3793 /*
3794 * Do exception processing and return CHECK_CONDITION status to the
3795 * Initiator Port.
3796 */
3797 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
Andy Grover5951146d2011-07-19 10:26:37 +00003798 while (!list_empty(&lun->lun_cmd_list)) {
3799 cmd = list_first_entry(&lun->lun_cmd_list,
3800 struct se_cmd, se_lun_node);
Christoph Hellwig3d26fea2011-12-21 14:14:05 -05003801 list_del_init(&cmd->se_lun_node);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003802
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003803 /*
3804		 * This will notify transport_cmd_check_stop() that a LUN
3805		 * shutdown is in progress for this se_cmd.
3807 */
Andy Grovera1d8b492011-05-02 17:12:10 -07003808 spin_lock(&cmd->t_state_lock);
Andy Grover6708bb22011-06-08 10:36:43 -07003809 pr_debug("SE_LUN[%d] - Setting cmd->transport"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003810 "_lun_stop for ITT: 0x%08x\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00003811 cmd->se_lun->unpacked_lun,
3812 cmd->se_tfo->get_task_tag(cmd));
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003813 cmd->transport_state |= CMD_T_LUN_STOP;
Andy Grovera1d8b492011-05-02 17:12:10 -07003814 spin_unlock(&cmd->t_state_lock);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003815
3816 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
3817
Andy Grover6708bb22011-06-08 10:36:43 -07003818 if (!cmd->se_lun) {
3819 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00003820 cmd->se_tfo->get_task_tag(cmd),
3821 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003822 BUG();
3823 }
3824 /*
3825		 * If the storage engine still owns this se_cmd, determine
3826 * and/or stop its context.
3827 */
Andy Grover6708bb22011-06-08 10:36:43 -07003828 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
Andy Grovere3d6f902011-07-19 08:55:10 +00003829 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
3830 cmd->se_tfo->get_task_tag(cmd));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003831
Andy Grovere3d6f902011-07-19 08:55:10 +00003832 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003833 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
3834 continue;
3835 }
3836
Andy Grover6708bb22011-06-08 10:36:43 -07003837 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003838 "_wait_for_tasks(): SUCCESS\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00003839 cmd->se_lun->unpacked_lun,
3840 cmd->se_tfo->get_task_tag(cmd));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003841
Andy Grovera1d8b492011-05-02 17:12:10 -07003842 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003843 if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
Andy Grovera1d8b492011-05-02 17:12:10 -07003844 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003845 goto check_cond;
3846 }
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003847 cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
Christoph Hellwigcf572a92012-04-24 00:25:05 -04003848 target_remove_from_state_list(cmd);
Andy Grovera1d8b492011-05-02 17:12:10 -07003849 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003850
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003851 /*
3852 * The Storage engine stopped this struct se_cmd before it was
3853		 * sent to the fabric frontend for delivery back to the
3854		 * Initiator Node. Return this SCSI CDB back with a
3855		 * CHECK_CONDITION status.
3856 */
3857check_cond:
3858 transport_send_check_condition_and_sense(cmd,
3859 TCM_NON_EXISTENT_LUN, 0);
3860 /*
3861		 * If the fabric frontend is waiting for this se_cmd to
3862		 * be released, notify the waiting thread now that the LU has
3863 * finished accessing it.
3864 */
Andy Grovera1d8b492011-05-02 17:12:10 -07003865 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003866 if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
Andy Grover6708bb22011-06-08 10:36:43 -07003867 pr_debug("SE_LUN[%d] - Detected FE stop for"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003868 " struct se_cmd: %p ITT: 0x%08x\n",
3869 lun->unpacked_lun,
Andy Grovere3d6f902011-07-19 08:55:10 +00003870 cmd, cmd->se_tfo->get_task_tag(cmd));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003871
Andy Grovera1d8b492011-05-02 17:12:10 -07003872 spin_unlock_irqrestore(&cmd->t_state_lock,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003873 cmd_flags);
3874 transport_cmd_check_stop(cmd, 1, 0);
Andy Grovera1d8b492011-05-02 17:12:10 -07003875 complete(&cmd->transport_lun_fe_stop_comp);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003876 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
3877 continue;
3878 }
Andy Grover6708bb22011-06-08 10:36:43 -07003879 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00003880 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003881
Andy Grovera1d8b492011-05-02 17:12:10 -07003882 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003883 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
3884 }
3885 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
3886}
3887
3888static int transport_clear_lun_thread(void *p)
3889{
Jörn Engel8359cf42011-11-24 02:05:51 +01003890 struct se_lun *lun = p;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003891
3892 __transport_clear_lun_from_sessions(lun);
3893 complete(&lun->lun_shutdown_comp);
3894
3895 return 0;
3896}
3897
3898int transport_clear_lun_from_sessions(struct se_lun *lun)
3899{
3900 struct task_struct *kt;
3901
Andy Grover5951146d2011-07-19 10:26:37 +00003902 kt = kthread_run(transport_clear_lun_thread, lun,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003903 "tcm_cl_%u", lun->unpacked_lun);
3904 if (IS_ERR(kt)) {
Andy Grover6708bb22011-06-08 10:36:43 -07003905 pr_err("Unable to start clear_lun thread\n");
Andy Grovere3d6f902011-07-19 08:55:10 +00003906 return PTR_ERR(kt);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003907 }
3908 wait_for_completion(&lun->lun_shutdown_comp);
3909
3910 return 0;
3911}
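
/*
 * The function above is the synchronous helper-thread idiom: spawn a
 * kthread to run the scan, then block on a completion it fires when
 * done. The same pattern in miniature (generic sketch, with
 * hypothetical my_ctx/do_work names):
 *
 *	static int worker_fn(void *p)
 *	{
 *		struct my_ctx *ctx = p;
 *
 *		do_work(ctx);
 *		complete(&ctx->done);
 *		return 0;
 *	}
 *
 *	init_completion(&ctx->done);
 *	kt = kthread_run(worker_fn, ctx, "my_worker");
 *	if (IS_ERR(kt))
 *		return PTR_ERR(kt);
 *	wait_for_completion(&ctx->done);
 */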
3912
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003913/**
3914 * transport_wait_for_tasks - wait for completion to occur
3915 * @cmd: command to wait
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003916 *
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003917 * Called from frontend fabric context to wait for storage engine
3918 * to pause and/or release frontend generated struct se_cmd.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003919 */
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003920bool transport_wait_for_tasks(struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003921{
3922 unsigned long flags;
3923
Andy Grovera1d8b492011-05-02 17:12:10 -07003924 spin_lock_irqsave(&cmd->t_state_lock, flags);
Andy Groverc8e31f22012-01-19 13:39:17 -08003925 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
3926 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003927 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003928 return false;
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003929 }
3930 /*
3931 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
3932 * has been set in transport_set_supported_SAM_opcode().
3933 */
Andy Groverc8e31f22012-01-19 13:39:17 -08003934 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
3935 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003936 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003937 return false;
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003938 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003939 /*
3940	 * If we are already stopped due to an external event (i.e. LUN shutdown),
3941	 * sleep until the connection can have the passed struct se_cmd back.
Andy Grovera1d8b492011-05-02 17:12:10 -07003942	 * The cmd->transport_lun_fe_stop_comp will be completed by
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003943 * transport_clear_lun_from_sessions() once the ConfigFS context caller
3944 * has completed its operation on the struct se_cmd.
3945 */
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003946 if (cmd->transport_state & CMD_T_LUN_STOP) {
Andy Grover6708bb22011-06-08 10:36:43 -07003947 pr_debug("wait_for_tasks: Stopping"
Andy Grovere3d6f902011-07-19 08:55:10 +00003948			" wait_for_completion(&cmd->transport_lun_fe"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003949			"_stop_comp) for ITT: 0x%08x\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00003950 cmd->se_tfo->get_task_tag(cmd));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003951 /*
3952 * There is a special case for WRITES where a FE exception +
3953 * LUN shutdown means ConfigFS context is still sleeping on
3954 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
3955 * We go ahead and up transport_lun_stop_comp just to be sure
3956 * here.
3957 */
Andy Grovera1d8b492011-05-02 17:12:10 -07003958 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3959 complete(&cmd->transport_lun_stop_comp);
3960 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
3961 spin_lock_irqsave(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003962
Christoph Hellwigcf572a92012-04-24 00:25:05 -04003963 target_remove_from_state_list(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003964 /*
3965 * At this point, the frontend who was the originator of this
3966 * struct se_cmd, now owns the structure and can be released through
3967 * normal means below.
3968 */
Andy Grover6708bb22011-06-08 10:36:43 -07003969 pr_debug("wait_for_tasks: Stopped"
Andy Grovere3d6f902011-07-19 08:55:10 +00003970			" wait_for_completion(&cmd->transport_lun_fe_"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003971			"stop_comp) for ITT: 0x%08x\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00003972 cmd->se_tfo->get_task_tag(cmd));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003973
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003974 cmd->transport_state &= ~CMD_T_LUN_STOP;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003975 }
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003976
Nicholas Bellinger3d289342012-02-13 02:38:14 -08003977 if (!(cmd->transport_state & CMD_T_ACTIVE)) {
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003978 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07003979 return false;
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07003980 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003981
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003982 cmd->transport_state |= CMD_T_STOP;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003983
Andy Grover6708bb22011-06-08 10:36:43 -07003984 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003985 " i_state: %d, t_state: %d, CMD_T_STOP\n",
Christoph Hellwigf2da9db2011-10-17 13:56:51 -04003986 cmd, cmd->se_tfo->get_task_tag(cmd),
3987 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003988
Andy Grovera1d8b492011-05-02 17:12:10 -07003989 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003990
Andy Grover5951146d2011-07-19 10:26:37 +00003991 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003992
Andy Grovera1d8b492011-05-02 17:12:10 -07003993 wait_for_completion(&cmd->t_transport_stop_comp);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003994
Andy Grovera1d8b492011-05-02 17:12:10 -07003995 spin_lock_irqsave(&cmd->t_state_lock, flags);
Christoph Hellwig7d680f32011-12-21 14:13:47 -05003996 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003997
Andy Grover6708bb22011-06-08 10:36:43 -07003998	pr_debug("wait_for_tasks: Stopped wait_for_completion("
Andy Grovera1d8b492011-05-02 17:12:10 -07003999 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00004000 cmd->se_tfo->get_task_tag(cmd));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004001
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07004002 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07004003
4004 return true;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004005}
Nicholas Bellingerd14921d2011-10-09 01:00:58 -07004006EXPORT_SYMBOL(transport_wait_for_tasks);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004007
4008static int transport_get_sense_codes(
4009 struct se_cmd *cmd,
4010 u8 *asc,
4011 u8 *ascq)
4012{
4013 *asc = cmd->scsi_asc;
4014 *ascq = cmd->scsi_ascq;
4015
4016 return 0;
4017}
4018
4019static int transport_set_sense_codes(
4020 struct se_cmd *cmd,
4021 u8 asc,
4022 u8 ascq)
4023{
4024 cmd->scsi_asc = asc;
4025 cmd->scsi_ascq = ascq;
4026
4027 return 0;
4028}
4029
4030int transport_send_check_condition_and_sense(
4031 struct se_cmd *cmd,
4032 u8 reason,
4033 int from_transport)
4034{
4035 unsigned char *buffer = cmd->sense_buffer;
4036 unsigned long flags;
4037 int offset;
4038 u8 asc = 0, ascq = 0;
4039
Andy Grovera1d8b492011-05-02 17:12:10 -07004040 spin_lock_irqsave(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004041 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
Andy Grovera1d8b492011-05-02 17:12:10 -07004042 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004043 return 0;
4044 }
4045 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
Andy Grovera1d8b492011-05-02 17:12:10 -07004046 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004047
4048 if (!reason && from_transport)
4049 goto after_reason;
4050
4051 if (!from_transport)
4052 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4053 /*
4054 * Data Segment and SenseLength of the fabric response PDU.
4055 *
4056 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4057 * from include/scsi/scsi_cmnd.h
4058 */
Andy Grovere3d6f902011-07-19 08:55:10 +00004059 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004060 TRANSPORT_SENSE_BUFFER);
4061 /*
4062	 * Actual SENSE DATA, see SPC-3 7.23.2. SPC_SENSE_KEY_OFFSET uses
4063 * SENSE KEY values from include/scsi/scsi.h
4064 */
4065 switch (reason) {
4066 case TCM_NON_EXISTENT_LUN:
Nicholas Bellingereb39d342011-07-26 16:59:00 -07004067 /* CURRENT ERROR */
4068 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004069 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingereb39d342011-07-26 16:59:00 -07004070 /* ILLEGAL REQUEST */
4071 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4072 /* LOGICAL UNIT NOT SUPPORTED */
4073 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4074 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004075 case TCM_UNSUPPORTED_SCSI_OPCODE:
4076 case TCM_SECTOR_COUNT_TOO_MANY:
4077 /* CURRENT ERROR */
4078 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004079 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004080 /* ILLEGAL REQUEST */
4081 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4082 /* INVALID COMMAND OPERATION CODE */
4083 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4084 break;
4085 case TCM_UNKNOWN_MODE_PAGE:
4086 /* CURRENT ERROR */
4087 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004088 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004089 /* ILLEGAL REQUEST */
4090 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4091 /* INVALID FIELD IN CDB */
4092 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4093 break;
4094 case TCM_CHECK_CONDITION_ABORT_CMD:
4095 /* CURRENT ERROR */
4096 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004097 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004098 /* ABORTED COMMAND */
4099 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4100 /* BUS DEVICE RESET FUNCTION OCCURRED */
4101 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4102 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4103 break;
4104 case TCM_INCORRECT_AMOUNT_OF_DATA:
4105 /* CURRENT ERROR */
4106 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004107 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004108 /* ABORTED COMMAND */
4109 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4110 /* WRITE ERROR */
4111 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4112 /* NOT ENOUGH UNSOLICITED DATA */
4113 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4114 break;
4115 case TCM_INVALID_CDB_FIELD:
4116 /* CURRENT ERROR */
4117 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004118 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Roland Dreier9fbc8902012-01-09 17:54:00 -08004119 /* ILLEGAL REQUEST */
4120 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004121 /* INVALID FIELD IN CDB */
4122 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4123 break;
4124 case TCM_INVALID_PARAMETER_LIST:
4125 /* CURRENT ERROR */
4126 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004127 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Roland Dreier9fbc8902012-01-09 17:54:00 -08004128 /* ILLEGAL REQUEST */
4129 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004130 /* INVALID FIELD IN PARAMETER LIST */
4131 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4132 break;
4133 case TCM_UNEXPECTED_UNSOLICITED_DATA:
4134 /* CURRENT ERROR */
4135 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004136 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004137 /* ABORTED COMMAND */
4138 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4139 /* WRITE ERROR */
4140 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4141 /* UNEXPECTED_UNSOLICITED_DATA */
4142 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4143 break;
4144 case TCM_SERVICE_CRC_ERROR:
4145 /* CURRENT ERROR */
4146 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004147 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004148 /* ABORTED COMMAND */
4149 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4150 /* PROTOCOL SERVICE CRC ERROR */
4151 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4152 /* N/A */
4153 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4154 break;
4155 case TCM_SNACK_REJECTED:
4156 /* CURRENT ERROR */
4157 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004158 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004159 /* ABORTED COMMAND */
4160 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4161 /* READ ERROR */
4162 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4163 /* FAILED RETRANSMISSION REQUEST */
4164 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4165 break;
4166 case TCM_WRITE_PROTECTED:
4167 /* CURRENT ERROR */
4168 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004169 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004170 /* DATA PROTECT */
4171 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4172 /* WRITE PROTECTED */
4173 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4174 break;
4175 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4176 /* CURRENT ERROR */
4177 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004178 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004179 /* UNIT ATTENTION */
4180 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4181 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4182 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4183 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4184 break;
4185 case TCM_CHECK_CONDITION_NOT_READY:
4186 /* CURRENT ERROR */
4187 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004188 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004189 /* Not Ready */
4190 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4191 transport_get_sense_codes(cmd, &asc, &ascq);
4192 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4193 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4194 break;
4195 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4196 default:
4197 /* CURRENT ERROR */
4198 buffer[offset] = 0x70;
Roland Dreier895f3022011-12-13 14:55:33 -08004199 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004200 /* ILLEGAL REQUEST */
4201 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4202 /* LOGICAL UNIT COMMUNICATION FAILURE */
4203 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
4204 break;
4205 }
4206 /*
4207 * This code uses linux/include/scsi/scsi.h SAM status codes!
4208 */
4209 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4210 /*
4211 * Automatically padded, this value is encoded in the fabric's
4212 * data_length response PDU containing the SCSI defined sense data.
4213 */
4214 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
4215
4216after_reason:
Nicholas Bellinger07bde792011-06-13 14:46:09 -07004217 return cmd->se_tfo->queue_status(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004218}
4219EXPORT_SYMBOL(transport_send_check_condition_and_sense);
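
/*
 * For reference, the fixed-format sense data bytes the switch above
 * fills in, relative to the fabric-supplied 'offset' (byte positions
 * match the SPC_*_OFFSET constants used in the code):
 *
 *	byte  0:	0x70	current error, fixed format
 *	byte  2:	sense key
 *	byte  7:	10	additional sense length
 *	byte 12:	ASC	additional sense code
 *	byte 13:	ASCQ	additional sense code qualifier
 */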
4220
4221int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4222{
4223 int ret = 0;
4224
Christoph Hellwig7d680f32011-12-21 14:13:47 -05004225 if (cmd->transport_state & CMD_T_ABORTED) {
Andy Grover6708bb22011-06-08 10:36:43 -07004226 if (!send_status ||
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004227 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4228 return 1;
Andy Grover8b1e1242012-04-03 15:51:12 -07004229
Andy Grover6708bb22011-06-08 10:36:43 -07004230 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004231 " status for CDB: 0x%02x ITT: 0x%08x\n",
Andy Grovera1d8b492011-05-02 17:12:10 -07004232 cmd->t_task_cdb[0],
Andy Grovere3d6f902011-07-19 08:55:10 +00004233 cmd->se_tfo->get_task_tag(cmd));
Andy Grover8b1e1242012-04-03 15:51:12 -07004234
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004235 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
Andy Grovere3d6f902011-07-19 08:55:10 +00004236 cmd->se_tfo->queue_status(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004237 ret = 1;
4238 }
4239 return ret;
4240}
4241EXPORT_SYMBOL(transport_check_aborted_status);
4242
4243void transport_send_task_abort(struct se_cmd *cmd)
4244{
Nicholas Bellingerc252f002011-09-29 14:22:13 -07004245 unsigned long flags;
4246
4247 spin_lock_irqsave(&cmd->t_state_lock, flags);
4248 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4249 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4250 return;
4251 }
4252 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4253
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004254 /*
4255 * If there are still expected incoming fabric WRITEs, we wait
4256	 * until they have completed before sending a TASK_ABORTED
4257 * response. This response with TASK_ABORTED status will be
4258 * queued back to fabric module by transport_check_aborted_status().
4259 */
4260 if (cmd->data_direction == DMA_TO_DEVICE) {
Andy Grovere3d6f902011-07-19 08:55:10 +00004261 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
Christoph Hellwig7d680f32011-12-21 14:13:47 -05004262 cmd->transport_state |= CMD_T_ABORTED;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004263 smp_mb__after_atomic_inc();
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004264 }
4265 }
4266 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
Andy Grover8b1e1242012-04-03 15:51:12 -07004267
Andy Grover6708bb22011-06-08 10:36:43 -07004268 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
Andy Grovera1d8b492011-05-02 17:12:10 -07004269 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
Andy Grovere3d6f902011-07-19 08:55:10 +00004270 cmd->se_tfo->get_task_tag(cmd));
Andy Grover8b1e1242012-04-03 15:51:12 -07004271
Andy Grovere3d6f902011-07-19 08:55:10 +00004272 cmd->se_tfo->queue_status(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004273}
4274
Christoph Hellwige26d99a2011-11-14 12:30:30 -05004275static int transport_generic_do_tmr(struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004276{
Andy Grover5951146d2011-07-19 10:26:37 +00004277 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004278 struct se_tmr_req *tmr = cmd->se_tmr_req;
4279 int ret;
4280
4281 switch (tmr->function) {
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07004282 case TMR_ABORT_TASK:
Nicholas Bellinger3d289342012-02-13 02:38:14 -08004283 core_tmr_abort_task(dev, tmr, cmd->se_sess);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004284 break;
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07004285 case TMR_ABORT_TASK_SET:
4286 case TMR_CLEAR_ACA:
4287 case TMR_CLEAR_TASK_SET:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004288 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4289 break;
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07004290 case TMR_LUN_RESET:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004291 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4292 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4293 TMR_FUNCTION_REJECTED;
4294 break;
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07004295 case TMR_TARGET_WARM_RESET:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004296 tmr->response = TMR_FUNCTION_REJECTED;
4297 break;
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07004298 case TMR_TARGET_COLD_RESET:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004299 tmr->response = TMR_FUNCTION_REJECTED;
4300 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004301 default:
Andy Grover6708bb22011-06-08 10:36:43 -07004302		pr_err("Unknown TMR function: 0x%02x.\n",
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004303 tmr->function);
4304 tmr->response = TMR_FUNCTION_REJECTED;
4305 break;
4306 }
4307
4308 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
Andy Grovere3d6f902011-07-19 08:55:10 +00004309 cmd->se_tfo->queue_tm_rsp(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004310
Christoph Hellwigb7b8bef2011-10-17 13:56:44 -04004311 transport_cmd_check_stop_to_fabric(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004312 return 0;
4313}
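
/*
 * How a fabric reaches this path, sketched against the fabric API in
 * this tree (error handling abbreviated): allocate the se_tmr_req, then
 * hand the command to the processing thread, which dispatches
 * TRANSPORT_PROCESS_TMR to transport_generic_do_tmr() above.
 *
 *	tmr_req = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr,
 *				     TMR_LUN_RESET, GFP_KERNEL);
 *	if (IS_ERR(tmr_req))
 *		goto fail;
 *	transport_generic_handle_tmr(se_cmd);
 */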
4314
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004315/* transport_processing_thread():
4316 *
4317 * Per-device thread: dequeue se_cmd descriptors and dispatch on t_state.
4318 */
4319static int transport_processing_thread(void *param)
4320{
Andy Grover5951146d2011-07-19 10:26:37 +00004321 int ret;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004322 struct se_cmd *cmd;
Jörn Engel8359cf42011-11-24 02:05:51 +01004323 struct se_device *dev = param;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004324
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004325 while (!kthread_should_stop()) {
Andy Grovere3d6f902011-07-19 08:55:10 +00004326 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4327 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004328 kthread_should_stop());
4329 if (ret < 0)
4330 goto out;
4331
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004332get_cmd:
Andy Grover5951146d2011-07-19 10:26:37 +00004333 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4334 if (!cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004335 continue;
4336
Andy Grover5951146d2011-07-19 10:26:37 +00004337 switch (cmd->t_state) {
Christoph Hellwig680b73c2011-09-12 21:51:14 +02004338 case TRANSPORT_NEW_CMD:
4339 BUG();
4340 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004341 case TRANSPORT_NEW_CMD_MAP:
Andy Grover6708bb22011-06-08 10:36:43 -07004342 if (!cmd->se_tfo->new_cmd_map) {
4343 pr_err("cmd->se_tfo->new_cmd_map is"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004344 " NULL for TRANSPORT_NEW_CMD_MAP\n");
4345 BUG();
4346 }
Andy Grovere3d6f902011-07-19 08:55:10 +00004347 ret = cmd->se_tfo->new_cmd_map(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004348 if (ret < 0) {
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07004349 transport_generic_request_failure(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004350 break;
4351 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004352 ret = transport_generic_new_cmd(cmd);
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07004353 if (ret < 0) {
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07004354 transport_generic_request_failure(cmd);
4355 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004356 }
4357 break;
4358 case TRANSPORT_PROCESS_WRITE:
4359 transport_generic_process_write(cmd);
4360 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004361 case TRANSPORT_PROCESS_TMR:
4362 transport_generic_do_tmr(cmd);
4363 break;
Nicholas Bellinger07bde792011-06-13 14:46:09 -07004364 case TRANSPORT_COMPLETE_QF_WP:
Christoph Hellwige057f532011-10-17 13:56:41 -04004365 transport_write_pending_qf(cmd);
4366 break;
4367 case TRANSPORT_COMPLETE_QF_OK:
4368 transport_complete_qf(cmd);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07004369 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004370 default:
Christoph Hellwigf2da9db2011-10-17 13:56:51 -04004371 pr_err("Unknown t_state: %d for ITT: 0x%08x "
4372 "i_state: %d on SE LUN: %u\n",
4373 cmd->t_state,
Andy Grovere3d6f902011-07-19 08:55:10 +00004374 cmd->se_tfo->get_task_tag(cmd),
4375 cmd->se_tfo->get_cmd_state(cmd),
4376 cmd->se_lun->unpacked_lun);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004377 BUG();
4378 }
4379
4380 goto get_cmd;
4381 }
4382
4383out:
Christoph Hellwigcf572a92012-04-24 00:25:05 -04004384 WARN_ON(!list_empty(&dev->state_list));
Nicholas Bellingerce8762f2011-10-09 02:19:01 -07004385 WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08004386 dev->process_thread = NULL;
4387 return 0;
4388}