blob: 965a308e10a5d2c011a05b60bdd706b019c47776 [file] [log] [blame]
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001/*******************************************************************************
2 * Filename: target_core_transport.c
3 *
4 * This file contains the Generic Target Engine Core.
5 *
Nicholas Bellinger4c762512013-09-05 15:29:12 -07006 * (c) Copyright 2002-2013 Datera, Inc.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08007 *
8 * Nicholas A. Bellinger <nab@kernel.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 ******************************************************************************/
25
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080026#include <linux/net.h>
27#include <linux/delay.h>
28#include <linux/string.h>
29#include <linux/timer.h>
30#include <linux/slab.h>
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080031#include <linux/spinlock.h>
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080032#include <linux/kthread.h>
33#include <linux/in.h>
34#include <linux/cdrom.h>
Paul Gortmaker827509e2011-08-30 14:20:44 -040035#include <linux/module.h>
Roland Dreier015487b2012-02-13 16:18:17 -080036#include <linux/ratelimit.h>
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080037#include <asm/unaligned.h>
38#include <net/sock.h>
39#include <net/tcp.h>
40#include <scsi/scsi.h>
41#include <scsi/scsi_cmnd.h>
Nicholas Bellingere66ecd52011-05-19 20:19:14 -070042#include <scsi/scsi_tcq.h>
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080043
44#include <target/target_core_base.h>
Christoph Hellwigc4795fb2011-11-16 09:46:48 -050045#include <target/target_core_backend.h>
46#include <target/target_core_fabric.h>
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080047
Christoph Hellwige26d99a2011-11-14 12:30:30 -050048#include "target_core_internal.h"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080049#include "target_core_alua.h"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080050#include "target_core_pr.h"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080051#include "target_core_ua.h"
52
Roland Dreiere5c0d6a2013-06-26 17:36:17 -070053#define CREATE_TRACE_POINTS
54#include <trace/events/target.h>
55
Christoph Hellwig35e0e752011-10-17 13:56:53 -040056static struct workqueue_struct *target_completion_wq;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080057static struct kmem_cache *se_sess_cache;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080058struct kmem_cache *se_ua_cache;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080059struct kmem_cache *t10_pr_reg_cache;
60struct kmem_cache *t10_alua_lu_gp_cache;
61struct kmem_cache *t10_alua_lu_gp_mem_cache;
62struct kmem_cache *t10_alua_tg_pt_gp_cache;
63struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
Hannes Reinecke229d4f12013-12-17 09:18:50 +010064struct kmem_cache *t10_alua_lba_map_cache;
65struct kmem_cache *t10_alua_lba_map_mem_cache;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080066
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080067static void transport_complete_task_attr(struct se_cmd *cmd);
Nicholas Bellinger07bde792011-06-13 14:46:09 -070068static void transport_handle_queue_full(struct se_cmd *cmd,
Christoph Hellwige057f532011-10-17 13:56:41 -040069 struct se_device *dev);
Nicholas Bellingerd5ddad4162013-05-31 00:46:11 -070070static int transport_put_cmd(struct se_cmd *cmd);
Christoph Hellwig35e0e752011-10-17 13:56:53 -040071static void target_complete_ok_work(struct work_struct *work);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -080072
/*
 * Create all of the target-core SLAB caches plus the completion workqueue.
 *
 * Called once at module init.  On any allocation failure, every cache
 * created so far is torn down via the reverse-order goto unwind chain
 * below, and -ENOMEM is returned.  Returns 0 on success.
 */
int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!t10_alua_tg_pt_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_mem_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	/*
	 * WQ_MEM_RECLAIM: completion processing may be needed to make
	 * forward progress while the system is reclaiming memory.
	 */
	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

	/* Unwind in strict reverse order of creation. */
out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_mem_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}
179
/*
 * Tear down everything set up by init_se_kmem_caches().
 *
 * The workqueue is destroyed first so no completion work can still be
 * running when the caches backing its work items are destroyed.
 */
void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
193
/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified.
 *
 * Returns a value unique per @type for the lifetime of the module;
 * indexes start at 1 (pre-increment of a zeroed counter).
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
213
/*
 * Best-effort, one-shot load of the standard backend driver modules
 * (iblock, file, pscsi, user).  request_module() failures are logged
 * but not fatal: a fabric may only need a subset of the backends.
 *
 * NOTE(review): the static @sub_api_initialized flag is read and set
 * without any locking — presumably callers are serialized at a higher
 * level (configfs); concurrent first calls would merely repeat the
 * module requests, which is harmless but worth confirming.
 */
void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}
240
/*
 * Allocate and initialize a new zeroed se_session.
 *
 * @sup_prot_ops: T10-PI protection operations supported by the fabric
 *		  for this session.
 *
 * Returns the new session, or ERR_PTR(-ENOMEM) on allocation failure.
 * The caller owns the returned session; it is eventually released via
 * transport_free_session() / the sess_kref put path.
 */
struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	/* Initial reference; dropped by target_put_session() */
	kref_init(&se_sess->sess_kref);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
262
Nicholas Bellingerc0add7f2013-06-07 17:38:58 -0700263int transport_alloc_session_tags(struct se_session *se_sess,
264 unsigned int tag_num, unsigned int tag_size)
265{
266 int rc;
267
Nicholas Bellinger8c7f6e92013-09-23 11:57:38 -0700268 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
269 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
Nicholas Bellingerc0add7f2013-06-07 17:38:58 -0700270 if (!se_sess->sess_cmd_map) {
Nicholas Bellinger8c7f6e92013-09-23 11:57:38 -0700271 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
272 if (!se_sess->sess_cmd_map) {
273 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
274 return -ENOMEM;
275 }
Nicholas Bellingerc0add7f2013-06-07 17:38:58 -0700276 }
277
278 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
279 if (rc < 0) {
280 pr_err("Unable to init se_sess->sess_tag_pool,"
281 " tag_num: %u\n", tag_num);
Nicholas Bellinger8c7f6e92013-09-23 11:57:38 -0700282 if (is_vmalloc_addr(se_sess->sess_cmd_map))
283 vfree(se_sess->sess_cmd_map);
284 else
285 kfree(se_sess->sess_cmd_map);
Nicholas Bellingerc0add7f2013-06-07 17:38:58 -0700286 se_sess->sess_cmd_map = NULL;
287 return -ENOMEM;
288 }
289
290 return 0;
291}
292EXPORT_SYMBOL(transport_alloc_session_tags);
293
/*
 * Convenience wrapper: allocate a session via transport_init_session()
 * and attach a pre-registered tag map via
 * transport_alloc_session_tags().
 *
 * Returns the new session or an ERR_PTR; on tag allocation failure the
 * partially-built session is freed before returning ERR_PTR(-ENOMEM).
 */
struct se_session *transport_init_session_tags(unsigned int tag_num,
					       unsigned int tag_size,
					       enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
314
/*
 * Link @se_sess into @se_tpg (and, for real I/O nexuses, into the node
 * ACL) and capture per-session protection / ISID state.
 *
 * Lock-free variant: the caller must hold se_tpg->session_lock with
 * IRQs disabled (see transport_register_session()).
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * se_sessions.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		/* Session pins its node ACL; dropped in deregistration. */
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
380
/*
 * Locked wrapper around __transport_register_session(): takes the TPG
 * session_lock (IRQ-safe) for the duration of registration.
 */
void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);
394
/*
 * kref release callback for se_sess->sess_kref: hands the final
 * teardown to the fabric driver's ->close_session().
 */
static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}
403
/*
 * Take an additional reference on @se_sess.  The caller must already
 * hold a valid reference (kref_get() does not tolerate a zero count).
 */
void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);
409
/*
 * Drop a session reference; the last put invokes
 * target_release_session() which calls the fabric's ->close_session().
 */
void target_put_session(struct se_session *se_sess)
{
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
415
Nicholas Bellingerf8e471f2015-03-06 20:34:32 -0800416ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
417{
418 struct se_session *se_sess;
419 ssize_t len = 0;
420
421 spin_lock_bh(&se_tpg->session_lock);
422 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
423 if (!se_sess->se_node_acl)
424 continue;
425 if (!se_sess->se_node_acl->dynamic_node_acl)
426 continue;
427 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
428 break;
429
430 len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
431 se_sess->se_node_acl->initiatorname);
432 len += 1; /* Include NULL terminator */
433 }
434 spin_unlock_bh(&se_tpg->session_lock);
435
436 return len;
437}
438EXPORT_SYMBOL(target_show_dynamic_sessions);
439
/*
 * kref release callback for se_node_acl->acl_kref: wake the configfs
 * removal context sleeping on ->acl_free_comp.
 */
static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
			struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}
447
/* Drop a node ACL reference; final put completes ->acl_free_comp. */
void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
452
/*
 * Unhook @se_sess from its node ACL's active-session bookkeeping under
 * nacl_sess_lock.  Safe to call for sessions without a node ACL
 * (discovery sessions) — it is then a no-op.
 */
void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/* Skip list_del() if ACL shutdown already detached us. */
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
481
/*
 * Release a session's tag pool, command map, and the session object
 * itself.  Counterpart to transport_init_session[_tags]().
 */
void transport_free_session(struct se_session *se_sess)
{
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		/* Map may have come from kzalloc() or the vzalloc() fallback. */
		if (is_vmalloc_addr(se_sess->sess_cmd_map))
			vfree(se_sess->sess_cmd_map);
		else
			kfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
494
/*
 * Fully deregister and free @se_sess.
 *
 * Removes the session from its TPG, and — when the node ACL was
 * dynamically generated and the fabric does not cache demo-mode ACLs —
 * also tears down and frees the ACL itself (after waiting for PR
 * references).  Otherwise the session merely drops its ACL reference,
 * which may complete a pending explicit-ACL configfs removal.
 */
void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	const struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true, drop_nacl = false;

	/* Session never registered with a TPG: nothing to unlink. */
	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	mutex_lock(&se_tpg->acl_node_mutex);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			drop_nacl = true;
		}
	}
	mutex_unlock(&se_tpg->acl_node_mutex);

	if (drop_nacl) {
		/* Quiesce PR references before freeing the ACL outright. */
		core_tpg_wait_for_nacl_pr_ref(se_nacl);
		core_free_device_list_for_node(se_nacl, se_tpg);
		kfree(se_nacl);
		comp_nacl = false;
	}
	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context.
	 */
	if (se_nacl && comp_nacl)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
550
/*
 * Remove @cmd from its device's active-command state list.
 *
 * Called with cmd->t_state_lock held.  No-op when the command has no
 * backing device, or is still marked CMD_T_BUSY (backend still owns
 * it).
 */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
572
/*
 * Check whether a frontend stop is pending on @cmd and transition its
 * state accordingly.
 *
 * @remove_from_lists: also detach from the device state list and clear
 *		       ->se_lun before handing off to the frontend.
 * @write_pending:     set t_state to TRANSPORT_WRITE_PENDING first.
 *
 * Returns 1 if a CMD_T_STOP waiter was signalled (caller must not
 * touch @cmd further), the fabric ->check_stop_free() result when that
 * hook exists and lists were removed, or 0 otherwise.
 */
static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
				    bool write_pending)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (write_pending)
		cmd->t_state = TRANSPORT_WRITE_PENDING;

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release
		 * their internally allocated I/O reference now and
		 * struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}
625
/* Stop-check variant used on the handoff back to the fabric:
 * remove from lists, no WRITE_PENDING transition. */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true, false);
}
630
/*
 * Drop @cmd's percpu reference on its LUN, if one is held.
 * The cmpxchg() on lun_ref_active makes the put safe against racing
 * callers: only the one that flips true -> false does the put.
 */
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}
641
/*
 * Finish processing an aborted command: release its LUN reference,
 * give the fabric a chance to unmap resources, then either hand the
 * command back to the frontend stop path or drop the final reference.
 *
 * @remove: non-zero when the descriptor should be torn down here
 *	    (fabric ->aborted_task() + transport_put_cmd()).
 */
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);
	/*
	 * Allow the fabric driver to unmap any resources before
	 * releasing the descriptor via TFO->release_cmd()
	 */
	if (remove)
		cmd->se_tfo->aborted_task(cmd);

	/* A pending frontend stop takes ownership; nothing more to do. */
	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_put_cmd(cmd);
}
658
Christoph Hellwig35e0e752011-10-17 13:56:53 -0400659static void target_complete_failure_work(struct work_struct *work)
660{
661 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
662
Christoph Hellwigde103c92012-11-06 12:24:09 -0800663 transport_generic_request_failure(cmd,
664 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
Christoph Hellwig35e0e752011-10-17 13:56:53 -0400665}
666
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200667/*
Paolo Bonzinid5829ea2012-09-05 17:09:15 +0200668 * Used when asking transport to copy Sense Data from the underlying
669 * Linux/SCSI struct scsi_cmnd
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200670 */
Paolo Bonzinid5829ea2012-09-05 17:09:15 +0200671static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200672{
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200673 struct se_device *dev = cmd->se_dev;
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200674
675 WARN_ON(!cmd->se_lun);
676
677 if (!dev)
Paolo Bonzinid5829ea2012-09-05 17:09:15 +0200678 return NULL;
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200679
Paolo Bonzinid5829ea2012-09-05 17:09:15 +0200680 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
681 return NULL;
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200682
Roland Dreier9c58b7d2012-08-15 14:35:25 -0700683 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200684
Paolo Bonzinid5829ea2012-09-05 17:09:15 +0200685 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200686 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
Roland Dreier9c58b7d2012-08-15 14:35:25 -0700687 return cmd->sense_buffer;
Paolo Bonzini6138ed22012-09-05 17:09:13 +0200688}
689
/*
 * target_complete_cmd - backend completion callback for a command
 * @cmd: command that finished in the backend
 * @scsi_status: SAM status byte returned by the backend
 *
 * Records @scsi_status, lets the backend harvest sense data, honors any
 * pending stop/abort waiters, and otherwise queues the command on the
 * completion workqueue for success or failure post-processing.  Safe to
 * call from interrupt context (uses irqsave locking).
 */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;


	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		/* Valid backend sense data is treated as a completion. */
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);
		return;
	}

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED &&
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	} else if (!success) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	/* Publish the final state before dropping the lock and queueing. */
	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
741
Roland Dreier2426bd42014-06-10 11:07:47 -0700742void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
743{
744 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
745 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
746 cmd->residual_count += cmd->data_length - length;
747 } else {
748 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
749 cmd->residual_count = cmd->data_length - length;
750 }
751
752 cmd->data_length = length;
753 }
754
755 target_complete_cmd(cmd, scsi_status);
756}
757EXPORT_SYMBOL(target_complete_cmd_with_length);
758
Christoph Hellwigcf572a92012-04-24 00:25:05 -0400759static void target_add_to_state_list(struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800760{
Christoph Hellwig42bf8292011-10-12 11:07:00 -0400761 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800762 unsigned long flags;
763
Nicholas Bellinger4d2300c2011-11-30 18:18:33 -0800764 spin_lock_irqsave(&dev->execute_task_lock, flags);
Christoph Hellwigcf572a92012-04-24 00:25:05 -0400765 if (!cmd->state_active) {
766 list_add_tail(&cmd->state_list, &dev->state_list);
767 cmd->state_active = true;
768 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800769 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800770}
771
Nicholas Bellinger07bde792011-06-13 14:46:09 -0700772/*
Nicholas Bellingerf147abb2011-10-25 23:57:41 -0700773 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
Nicholas Bellinger07bde792011-06-13 14:46:09 -0700774 */
Christoph Hellwig7a6f0a12012-07-08 15:58:47 -0400775static void transport_write_pending_qf(struct se_cmd *cmd);
776static void transport_complete_qf(struct se_cmd *cmd);
Nicholas Bellinger07bde792011-06-13 14:46:09 -0700777
/*
 * Workqueue handler that retries commands previously parked due to a
 * QUEUE_FULL condition.  Splices the device's queue-full list under
 * qf_cmd_lock, then re-drives each command according to the t_state it
 * was parked in (write-pending vs. completed-OK).
 */
void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
				qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	/* Drain the list atomically so later arrivals are not iterated. */
	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}
805
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800806unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
807{
808 switch (cmd->data_direction) {
809 case DMA_NONE:
810 return "NONE";
811 case DMA_FROM_DEVICE:
812 return "READ";
813 case DMA_TO_DEVICE:
814 return "WRITE";
815 case DMA_BIDIRECTIONAL:
816 return "BIDI";
817 default:
818 break;
819 }
820
821 return "UNKNOWN";
822}
823
824void transport_dump_dev_state(
825 struct se_device *dev,
826 char *b,
827 int *bl)
828{
829 *bl += sprintf(b + *bl, "Status: ");
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400830 if (dev->export_count)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800831 *bl += sprintf(b + *bl, "ACTIVATED");
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400832 else
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800833 *bl += sprintf(b + *bl, "DEACTIVATED");
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800834
Christoph Hellwig5f41a312012-05-20 14:34:44 -0400835 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
Nicholas Bellinger11e764b2012-05-09 12:42:09 -0700836 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400837 dev->dev_attrib.block_size,
838 dev->dev_attrib.hw_max_sectors);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800839 *bl += sprintf(b + *bl, " ");
840}
841
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800842void transport_dump_vpd_proto_id(
843 struct t10_vpd *vpd,
844 unsigned char *p_buf,
845 int p_buf_len)
846{
847 unsigned char buf[VPD_TMP_BUF_SIZE];
848 int len;
849
850 memset(buf, 0, VPD_TMP_BUF_SIZE);
851 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
852
853 switch (vpd->protocol_identifier) {
854 case 0x00:
855 sprintf(buf+len, "Fibre Channel\n");
856 break;
857 case 0x10:
858 sprintf(buf+len, "Parallel SCSI\n");
859 break;
860 case 0x20:
861 sprintf(buf+len, "SSA\n");
862 break;
863 case 0x30:
864 sprintf(buf+len, "IEEE 1394\n");
865 break;
866 case 0x40:
867 sprintf(buf+len, "SCSI Remote Direct Memory Access"
868 " Protocol\n");
869 break;
870 case 0x50:
871 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
872 break;
873 case 0x60:
874 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
875 break;
876 case 0x70:
877 sprintf(buf+len, "Automation/Drive Interface Transport"
878 " Protocol\n");
879 break;
880 case 0x80:
881 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
882 break;
883 default:
884 sprintf(buf+len, "Unknown 0x%02x\n",
885 vpd->protocol_identifier);
886 break;
887 }
888
889 if (p_buf)
890 strncpy(p_buf, buf, p_buf_len);
891 else
Andy Grover6708bb22011-06-08 10:36:43 -0700892 pr_debug("%s", buf);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800893}
894
895void
896transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
897{
898 /*
899 * Check if the Protocol Identifier Valid (PIV) bit is set..
900 *
901 * from spc3r23.pdf section 7.5.1
902 */
903 if (page_83[1] & 0x80) {
904 vpd->protocol_identifier = (page_83[0] & 0xf0);
905 vpd->protocol_identifier_set = 1;
906 transport_dump_vpd_proto_id(vpd, NULL, 0);
907 }
908}
909EXPORT_SYMBOL(transport_set_vpd_proto_id);
910
911int transport_dump_vpd_assoc(
912 struct t10_vpd *vpd,
913 unsigned char *p_buf,
914 int p_buf_len)
915{
916 unsigned char buf[VPD_TMP_BUF_SIZE];
Andy Grovere3d6f902011-07-19 08:55:10 +0000917 int ret = 0;
918 int len;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800919
920 memset(buf, 0, VPD_TMP_BUF_SIZE);
921 len = sprintf(buf, "T10 VPD Identifier Association: ");
922
923 switch (vpd->association) {
924 case 0x00:
925 sprintf(buf+len, "addressed logical unit\n");
926 break;
927 case 0x10:
928 sprintf(buf+len, "target port\n");
929 break;
930 case 0x20:
931 sprintf(buf+len, "SCSI target device\n");
932 break;
933 default:
934 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
Andy Grovere3d6f902011-07-19 08:55:10 +0000935 ret = -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800936 break;
937 }
938
939 if (p_buf)
940 strncpy(p_buf, buf, p_buf_len);
941 else
Andy Grover6708bb22011-06-08 10:36:43 -0700942 pr_debug("%s", buf);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800943
944 return ret;
945}
946
947int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
948{
949 /*
950 * The VPD identification association..
951 *
952 * from spc3r23.pdf Section 7.6.3.1 Table 297
953 */
954 vpd->association = (page_83[1] & 0x30);
955 return transport_dump_vpd_assoc(vpd, NULL, 0);
956}
957EXPORT_SYMBOL(transport_set_vpd_assoc);
958
959int transport_dump_vpd_ident_type(
960 struct t10_vpd *vpd,
961 unsigned char *p_buf,
962 int p_buf_len)
963{
964 unsigned char buf[VPD_TMP_BUF_SIZE];
Andy Grovere3d6f902011-07-19 08:55:10 +0000965 int ret = 0;
966 int len;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800967
968 memset(buf, 0, VPD_TMP_BUF_SIZE);
969 len = sprintf(buf, "T10 VPD Identifier Type: ");
970
971 switch (vpd->device_identifier_type) {
972 case 0x00:
973 sprintf(buf+len, "Vendor specific\n");
974 break;
975 case 0x01:
976 sprintf(buf+len, "T10 Vendor ID based\n");
977 break;
978 case 0x02:
979 sprintf(buf+len, "EUI-64 based\n");
980 break;
981 case 0x03:
982 sprintf(buf+len, "NAA\n");
983 break;
984 case 0x04:
985 sprintf(buf+len, "Relative target port identifier\n");
986 break;
987 case 0x08:
988 sprintf(buf+len, "SCSI name string\n");
989 break;
990 default:
991 sprintf(buf+len, "Unsupported: 0x%02x\n",
992 vpd->device_identifier_type);
Andy Grovere3d6f902011-07-19 08:55:10 +0000993 ret = -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -0800994 break;
995 }
996
Andy Grovere3d6f902011-07-19 08:55:10 +0000997 if (p_buf) {
998 if (p_buf_len < strlen(buf)+1)
999 return -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001000 strncpy(p_buf, buf, p_buf_len);
Andy Grovere3d6f902011-07-19 08:55:10 +00001001 } else {
Andy Grover6708bb22011-06-08 10:36:43 -07001002 pr_debug("%s", buf);
Andy Grovere3d6f902011-07-19 08:55:10 +00001003 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001004
1005 return ret;
1006}
1007
1008int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1009{
1010 /*
1011 * The VPD identifier type..
1012 *
1013 * from spc3r23.pdf Section 7.6.3.1 Table 298
1014 */
1015 vpd->device_identifier_type = (page_83[1] & 0x0f);
1016 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1017}
1018EXPORT_SYMBOL(transport_set_vpd_ident_type);
1019
1020int transport_dump_vpd_ident(
1021 struct t10_vpd *vpd,
1022 unsigned char *p_buf,
1023 int p_buf_len)
1024{
1025 unsigned char buf[VPD_TMP_BUF_SIZE];
1026 int ret = 0;
1027
1028 memset(buf, 0, VPD_TMP_BUF_SIZE);
1029
1030 switch (vpd->device_identifier_code_set) {
1031 case 0x01: /* Binary */
Dan Carpenter703d6412013-01-18 16:05:12 +03001032 snprintf(buf, sizeof(buf),
1033 "T10 VPD Binary Device Identifier: %s\n",
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001034 &vpd->device_identifier[0]);
1035 break;
1036 case 0x02: /* ASCII */
Dan Carpenter703d6412013-01-18 16:05:12 +03001037 snprintf(buf, sizeof(buf),
1038 "T10 VPD ASCII Device Identifier: %s\n",
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001039 &vpd->device_identifier[0]);
1040 break;
1041 case 0x03: /* UTF-8 */
Dan Carpenter703d6412013-01-18 16:05:12 +03001042 snprintf(buf, sizeof(buf),
1043 "T10 VPD UTF-8 Device Identifier: %s\n",
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001044 &vpd->device_identifier[0]);
1045 break;
1046 default:
1047 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1048 " 0x%02x", vpd->device_identifier_code_set);
Andy Grovere3d6f902011-07-19 08:55:10 +00001049 ret = -EINVAL;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001050 break;
1051 }
1052
1053 if (p_buf)
1054 strncpy(p_buf, buf, p_buf_len);
1055 else
Andy Grover6708bb22011-06-08 10:36:43 -07001056 pr_debug("%s", buf);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001057
1058 return ret;
1059}
1060
/*
 * Parse the device identifier from an INQUIRY VPD page 0x83 descriptor
 * into vpd->device_identifier, according to the descriptor's code set
 * (encoding).  Binary identifiers are hex-encoded; ASCII/UTF-8 are
 * copied verbatim.  Returns the result of dumping the parsed value.
 */
int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		/*
		 * NOTE(review): page_83[3] is the device-supplied designator
		 * length and drives the writes below with no visible bounds
		 * check against the size of vpd->device_identifier (binary
		 * encoding emits two bytes per input byte) — confirm the
		 * destination is sized for the maximum descriptor.
		 */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		/* Copy the identifier bytes through unchanged. */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		/* Unknown code set: leave the identifier empty. */
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
1097
Christoph Hellwigde103c92012-11-06 12:24:09 -08001098sense_reason_t
1099target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001100{
1101 struct se_device *dev = cmd->se_dev;
1102
1103 if (cmd->unknown_data_length) {
1104 cmd->data_length = size;
1105 } else if (size != cmd->data_length) {
1106 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
1107 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1108 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
1109 cmd->data_length, size, cmd->t_task_cdb[0]);
1110
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001111 if (cmd->data_direction == DMA_TO_DEVICE) {
1112 pr_err("Rejecting underflow/overflow"
1113 " WRITE data\n");
Christoph Hellwigde103c92012-11-06 12:24:09 -08001114 return TCM_INVALID_CDB_FIELD;
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001115 }
1116 /*
1117 * Reject READ_* or WRITE_* with overflow/underflow for
1118 * type SCF_SCSI_DATA_CDB.
1119 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001120 if (dev->dev_attrib.block_size != 512) {
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001121 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
1122 " CDB on non 512-byte sector setup subsystem"
1123 " plugin: %s\n", dev->transport->name);
1124 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
Christoph Hellwigde103c92012-11-06 12:24:09 -08001125 return TCM_INVALID_CDB_FIELD;
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001126 }
Nicholas Bellinger4c054ba2012-08-16 15:33:10 -07001127 /*
1128 * For the overflow case keep the existing fabric provided
1129 * ->data_length. Otherwise for the underflow case, reset
1130 * ->data_length to the smaller SCSI expected data transfer
1131 * length.
1132 */
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001133 if (size > cmd->data_length) {
1134 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1135 cmd->residual_count = (size - cmd->data_length);
1136 } else {
1137 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1138 cmd->residual_count = (cmd->data_length - size);
Nicholas Bellinger4c054ba2012-08-16 15:33:10 -07001139 cmd->data_length = size;
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001140 }
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001141 }
1142
1143 return 0;
1144
Christoph Hellwig9b3b8042012-05-20 11:59:12 -04001145}
1146
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001147/*
1148 * Used by fabric modules containing a local struct se_cmd within their
1149 * fabric dependent per I/O descriptor.
Bart Van Assche649ee052015-04-14 13:26:44 +02001150 *
1151 * Preserves the value of @cmd->tag.
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001152 */
1153void transport_init_se_cmd(
1154 struct se_cmd *cmd,
Christoph Hellwig9ac89282015-04-08 20:01:35 +02001155 const struct target_core_fabric_ops *tfo,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001156 struct se_session *se_sess,
1157 u32 data_length,
1158 int data_direction,
1159 int task_attr,
1160 unsigned char *sense_buffer)
1161{
Andy Grover5951146d2011-07-19 10:26:37 +00001162 INIT_LIST_HEAD(&cmd->se_delayed_node);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001163 INIT_LIST_HEAD(&cmd->se_qf_node);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07001164 INIT_LIST_HEAD(&cmd->se_cmd_list);
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001165 INIT_LIST_HEAD(&cmd->state_list);
Andy Grovera1d8b492011-05-02 17:12:10 -07001166 init_completion(&cmd->t_transport_stop_comp);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07001167 init_completion(&cmd->cmd_wait_comp);
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001168 init_completion(&cmd->task_stop_comp);
Andy Grovera1d8b492011-05-02 17:12:10 -07001169 spin_lock_init(&cmd->t_state_lock);
Mikulas Patocka1e1110c2014-05-17 06:49:22 -04001170 kref_init(&cmd->cmd_kref);
Christoph Hellwig7d680f32011-12-21 14:13:47 -05001171 cmd->transport_state = CMD_T_DEV_ACTIVE;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001172
1173 cmd->se_tfo = tfo;
1174 cmd->se_sess = se_sess;
1175 cmd->data_length = data_length;
1176 cmd->data_direction = data_direction;
1177 cmd->sam_task_attr = task_attr;
1178 cmd->sense_buffer = sense_buffer;
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001179
1180 cmd->state_active = false;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001181}
1182EXPORT_SYMBOL(transport_init_se_cmd);
1183
/*
 * Validate the SAM task attribute for @cmd and, when the backend
 * emulates task attributes, allocate an ordered-id used later to
 * decide when ORDERED commands go from Dormant to Active status.
 *
 * Returns 0 on success, or TCM_INVALID_CDB_FIELD for the unsupported
 * ACA attribute.
 */
static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			dev->transport->name);
	return 0;
}
1211
/*
 * target_setup_cmd_from_cdb - validate and stage a received CDB on @cmd
 * @cmd: command descriptor being set up
 * @cdb: raw SCSI CDB bytes from the fabric
 *
 * Copies the CDB (allocating an extended buffer for large variable-
 * length CDBs), then runs the pre-execution checks: unit attention,
 * ALUA state, reservations, backend CDB parsing, and task-attribute
 * validation.  Returns 0 on success or a sense_reason_t describing the
 * failure.
 */
sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	ret = target_scsi3_ua_check(cmd);
	if (ret)
		return ret;

	/* ALUA: fail if the target port group state forbids this command. */
	ret = target_alua_state_check(cmd);
	if (ret)
		return ret;

	/* Persistent reservations: conflicts get RESERVATION CONFLICT. */
	ret = target_check_reservation(cmd);
	if (ret) {
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		return ret;
	}

	/* Let the backend decode the opcode and set up execution. */
	ret = dev->transport->parse_cdb(cmd);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;

	/* Account the PDU in the LUN's port statistics. */
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001286
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001287/*
Nicholas Bellinger695434e12011-06-03 20:59:19 -07001288 * Used by fabric module frontends to queue tasks directly.
1289 * Many only be used from process context only
1290 */
1291int transport_handle_cdb_direct(
1292 struct se_cmd *cmd)
1293{
Christoph Hellwigde103c92012-11-06 12:24:09 -08001294 sense_reason_t ret;
Nicholas Bellingerdd8ae592011-07-30 05:03:58 -07001295
Nicholas Bellinger695434e12011-06-03 20:59:19 -07001296 if (!cmd->se_lun) {
1297 dump_stack();
Andy Grover6708bb22011-06-08 10:36:43 -07001298 pr_err("cmd->se_lun is NULL\n");
Nicholas Bellinger695434e12011-06-03 20:59:19 -07001299 return -EINVAL;
1300 }
1301 if (in_interrupt()) {
1302 dump_stack();
Andy Grover6708bb22011-06-08 10:36:43 -07001303 pr_err("transport_generic_handle_cdb cannot be called"
Nicholas Bellinger695434e12011-06-03 20:59:19 -07001304 " from interrupt context\n");
1305 return -EINVAL;
1306 }
Nicholas Bellingerdd8ae592011-07-30 05:03:58 -07001307 /*
Christoph Hellwigaf877292012-07-08 15:58:49 -04001308 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1309 * outstanding descriptors are handled correctly during shutdown via
1310 * transport_wait_for_tasks()
Nicholas Bellingerdd8ae592011-07-30 05:03:58 -07001311 *
1312 * Also, we don't take cmd->t_state_lock here as we only expect
1313 * this to be called for initial descriptor submission.
1314 */
1315 cmd->t_state = TRANSPORT_NEW_CMD;
Christoph Hellwig7d680f32011-12-21 14:13:47 -05001316 cmd->transport_state |= CMD_T_ACTIVE;
1317
Nicholas Bellingerdd8ae592011-07-30 05:03:58 -07001318 /*
1319 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1320 * so follow TRANSPORT_NEW_CMD processing thread context usage
1321 * and call transport_generic_request_failure() if necessary..
1322 */
1323 ret = transport_generic_new_cmd(cmd);
Christoph Hellwigde103c92012-11-06 12:24:09 -08001324 if (ret)
1325 transport_generic_request_failure(cmd, ret);
Nicholas Bellingerdd8ae592011-07-30 05:03:58 -07001326 return 0;
Nicholas Bellinger695434e12011-06-03 20:59:19 -07001327}
1328EXPORT_SYMBOL(transport_handle_cdb_direct);
1329
Nicholas Bellingerc5ff8d62013-08-22 11:58:43 -07001330sense_reason_t
Christoph Hellwigde103c92012-11-06 12:24:09 -08001331transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1332 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1333{
1334 if (!sgl || !sgl_count)
1335 return 0;
1336
1337 /*
1338 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1339 * scatterlists already have been set to follow what the fabric
1340 * passes for the original expected data transfer length.
1341 */
1342 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1343 pr_warn("Rejecting SCSI DATA overflow for fabric using"
1344 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1345 return TCM_INVALID_CDB_FIELD;
1346 }
1347
1348 cmd->t_data_sg = sgl;
1349 cmd->t_data_nents = sgl_count;
Ilias Tsitsimpisb32bd0a2015-04-23 21:30:07 +03001350 cmd->t_bidi_data_sg = sgl_bidi;
1351 cmd->t_bidi_data_nents = sgl_bidi_count;
Christoph Hellwigde103c92012-11-06 12:24:09 -08001352
Christoph Hellwigde103c92012-11-06 12:24:09 -08001353 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1354 return 0;
1355}
1356
Nicholas Bellingera0267572012-10-01 17:23:22 -07001357/*
1358 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1359 * se_cmd + use pre-allocated SGL memory.
Nicholas Bellingera6360782011-11-18 20:36:22 -08001360 *
1361 * @se_cmd: command descriptor to submit
1362 * @se_sess: associated se_sess for endpoint
1363 * @cdb: pointer to SCSI CDB
1364 * @sense: pointer to SCSI sense buffer
1365 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1366 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
1368 * @data_dir: DMA data direction
1369 * @flags: flags for command submission from target_sc_flags_tables
Nicholas Bellingera0267572012-10-01 17:23:22 -07001370 * @sgl: struct scatterlist memory for unidirectional mapping
1371 * @sgl_count: scatterlist count for unidirectional mapping
1372 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1373 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
Nicholas Bellingerdef2b332013-12-23 20:38:30 +00001374 * @sgl_prot: struct scatterlist memory protection information
1375 * @sgl_prot_count: scatterlist count for protection information
Nicholas Bellingera6360782011-11-18 20:36:22 -08001376 *
Bart Van Assche649ee052015-04-14 13:26:44 +02001377 * Task tags are supported if the caller has set @se_cmd->tag.
1378 *
Roland Dreierd6dfc862012-07-16 11:04:39 -07001379 * Returns non zero to signal active I/O shutdown failure. All other
1380 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1381 * but still return zero here.
1382 *
Nicholas Bellingera6360782011-11-18 20:36:22 -08001383 * This may only be called from process context, and also currently
1384 * assumes internal allocation of fabric payload buffer by target-core.
Nicholas Bellingera0267572012-10-01 17:23:22 -07001385 */
1386int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
Nicholas Bellingera6360782011-11-18 20:36:22 -08001387 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
Nicholas Bellingera0267572012-10-01 17:23:22 -07001388 u32 data_length, int task_attr, int data_dir, int flags,
1389 struct scatterlist *sgl, u32 sgl_count,
Nicholas Bellingerdef2b332013-12-23 20:38:30 +00001390 struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1391 struct scatterlist *sgl_prot, u32 sgl_prot_count)
Nicholas Bellingera6360782011-11-18 20:36:22 -08001392{
1393 struct se_portal_group *se_tpg;
Christoph Hellwigde103c92012-11-06 12:24:09 -08001394 sense_reason_t rc;
1395 int ret;
Nicholas Bellingera6360782011-11-18 20:36:22 -08001396
1397 se_tpg = se_sess->se_tpg;
1398 BUG_ON(!se_tpg);
1399 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1400 BUG_ON(in_interrupt());
1401 /*
1402 * Initialize se_cmd for target operation. From this point
1403 * exceptions are handled by sending exception status via
1404 * target_core_fabric_ops->queue_status() callback
1405 */
1406 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1407 data_length, data_dir, task_attr, sense);
Sebastian Andrzej Siewiorb0d79942012-01-10 14:16:59 +01001408 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1409 se_cmd->unknown_data_length = 1;
Nicholas Bellingera6360782011-11-18 20:36:22 -08001410 /*
1411 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1412 * se_sess->sess_cmd_list. A second kref_get here is necessary
1413 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1414 * kref_put() to happen during fabric packet acknowledgement.
1415 */
Bart Van Asscheafc16602015-04-27 13:52:36 +02001416 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
Christoph Hellwigde103c92012-11-06 12:24:09 -08001417 if (ret)
1418 return ret;
Nicholas Bellingera6360782011-11-18 20:36:22 -08001419 /*
1420 * Signal bidirectional data payloads to target-core
1421 */
1422 if (flags & TARGET_SCF_BIDI_OP)
1423 se_cmd->se_cmd_flags |= SCF_BIDI;
1424 /*
1425 * Locate se_lun pointer and attach it to struct se_cmd
1426 */
Christoph Hellwigde103c92012-11-06 12:24:09 -08001427 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1428 if (rc) {
1429 transport_send_check_condition_and_sense(se_cmd, rc, 0);
Bart Van Asscheafc16602015-04-27 13:52:36 +02001430 target_put_sess_cmd(se_cmd);
Roland Dreierd6dfc862012-07-16 11:04:39 -07001431 return 0;
Nicholas Bellinger735703c2012-01-20 19:02:56 -08001432 }
Sagi Grimbergb5b8e292014-02-19 17:50:17 +02001433
1434 rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1435 if (rc != 0) {
1436 transport_generic_request_failure(se_cmd, rc);
1437 return 0;
1438 }
1439
Nicholas Bellingerdef2b332013-12-23 20:38:30 +00001440 /*
1441 * Save pointers for SGLs containing protection information,
1442 * if present.
1443 */
1444 if (sgl_prot_count) {
1445 se_cmd->t_prot_sg = sgl_prot;
1446 se_cmd->t_prot_nents = sgl_prot_count;
Akinobu Mita58358122015-05-01 15:23:49 +09001447 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
Nicholas Bellingerdef2b332013-12-23 20:38:30 +00001448 }
Christoph Hellwigd6e01752012-05-20 11:59:14 -04001449
Nicholas Bellingera0267572012-10-01 17:23:22 -07001450 /*
1451 * When a non zero sgl_count has been passed perform SGL passthrough
1452 * mapping for pre-allocated fabric memory instead of having target
1453 * core perform an internal SGL allocation..
1454 */
1455 if (sgl_count != 0) {
1456 BUG_ON(!sgl);
Andy Grover11e319e2012-04-03 15:51:28 -07001457
Nicholas Bellinger944981c2012-10-02 14:00:33 -07001458 /*
1459 * A work-around for tcm_loop as some userspace code via
1460 * scsi-generic do not memset their associated read buffers,
1461 * so go ahead and do that here for type non-data CDBs. Also
1462 * note that this is currently guaranteed to be a single SGL
1463 * for this case by target core in target_setup_cmd_from_cdb()
1464 * -> transport_generic_cmd_sequencer().
1465 */
1466 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1467 se_cmd->data_direction == DMA_FROM_DEVICE) {
1468 unsigned char *buf = NULL;
1469
1470 if (sgl)
1471 buf = kmap(sg_page(sgl)) + sgl->offset;
1472
1473 if (buf) {
1474 memset(buf, 0, sgl->length);
1475 kunmap(sg_page(sgl));
1476 }
1477 }
1478
Nicholas Bellingera0267572012-10-01 17:23:22 -07001479 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1480 sgl_bidi, sgl_bidi_count);
1481 if (rc != 0) {
Christoph Hellwigde103c92012-11-06 12:24:09 -08001482 transport_generic_request_failure(se_cmd, rc);
Nicholas Bellingera0267572012-10-01 17:23:22 -07001483 return 0;
1484 }
1485 }
Nicholas Bellingerdef2b332013-12-23 20:38:30 +00001486
Andy Grover11e319e2012-04-03 15:51:28 -07001487 /*
1488 * Check if we need to delay processing because of ALUA
1489 * Active/NonOptimized primary access state..
1490 */
1491 core_alua_check_nonop_delay(se_cmd);
1492
Nicholas Bellingera6360782011-11-18 20:36:22 -08001493 transport_handle_cdb_direct(se_cmd);
Roland Dreierd6dfc862012-07-16 11:04:39 -07001494 return 0;
Nicholas Bellingera6360782011-11-18 20:36:22 -08001495}
Nicholas Bellingera0267572012-10-01 17:23:22 -07001496EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1497
1498/*
1499 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1500 *
1501 * @se_cmd: command descriptor to submit
1502 * @se_sess: associated se_sess for endpoint
1503 * @cdb: pointer to SCSI CDB
1504 * @sense: pointer to SCSI sense buffer
1505 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1506 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
1508 * @data_dir: DMA data direction
1509 * @flags: flags for command submission from target_sc_flags_tables
1510 *
Bart Van Assche649ee052015-04-14 13:26:44 +02001511 * Task tags are supported if the caller has set @se_cmd->tag.
1512 *
Nicholas Bellingera0267572012-10-01 17:23:22 -07001513 * Returns non zero to signal active I/O shutdown failure. All other
1514 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1515 * but still return zero here.
1516 *
1517 * This may only be called from process context, and also currently
1518 * assumes internal allocation of fabric payload buffer by target-core.
1519 *
 * It also assumes internal target core SGL memory allocation.
1521 */
1522int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1523 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
1524 u32 data_length, int task_attr, int data_dir, int flags)
1525{
1526 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1527 unpacked_lun, data_length, task_attr, data_dir,
Nicholas Bellingerdef2b332013-12-23 20:38:30 +00001528 flags, NULL, 0, NULL, 0, NULL, 0);
Nicholas Bellingera0267572012-10-01 17:23:22 -07001529}
Nicholas Bellingera6360782011-11-18 20:36:22 -08001530EXPORT_SYMBOL(target_submit_cmd);
1531
Nicholas Bellinger9f0d05c2012-02-25 05:02:48 -08001532static void target_complete_tmr_failure(struct work_struct *work)
1533{
1534 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1535
1536 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1537 se_cmd->se_tfo->queue_tm_rsp(se_cmd);
Roland Dreier5a3b6fc2013-01-02 12:47:59 -08001538
1539 transport_cmd_check_stop_to_fabric(se_cmd);
Nicholas Bellinger9f0d05c2012-02-25 05:02:48 -08001540}
1541
Andy Groverea98d7f2012-01-19 13:39:21 -08001542/**
1543 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1544 * for TMR CDBs
1545 *
1546 * @se_cmd: command descriptor to submit
1547 * @se_sess: associated se_sess for endpoint
1548 * @sense: pointer to SCSI sense buffer
1549 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1550 * @fabric_context: fabric context for TMR req
1551 * @tm_type: Type of TM request
Nicholas Bellingerc0974f82012-02-25 05:10:04 -08001552 * @gfp: gfp type for caller
1553 * @tag: referenced task tag for TMR_ABORT_TASK
Nicholas Bellingerc7042ca2012-02-25 01:40:24 -08001554 * @flags: submit cmd flags
Andy Groverea98d7f2012-01-19 13:39:21 -08001555 *
1556 * Callable from all contexts.
1557 **/
1558
Nicholas Bellingerc7042ca2012-02-25 01:40:24 -08001559int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
Andy Groverea98d7f2012-01-19 13:39:21 -08001560 unsigned char *sense, u32 unpacked_lun,
Nicholas Bellingerc0974f82012-02-25 05:10:04 -08001561 void *fabric_tmr_ptr, unsigned char tm_type,
1562 gfp_t gfp, unsigned int tag, int flags)
Andy Groverea98d7f2012-01-19 13:39:21 -08001563{
1564 struct se_portal_group *se_tpg;
1565 int ret;
1566
1567 se_tpg = se_sess->se_tpg;
1568 BUG_ON(!se_tpg);
1569
1570 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
Christoph Hellwig68d81f42014-11-24 07:07:25 -08001571 0, DMA_NONE, TCM_SIMPLE_TAG, sense);
Nicholas Bellingerc7042ca2012-02-25 01:40:24 -08001572 /*
1573 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1574 * allocation failure.
1575 */
Nicholas Bellingerc0974f82012-02-25 05:10:04 -08001576 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
Nicholas Bellingerc7042ca2012-02-25 01:40:24 -08001577 if (ret < 0)
1578 return -ENOMEM;
Andy Groverea98d7f2012-01-19 13:39:21 -08001579
Nicholas Bellingerc0974f82012-02-25 05:10:04 -08001580 if (tm_type == TMR_ABORT_TASK)
1581 se_cmd->se_tmr_req->ref_task_tag = tag;
1582
Andy Groverea98d7f2012-01-19 13:39:21 -08001583 /* See target_submit_cmd for commentary */
Bart Van Asscheafc16602015-04-27 13:52:36 +02001584 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
Roland Dreierbc187ea2012-07-16 11:04:40 -07001585 if (ret) {
1586 core_tmr_release_req(se_cmd->se_tmr_req);
1587 return ret;
1588 }
Andy Groverea98d7f2012-01-19 13:39:21 -08001589
Andy Groverea98d7f2012-01-19 13:39:21 -08001590 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1591 if (ret) {
Nicholas Bellinger9f0d05c2012-02-25 05:02:48 -08001592 /*
1593 * For callback during failure handling, push this work off
1594 * to process context with TMR_LUN_DOES_NOT_EXIST status.
1595 */
1596 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1597 schedule_work(&se_cmd->work);
Nicholas Bellingerc7042ca2012-02-25 01:40:24 -08001598 return 0;
Andy Groverea98d7f2012-01-19 13:39:21 -08001599 }
1600 transport_generic_handle_tmr(se_cmd);
Nicholas Bellingerc7042ca2012-02-25 01:40:24 -08001601 return 0;
Andy Groverea98d7f2012-01-19 13:39:21 -08001602}
1603EXPORT_SYMBOL(target_submit_tmr);
1604
Christoph Hellwigcdbb70b2011-10-17 13:56:46 -04001605/*
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001606 * If the cmd is active, request it to be stopped and sleep until it
Christoph Hellwigcdbb70b2011-10-17 13:56:46 -04001607 * has completed.
1608 */
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001609bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
Bart Van Asschecb0df4d2015-04-10 14:49:02 +02001610 __releases(&cmd->t_state_lock)
1611 __acquires(&cmd->t_state_lock)
Christoph Hellwigcdbb70b2011-10-17 13:56:46 -04001612{
Christoph Hellwigcdbb70b2011-10-17 13:56:46 -04001613 bool was_active = false;
1614
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001615 if (cmd->transport_state & CMD_T_BUSY) {
1616 cmd->transport_state |= CMD_T_REQUEST_STOP;
Christoph Hellwigcdbb70b2011-10-17 13:56:46 -04001617 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1618
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001619 pr_debug("cmd %p waiting to complete\n", cmd);
1620 wait_for_completion(&cmd->task_stop_comp);
1621 pr_debug("cmd %p stopped successfully\n", cmd);
Christoph Hellwigcdbb70b2011-10-17 13:56:46 -04001622
1623 spin_lock_irqsave(&cmd->t_state_lock, *flags);
Christoph Hellwigcf572a92012-04-24 00:25:05 -04001624 cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1625 cmd->transport_state &= ~CMD_T_BUSY;
Christoph Hellwigcdbb70b2011-10-17 13:56:46 -04001626 was_active = true;
1627 }
1628
Christoph Hellwigcdbb70b2011-10-17 13:56:46 -04001629 return was_active;
1630}
1631
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001632/*
1633 * Handle SAM-esque emulation for generic transport request failures.
1634 */
Christoph Hellwigde103c92012-11-06 12:24:09 -08001635void transport_generic_request_failure(struct se_cmd *cmd,
1636 sense_reason_t sense_reason)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001637{
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001638 int ret = 0;
1639
Bart Van Assche649ee052015-04-14 13:26:44 +02001640 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
1641 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
Christoph Hellwigde103c92012-11-06 12:24:09 -08001642 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
Andy Grovere3d6f902011-07-19 08:55:10 +00001643 cmd->se_tfo->get_cmd_state(cmd),
Christoph Hellwigde103c92012-11-06 12:24:09 -08001644 cmd->t_state, sense_reason);
Christoph Hellwigd43d6ae2012-04-24 00:25:08 -04001645 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
Christoph Hellwig7d680f32011-12-21 14:13:47 -05001646 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1647 (cmd->transport_state & CMD_T_STOP) != 0,
1648 (cmd->transport_state & CMD_T_SENT) != 0);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001649
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001650 /*
1651 * For SAM Task Attribute emulation for failed struct se_cmd
1652 */
Christoph Hellwig019c4ca2012-10-10 17:37:14 -04001653 transport_complete_task_attr(cmd);
Nicholas Bellingercf6d1f02013-08-21 19:34:43 -07001654 /*
1655 * Handle special case for COMPARE_AND_WRITE failure, where the
Nicholas Bellingerc8e63982015-04-07 21:53:27 +00001656 * callback is expected to drop the per device ->caw_sem.
Nicholas Bellingercf6d1f02013-08-21 19:34:43 -07001657 */
1658 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1659 cmd->transport_complete_callback)
Nicholas Bellingerc8e63982015-04-07 21:53:27 +00001660 cmd->transport_complete_callback(cmd, false);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001661
Christoph Hellwigde103c92012-11-06 12:24:09 -08001662 switch (sense_reason) {
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07001663 case TCM_NON_EXISTENT_LUN:
1664 case TCM_UNSUPPORTED_SCSI_OPCODE:
1665 case TCM_INVALID_CDB_FIELD:
1666 case TCM_INVALID_PARAMETER_LIST:
Roland Dreierbb992e72013-02-08 15:18:39 -08001667 case TCM_PARAMETER_LIST_LENGTH_ERROR:
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07001668 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1669 case TCM_UNKNOWN_MODE_PAGE:
1670 case TCM_WRITE_PROTECTED:
Roland Dreiere2397c72012-07-16 15:34:21 -07001671 case TCM_ADDRESS_OUT_OF_RANGE:
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07001672 case TCM_CHECK_CONDITION_ABORT_CMD:
1673 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1674 case TCM_CHECK_CONDITION_NOT_READY:
Nicholas Bellinger94387aa2014-02-23 14:04:09 +00001675 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1676 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1677 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001678 break;
Christoph Hellwigde103c92012-11-06 12:24:09 -08001679 case TCM_OUT_OF_RESOURCES:
1680 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1681 break;
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07001682 case TCM_RESERVATION_CONFLICT:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001683 /*
1684 * No SENSE Data payload for this case, set SCSI Status
1685 * and queue the response to $FABRIC_MOD.
1686 *
1687 * Uses linux/include/scsi/scsi.h SAM status codes defs
1688 */
1689 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1690 /*
1691 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1692 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1693 * CONFLICT STATUS.
1694 *
1695 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1696 */
Andy Grovere3d6f902011-07-19 08:55:10 +00001697 if (cmd->se_sess &&
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -04001698 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
Andy Grovere3d6f902011-07-19 08:55:10 +00001699 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001700 cmd->orig_fe_lun, 0x2C,
1701 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1702
Roland Dreiere5c0d6a2013-06-26 17:36:17 -07001703 trace_target_cmd_complete(cmd);
1704 ret = cmd->se_tfo-> queue_status(cmd);
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07001705 if (ret == -EAGAIN || ret == -ENOMEM)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001706 goto queue_full;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001707 goto check_stop;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001708 default:
Andy Grover6708bb22011-06-08 10:36:43 -07001709 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
Christoph Hellwigde103c92012-11-06 12:24:09 -08001710 cmd->t_task_cdb[0], sense_reason);
1711 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001712 break;
1713 }
Christoph Hellwigf3146432012-07-08 15:58:48 -04001714
Christoph Hellwigde103c92012-11-06 12:24:09 -08001715 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
Nicholas Bellinger03e98c92011-11-04 02:36:16 -07001716 if (ret == -EAGAIN || ret == -ENOMEM)
1717 goto queue_full;
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001718
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001719check_stop:
1720 transport_lun_remove_cmd(cmd);
Andy Grover6708bb22011-06-08 10:36:43 -07001721 if (!transport_cmd_check_stop_to_fabric(cmd))
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001722 ;
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001723 return;
1724
1725queue_full:
Christoph Hellwige057f532011-10-17 13:56:41 -04001726 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1727 transport_handle_queue_full(cmd, cmd->se_dev);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001728}
Nicholas Bellinger2fbff122012-02-10 16:18:11 -08001729EXPORT_SYMBOL(transport_generic_request_failure);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001730
Nicholas Bellinger76dde502013-08-21 16:04:10 -07001731void __target_execute_cmd(struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001732{
Christoph Hellwigde103c92012-11-06 12:24:09 -08001733 sense_reason_t ret;
Christoph Hellwig5f41a312012-05-20 14:34:44 -04001734
Christoph Hellwigde103c92012-11-06 12:24:09 -08001735 if (cmd->execute_cmd) {
1736 ret = cmd->execute_cmd(cmd);
1737 if (ret) {
1738 spin_lock_irq(&cmd->t_state_lock);
1739 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1740 spin_unlock_irq(&cmd->t_state_lock);
Christoph Hellwig5f41a312012-05-20 14:34:44 -04001741
Christoph Hellwigde103c92012-11-06 12:24:09 -08001742 transport_generic_request_failure(cmd, ret);
1743 }
Christoph Hellwig5f41a312012-05-20 14:34:44 -04001744 }
1745}
1746
/*
 * Handle WRITE (DATA-OUT) protection information before backend submission.
 * Returns 0 on success, or -1 after having already queued a failure
 * response via transport_generic_request_failure().
 */
static int target_write_prot_action(struct se_cmd *cmd)
{
	u32 sectors;
	/*
	 * Perform WRITE_INSERT of PI using software emulation when backend
	 * device has PI enabled, if the transport has not already generated
	 * PI using hardware WRITE_INSERT offload.
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		/* Nothing to do if the fabric already strips PI in hardware. */
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		/* Software-verify the incoming PI before it is stripped. */
		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			/* Clear in-flight flags and fail the command. */
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}
1781
/*
 * Apply SAM task-attribute ordering before execution.  Returns true when
 * the command was placed on the device's delayed list and must NOT be
 * executed now; false when it may proceed immediately.
 */
static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/* Passthrough backends do their own ordering. */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the passed struct se_cmd list of tasks to the front of the list.
	 */
	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		/* HEAD_OF_QUEUE always executes immediately. */
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
			 "se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
			 " se_ordered_id: %u\n",
			 cmd->t_task_cdb[0], cmd->se_ordered_id);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	/* No outstanding ORDERED boundary: run the command now. */
	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	/* Park the command until transport_complete_task_attr() releases it. */
	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
		" delayed CMD list, se_ordered_id: %u\n",
		cmd->t_task_cdb[0], cmd->sam_task_attr,
		cmd->se_ordered_id);
	return true;
}
1834
/*
 * target_execute_cmd - dispatch a fully set-up command toward the backend
 *
 * Checks abort/stop requests first, marks the command active under
 * t_state_lock, performs WRITE PI emulation, honors SAM task-attribute
 * ordering, and finally hands off to __target_execute_cmd().
 */
void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1))
		return;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		/* Wake everyone waiting on the stop completion. */
		spin_unlock_irq(&cmd->t_state_lock);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	/* Mark the command in-flight before releasing the lock. */
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	/* WRITE PI emulation; a non-zero return already queued the failure. */
	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		/*
		 * Command was queued on the delayed list: undo the
		 * in-flight flags until it is restarted.
		 */
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd);
}
Christoph Hellwig70baf0a2012-07-08 15:58:39 -04001873EXPORT_SYMBOL(target_execute_cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001874
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001875/*
Christoph Hellwig5f41a312012-05-20 14:34:44 -04001876 * Process all commands up to the last received ORDERED task attribute which
1877 * requires another blocking boundary
1878 */
1879static void target_restart_delayed_cmds(struct se_device *dev)
1880{
1881 for (;;) {
1882 struct se_cmd *cmd;
1883
1884 spin_lock(&dev->delayed_cmd_lock);
1885 if (list_empty(&dev->delayed_cmd_list)) {
1886 spin_unlock(&dev->delayed_cmd_lock);
1887 break;
1888 }
1889
1890 cmd = list_entry(dev->delayed_cmd_list.next,
1891 struct se_cmd, se_delayed_node);
1892 list_del(&cmd->se_delayed_node);
1893 spin_unlock(&dev->delayed_cmd_lock);
1894
1895 __target_execute_cmd(cmd);
1896
Christoph Hellwig68d81f42014-11-24 07:07:25 -08001897 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
Christoph Hellwig5f41a312012-05-20 14:34:44 -04001898 break;
1899 }
1900}
1901
1902/*
Christoph Hellwig35e0e752011-10-17 13:56:53 -04001903 * Called from I/O completion to determine which dormant/delayed
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001904 * and ordered cmds need to have their tasks added to the execution queue.
1905 */
1906static void transport_complete_task_attr(struct se_cmd *cmd)
1907{
Andy Grover5951146d2011-07-19 10:26:37 +00001908 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001909
Andy Grovera3541702015-05-19 14:44:41 -07001910 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
Christoph Hellwig019c4ca2012-10-10 17:37:14 -04001911 return;
1912
Christoph Hellwig68d81f42014-11-24 07:07:25 -08001913 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
Joern Engel33940d02014-09-16 16:23:12 -04001914 atomic_dec_mb(&dev->simple_cmds);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001915 dev->dev_cur_ordered_id++;
Andy Grover6708bb22011-06-08 10:36:43 -07001916 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001917 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
1918 cmd->se_ordered_id);
Christoph Hellwig68d81f42014-11-24 07:07:25 -08001919 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001920 dev->dev_cur_ordered_id++;
Andy Grover6708bb22011-06-08 10:36:43 -07001921 pr_debug("Incremented dev_cur_ordered_id: %u for"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001922 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
1923 cmd->se_ordered_id);
Christoph Hellwig68d81f42014-11-24 07:07:25 -08001924 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
Joern Engel33940d02014-09-16 16:23:12 -04001925 atomic_dec_mb(&dev->dev_ordered_sync);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001926
1927 dev->dev_cur_ordered_id++;
Andy Grover6708bb22011-06-08 10:36:43 -07001928 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001929 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
1930 }
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001931
Christoph Hellwig5f41a312012-05-20 14:34:44 -04001932 target_restart_delayed_cmds(dev);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08001933}
1934
/*
 * Retry delivering a response for a command that previously hit a fabric
 * QUEUE_FULL condition; requeue it again if the fabric still errors.
 */
static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);

	/* Sense data pending: retry the status response only. */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		/* BIDI WRITEs still owe the initiator read data. */
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		/* Fabric is still full: queue for yet another retry. */
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}
1974
1975static void transport_handle_queue_full(
1976 struct se_cmd *cmd,
Christoph Hellwige057f532011-10-17 13:56:41 -04001977 struct se_device *dev)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001978{
1979 spin_lock_irq(&dev->qf_cmd_lock);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001980 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
Joern Engel33940d02014-09-16 16:23:12 -04001981 atomic_inc_mb(&dev->dev_qf_count);
Nicholas Bellinger07bde792011-06-13 14:46:09 -07001982 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
1983
1984 schedule_work(&cmd->se_dev->qf_work_queue);
1985}
1986
Nicholas Bellingerfdeab852015-02-08 02:53:25 -08001987static bool target_read_prot_action(struct se_cmd *cmd)
Nicholas Bellingerbc005862014-04-02 14:55:33 -07001988{
Nicholas Bellingerfdeab852015-02-08 02:53:25 -08001989 switch (cmd->prot_op) {
1990 case TARGET_PROT_DIN_STRIP:
1991 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
Sagi Grimbergf75b6fa2015-04-19 20:27:19 +03001992 u32 sectors = cmd->data_length >>
1993 ilog2(cmd->se_dev->dev_attrib.block_size);
1994
1995 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1996 sectors, 0, cmd->t_prot_sg,
1997 0);
1998 if (cmd->pi_err)
Nicholas Bellingerfdeab852015-02-08 02:53:25 -08001999 return true;
Nicholas Bellingerbc005862014-04-02 14:55:33 -07002000 }
Nicholas Bellingerfdeab852015-02-08 02:53:25 -08002001 break;
Nicholas Bellinger72c038502015-02-08 04:02:08 -08002002 case TARGET_PROT_DIN_INSERT:
2003 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2004 break;
2005
2006 sbc_dif_generate(cmd);
2007 break;
Nicholas Bellingerfdeab852015-02-08 02:53:25 -08002008 default:
2009 break;
Nicholas Bellingerbc005862014-04-02 14:55:33 -07002010 }
2011
2012 return false;
2013}
2014
/*
 * Workqueue handler for successful command completion: push status and/or
 * data back to the fabric driver, then drop the command's LUN reference.
 * Any fabric queue_* callback returning -EAGAIN/-ENOMEM parks the command
 * in TRANSPORT_COMPLETE_QF_OK state for later retry via the QF worker.
 */
static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	transport_complete_task_attr(cmd);

	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		WARN_ON(!cmd->scsi_status);
		ret = transport_send_check_condition_and_sense(
					cmd, 0, 1);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;

		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return;
	}
	/*
	 * Check for a callback, used by amongst other things
	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
	 */
	if (cmd->transport_complete_callback) {
		sense_reason_t rc;

		rc = cmd->transport_complete_callback(cmd, true);
		/*
		 * rc == 0 and no COMPARE_AND_WRITE_POST flag means the
		 * callback owns further completion -- except the zero-length
		 * COMPARE_AND_WRITE case, which still must queue a response.
		 */
		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
			    !cmd->data_length)
				goto queue_rsp;

			return;
		} else if (rc) {
			/* callback failed: turn rc into a check condition */
			ret = transport_send_check_condition_and_sense(cmd,
						rc, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}

queue_rsp:
	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		/* account transmitted READ payload bytes in port stats */
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Perform READ_STRIP of PI using software emulation when
		 * backend had PI enabled, if the transport will not be
		 * performing hardware READ_STRIP offload.
		 */
		if (target_read_prot_action(cmd)) {
			ret = transport_send_check_condition_and_sense(cmd,
						cmd->pi_err, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		/* account received WRITE payload bytes in port stats */
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (cmd->se_lun->lun_sep) {
			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->se_cmd_flags & SCF_BIDI) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (cmd->se_lun->lun_sep) {
				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		" data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
2148
Andy Grover6708bb22011-06-08 10:36:43 -07002149static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002150{
Andy Groverec98f782011-07-20 19:28:46 +00002151 struct scatterlist *sg;
Andy Groverec98f782011-07-20 19:28:46 +00002152 int count;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002153
Andy Grover6708bb22011-06-08 10:36:43 -07002154 for_each_sg(sgl, sg, nents, count)
2155 __free_page(sg_page(sg));
2156
2157 kfree(sgl);
2158}
2159
Nicholas Bellinger47e459e2013-08-20 10:45:16 -07002160static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2161{
2162 /*
2163 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2164 * emulation, and free + reset pointers if necessary..
2165 */
2166 if (!cmd->t_data_sg_orig)
2167 return;
2168
2169 kfree(cmd->t_data_sg);
2170 cmd->t_data_sg = cmd->t_data_sg_orig;
2171 cmd->t_data_sg_orig = NULL;
2172 cmd->t_data_nents = cmd->t_data_nents_orig;
2173 cmd->t_data_nents_orig = 0;
2174}
2175
/*
 * Free all data/BIDI/protection scatterlists owned by @cmd.  Buffers the
 * fabric supplied itself (the *_NOALLOC flags) are left alone.  Ordering
 * matters: transport_reset_sgl_orig() must run before freeing t_data_sg
 * so the restored original list is the one released.
 */
static inline void transport_free_pages(struct se_cmd *cmd)
{
	/* Protection SGL is ours unless the fabric passed its own. */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
		cmd->t_prot_sg = NULL;
		cmd->t_prot_nents = 0;
	}

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		/*
		 * Release special case READ buffer payload required for
		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
		 */
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
			transport_free_sgl(cmd->t_bidi_data_sg,
					   cmd->t_bidi_data_nents);
			cmd->t_bidi_data_sg = NULL;
			cmd->t_bidi_data_nents = 0;
		}
		/* data SGL belongs to the fabric; only restore saved orig */
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}
2208
Christoph Hellwigd3df7822011-09-13 23:08:32 +02002209/**
Christoph Hellwige26d99a2011-11-14 12:30:30 -05002210 * transport_release_cmd - free a command
2211 * @cmd: command to free
2212 *
2213 * This routine unconditionally frees a command, and reference counting
2214 * or list removal must be done in the caller.
2215 */
Nicholas Bellingerd5ddad4162013-05-31 00:46:11 -07002216static int transport_release_cmd(struct se_cmd *cmd)
Christoph Hellwige26d99a2011-11-14 12:30:30 -05002217{
2218 BUG_ON(!cmd->se_tfo);
2219
Andy Groverc8e31f22012-01-19 13:39:17 -08002220 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
Christoph Hellwige26d99a2011-11-14 12:30:30 -05002221 core_tmr_release_req(cmd->se_tmr_req);
2222 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2223 kfree(cmd->t_task_cdb);
2224 /*
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08002225 * If this cmd has been setup with target_get_sess_cmd(), drop
2226 * the kref and call ->release_cmd() in kref callback.
Christoph Hellwige26d99a2011-11-14 12:30:30 -05002227 */
Bart Van Asscheafc16602015-04-27 13:52:36 +02002228 return target_put_sess_cmd(cmd);
Christoph Hellwige26d99a2011-11-14 12:30:30 -05002229}
2230
/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * Frees the command's data pages first, then drops our reference; the
 * command itself is freed when the last reference goes away.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
	transport_free_pages(cmd);

	return transport_release_cmd(cmd);
}
2242
Andy Grover49493142012-01-16 16:57:08 -08002243void *transport_kmap_data_sg(struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002244{
Andy Groverec98f782011-07-20 19:28:46 +00002245 struct scatterlist *sg = cmd->t_data_sg;
Andy Grover49493142012-01-16 16:57:08 -08002246 struct page **pages;
2247 int i;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002248
Andy Grover05d1c7c2011-07-20 19:13:28 +00002249 /*
Andy Groverec98f782011-07-20 19:28:46 +00002250 * We need to take into account a possible offset here for fabrics like
2251 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
2252 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
Andy Grover05d1c7c2011-07-20 19:13:28 +00002253 */
Andy Grover49493142012-01-16 16:57:08 -08002254 if (!cmd->t_data_nents)
2255 return NULL;
Paolo Bonzini3717ef02012-09-07 17:30:35 +02002256
2257 BUG_ON(!sg);
2258 if (cmd->t_data_nents == 1)
Andy Grover49493142012-01-16 16:57:08 -08002259 return kmap(sg_page(sg)) + sg->offset;
Andy Grover05d1c7c2011-07-20 19:13:28 +00002260
Andy Grover49493142012-01-16 16:57:08 -08002261 /* >1 page. use vmap */
2262 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
Christoph Hellwigde103c92012-11-06 12:24:09 -08002263 if (!pages)
Andy Grover49493142012-01-16 16:57:08 -08002264 return NULL;
2265
2266 /* convert sg[] to pages[] */
2267 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2268 pages[i] = sg_page(sg);
2269 }
2270
2271 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2272 kfree(pages);
Christoph Hellwigde103c92012-11-06 12:24:09 -08002273 if (!cmd->t_data_vmap)
Andy Grover49493142012-01-16 16:57:08 -08002274 return NULL;
2275
2276 return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
Andy Grover05d1c7c2011-07-20 19:13:28 +00002277}
Andy Grover49493142012-01-16 16:57:08 -08002278EXPORT_SYMBOL(transport_kmap_data_sg);
2279
2280void transport_kunmap_data_sg(struct se_cmd *cmd)
2281{
Andy Grovera1edf9c2012-02-09 12:18:06 -08002282 if (!cmd->t_data_nents) {
Andy Grover49493142012-01-16 16:57:08 -08002283 return;
Andy Grovera1edf9c2012-02-09 12:18:06 -08002284 } else if (cmd->t_data_nents == 1) {
Andy Grover49493142012-01-16 16:57:08 -08002285 kunmap(sg_page(cmd->t_data_sg));
Andy Grovera1edf9c2012-02-09 12:18:06 -08002286 return;
2287 }
Andy Grover49493142012-01-16 16:57:08 -08002288
2289 vunmap(cmd->t_data_vmap);
2290 cmd->t_data_vmap = NULL;
2291}
2292EXPORT_SYMBOL(transport_kunmap_data_sg);
Andy Grover05d1c7c2011-07-20 19:13:28 +00002293
Nicholas Bellingerc5ff8d62013-08-22 11:58:43 -07002294int
Nicholas Bellinger20093992013-08-25 15:44:03 -07002295target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2296 bool zero_page)
Andy Grover05d1c7c2011-07-20 19:13:28 +00002297{
Nicholas Bellinger20093992013-08-25 15:44:03 -07002298 struct scatterlist *sg;
Andy Groverec98f782011-07-20 19:28:46 +00002299 struct page *page;
Nicholas Bellinger20093992013-08-25 15:44:03 -07002300 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
2301 unsigned int nent;
Andy Groverec98f782011-07-20 19:28:46 +00002302 int i = 0;
Andy Grover05d1c7c2011-07-20 19:13:28 +00002303
Nicholas Bellinger20093992013-08-25 15:44:03 -07002304 nent = DIV_ROUND_UP(length, PAGE_SIZE);
2305 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
2306 if (!sg)
Andy Grovere3d6f902011-07-19 08:55:10 +00002307 return -ENOMEM;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002308
Nicholas Bellinger20093992013-08-25 15:44:03 -07002309 sg_init_table(sg, nent);
roland@purestorage.com9db9da32012-01-04 15:59:58 -08002310
Andy Groverec98f782011-07-20 19:28:46 +00002311 while (length) {
2312 u32 page_len = min_t(u32, length, PAGE_SIZE);
roland@purestorage.com9db9da32012-01-04 15:59:58 -08002313 page = alloc_page(GFP_KERNEL | zero_flag);
Andy Groverec98f782011-07-20 19:28:46 +00002314 if (!page)
2315 goto out;
2316
Nicholas Bellinger20093992013-08-25 15:44:03 -07002317 sg_set_page(&sg[i], page, page_len, 0);
Andy Groverec98f782011-07-20 19:28:46 +00002318 length -= page_len;
2319 i++;
2320 }
Nicholas Bellinger20093992013-08-25 15:44:03 -07002321 *sgl = sg;
2322 *nents = nent;
Andy Groverec98f782011-07-20 19:28:46 +00002323 return 0;
2324
2325out:
Yi Zoud0e27c82012-08-14 16:06:43 -07002326 while (i > 0) {
Andy Groverec98f782011-07-20 19:28:46 +00002327 i--;
Nicholas Bellinger20093992013-08-25 15:44:03 -07002328 __free_page(sg_page(&sg[i]));
Andy Groverec98f782011-07-20 19:28:46 +00002329 }
Nicholas Bellinger20093992013-08-25 15:44:03 -07002330 kfree(sg);
Andy Groverec98f782011-07-20 19:28:46 +00002331 return -ENOMEM;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002332}
2333
/*
 * Allocate any required resources to execute the command. For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 *
 * Returns 0 on success or a sense_reason_t (typically
 * TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE) on allocation failure.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;
	/* only zero buffers for non-data CDBs; data CDBs are overwritten */
	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

	/* Protection SGL, unless the fabric supplies its own PI buffer. */
	if (cmd->prot_op != TARGET_PROT_NORMAL &&
	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
				       cmd->prot_length, true);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/*
	 * Determine is the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {

		/* BIDI / COMPARE_AND_WRITE need a second (read) SGL too */
		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
		    cmd->data_length) {
		/*
		 * Special case for COMPARE_AND_WRITE with fabrics
		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
		 */
		u32 caw_length = cmd->t_task_nolb *
				 cmd->se_dev->dev_attrib.block_size;

		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
				       &cmd->t_bidi_data_nents,
				       caw_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
		target_execute_cmd(cmd);
		return 0;
	}
	transport_cmd_check_stop(cmd, false, true);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
	WARN_ON(ret);

	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002425
Christoph Hellwige057f532011-10-17 13:56:41 -04002426static void transport_write_pending_qf(struct se_cmd *cmd)
Nicholas Bellinger07bde792011-06-13 14:46:09 -07002427{
Nicholas Bellingerf147abb2011-10-25 23:57:41 -07002428 int ret;
2429
2430 ret = cmd->se_tfo->write_pending(cmd);
2431 if (ret == -EAGAIN || ret == -ENOMEM) {
Christoph Hellwige057f532011-10-17 13:56:41 -04002432 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
2433 cmd);
2434 transport_handle_queue_full(cmd, cmd->se_dev);
2435 }
Nicholas Bellinger07bde792011-06-13 14:46:09 -07002436}
2437
/*
 * Fabric-facing teardown of a command.  Commands that never reached a
 * LUN (no SCF_SE_LUN_CMD) only drop the descriptor; LUN-attached
 * commands additionally leave the state list, release the LUN reference
 * and free data pages.  @wait_for_tasks blocks until outstanding
 * processing finishes first.  Returns the result of the final kref drop.
 */
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	unsigned long flags;
	int ret = 0;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		/* only TMRs are waited for on this path */
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		ret = transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			target_remove_from_state_list(cmd);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		}

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		ret = transport_put_cmd(cmd);
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);
2470
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 *
 * Returns 0 on success, or -ESHUTDOWN when the session is already being
 * torn down (in which case the extra ack kref, if taken, is dropped again).
 */
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	int ret = 0;

	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref)
		kref_get(&se_cmd->cmd_kref);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	/* reject new commands once session teardown has started */
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	/* undo the ack kref taken above if we failed to join the list */
	if (ret && ack_kref)
		target_put_sess_cmd(se_cmd);

	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07002504
/*
 * kref release callback for se_cmd->cmd_kref.  Invoked by
 * kref_put_spinlock_irqsave() with se_sess->sess_cmd_lock HELD; every
 * path below must release that lock exactly once before returning.
 * When the session is tearing down and a waiter flagged this command,
 * completion is signalled instead of freeing -- the waiter in
 * target_wait_for_sess_cmds() performs the final ->release_cmd().
 */
static void target_release_cmd_kref(struct kref *kref)
		__releases(&se_cmd->se_sess->sess_cmd_lock)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;

	/* already unlinked (e.g. splice during teardown): just free */
	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock(&se_sess->sess_cmd_lock);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	/* a teardown waiter owns the free; wake it instead */
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock(&se_sess->sess_cmd_lock);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock(&se_sess->sess_cmd_lock);

	se_cmd->se_tfo->release_cmd(se_cmd);
}
2526
2527/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08002528 * @se_cmd: command descriptor to drop
2529 */
Bart Van Asscheafc16602015-04-27 13:52:36 +02002530int target_put_sess_cmd(struct se_cmd *se_cmd)
Nicholas Bellinger7481deb2011-11-12 00:32:17 -08002531{
Bart Van Asscheafc16602015-04-27 13:52:36 +02002532 struct se_session *se_sess = se_cmd->se_sess;
2533
Nicholas Bellinger0ed6e182014-06-12 12:45:02 -07002534 if (!se_sess) {
2535 se_cmd->se_tfo->release_cmd(se_cmd);
2536 return 1;
2537 }
Joern Engelccf5ae82013-05-13 16:30:06 -04002538 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
2539 &se_sess->sess_cmd_lock);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07002540}
2541EXPORT_SYMBOL(target_put_sess_cmd);
2542
/* target_sess_cmd_list_set_waiting - Flag all commands in
 * sess_cmd_list to complete cmd_wait_comp. Set
 * sess_tearing_down so no more commands are queued.
 * @se_sess: session to flag
 *
 * Idempotent: a second call while teardown is already in progress is a
 * no-op.  After this returns, target_get_sess_cmd() fails with
 * -ESHUTDOWN and target_wait_for_sess_cmds() can walk sess_wait_list.
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		return;
	}
	se_sess->sess_tearing_down = 1;
	/* move the active list aside so no one else touches it */
	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	/* each flagged cmd will complete cmd_wait_comp on final kref put */
	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
Nicholas Bellingera17f0912011-11-02 21:52:08 -07002567
/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess: session to wait for active I/O
 *
 * Must be called after target_sess_cmd_list_set_waiting(); walks the
 * spliced-off sess_wait_list without the lock (teardown owns it now),
 * waits for each command's final kref put to signal cmd_wait_comp, then
 * performs the deferred ->release_cmd().
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
				&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		/* completed by target_release_cmd_kref() on final put */
		wait_for_completion(&se_cmd->cmd_wait_comp);
		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
			" fabric state: %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		se_cmd->se_tfo->release_cmd(se_cmd);
	}

	/* sanity: no new commands may have been added during teardown */
	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	WARN_ON(!list_empty(&se_sess->sess_cmd_list));
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
2598
/*
 * Kthread body used by transport_clear_lun_ref(): kill the LUN's percpu
 * reference counter, wait for all outstanding references to drain into
 * lun_ref_comp, then signal lun_shutdown_comp so the spawning context
 * can proceed with LUN teardown.
 */
static int transport_clear_lun_ref_thread(void *p)
{
	struct se_lun *lun = p;

	/* Switch lun_ref to atomic mode and begin the drain. */
	percpu_ref_kill(&lun->lun_ref);

	/* Completed when the last lun_ref reference is dropped. */
	wait_for_completion(&lun->lun_ref_comp);
	/* Wake the caller blocked in transport_clear_lun_ref(). */
	complete(&lun->lun_shutdown_comp);

	return 0;
}
2610
/*
 * transport_clear_lun_ref - synchronously drain a LUN's percpu reference
 * @lun: the se_lun being shut down
 *
 * Spawns a kthread to kill and drain lun->lun_ref, then blocks until the
 * thread signals lun_shutdown_comp.  Returns 0 on success or the
 * PTR_ERR() from kthread_run() if the helper thread could not be
 * started (in which case no drain has been initiated).
 */
int transport_clear_lun_ref(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_ref_thread, lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		pr_err("Unable to start clear_lun thread\n");
		return PTR_ERR(kt);
	}
	/* Wait for the kthread to finish draining lun_ref. */
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}
2625
/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd: command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 *
 * Returns false without waiting when the command is not one the core is
 * tracking (neither a LUN command nor a TMR, not a supported SAM opcode,
 * or no longer CMD_T_ACTIVE); otherwise sets CMD_T_STOP, blocks until
 * t_transport_stop_comp fires, clears CMD_T_ACTIVE|CMD_T_STOP and
 * returns true.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/* Only LUN commands and TMRs are subject to the stop handshake. */
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	/* A CDB that never passed opcode setup has nothing to stop. */
	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	/* Command already completed processing; no need to wait. */
	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	/*
	 * CMD_T_STOP tells the completion path to signal
	 * t_transport_stop_comp instead of running normal completion.
	 */
	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
		cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	/* Must drop the lock before sleeping on the completion. */
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
		cmd->tag);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08002675
2676static int transport_get_sense_codes(
2677 struct se_cmd *cmd,
2678 u8 *asc,
2679 u8 *ascq)
2680{
2681 *asc = cmd->scsi_asc;
2682 *ascq = cmd->scsi_ascq;
2683
2684 return 0;
2685}
2686
Sagi Grimberg76736db2014-01-23 19:29:38 +02002687static
2688void transport_err_sector_info(unsigned char *buffer, sector_t bad_sector)
2689{
2690 /* Place failed LBA in sense data information descriptor 0. */
2691 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 0xc;
2692 buffer[SPC_DESC_TYPE_OFFSET] = 0; /* Information */
2693 buffer[SPC_ADDITIONAL_DESC_LEN_OFFSET] = 0xa;
2694 buffer[SPC_VALIDITY_OFFSET] = 0x80;
2695
2696 /* Descriptor Information: failing sector */
2697 put_unaligned_be64(bad_sector, &buffer[12]);
2698}
2699
/*
 * transport_send_check_condition_and_sense - build and queue sense data
 * @cmd: command to complete with CHECK CONDITION status
 * @reason: TCM_* sense reason selecting the SK/ASC/ASCQ combination
 * @from_transport: non-zero when sense was already provided by the
 *	backend transport (with a zero @reason the pre-filled sense
 *	buffer is sent unmodified)
 *
 * Fills cmd->sense_buffer with fixed-format (0x70, current error) sense
 * data per SPC-3, sets SAM_STAT_CHECK_CONDITION, and queues the status
 * to the fabric via se_tfo->queue_status().  Returns the fabric
 * queue_status() result, or 0 if a CHECK CONDITION was already sent for
 * this command (SCF_SENT_CHECK_CONDITION guards against doubles).
 */
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	u8 asc = 0, ascq = 0;

	/* Send sense at most once per command. */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/* Backend already populated sense_buffer: just queue it. */
	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;

	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NO_SENSE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* NO ADDITIONAL SENSE INFORMATION */
		buffer[SPC_ASC_KEY_OFFSET] = 0;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0;
		break;
	case TCM_NON_EXISTENT_LUN:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT NOT SUPPORTED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x25;
		break;
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* PARAMETER LIST LENGTH ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_ADDRESS_OUT_OF_RANGE:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL BLOCK ADDRESS OUT OF RANGE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x21;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		/* ASC/ASCQ come from the pending UA for this I_T nexus. */
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[SPC_ASC_KEY_OFFSET] = asc;
		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* ASC/ASCQ were stashed on the command earlier. */
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[SPC_ASC_KEY_OFFSET] = asc;
		buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_MISCOMPARE_VERIFY:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		buffer[SPC_SENSE_KEY_OFFSET] = MISCOMPARE;
		/* MISCOMPARE DURING VERIFY OPERATION */
		buffer[SPC_ASC_KEY_OFFSET] = 0x1d;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x00;
		break;
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL BLOCK GUARD CHECK FAILED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x01;
		/* DIF failures also report the failing LBA. */
		transport_err_sector_info(buffer, cmd->bad_sector);
		break;
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x02;
		transport_err_sector_info(buffer, cmd->bad_sector);
		break;
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		buffer[SPC_ASC_KEY_OFFSET] = 0x10;
		buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
		transport_err_sector_info(buffer, cmd->bad_sector);
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[0] = 0x70;
		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators. Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[SPC_ASC_KEY_OFFSET] = 0x08;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

after_reason:
	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
2957
/*
 * transport_check_aborted_status - send delayed TASK_ABORTED if pending
 * @cmd: command to check
 * @send_status: non-zero when the fabric wants status sent now
 *
 * Returns 0 when the command is not aborted (caller proceeds normally),
 * 1 when it is aborted — in which case the delayed SAM_STAT_TASK_ABORTED
 * status is queued to the fabric here, unless it was already sent or
 * @send_status is zero.
 */
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;

	/*
	 * If cmd has been aborted but either no status is to be sent or it has
	 * already been sent, just return
	 */
	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
		return 1;

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
		cmd->t_task_cdb[0], cmd->tag);

	/* Clear the flag first so the status is only ever sent once. */
	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);

	return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);
2981
/*
 * transport_send_task_abort - return SAM_STAT_TASK_ABORTED to the fabric
 * @cmd: command being aborted
 *
 * No-op when a CHECK CONDITION has already been sent for this command.
 * For WRITEs still expecting fabric data, only marks the command
 * (CMD_T_ABORTED | SCF_SEND_DELAYED_TAS) so the status is sent later by
 * transport_check_aborted_status(); otherwise queues TASK_ABORTED
 * status immediately.
 */
void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	/* Don't send TASK_ABORTED on top of an already-sent CHECK CONDITION. */
	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response. This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
		cmd->t_task_cdb[0], cmd->tag);

	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);
}
3016
Christoph Hellwigaf877292012-07-08 15:58:49 -04003017static void target_tmr_work(struct work_struct *work)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003018{
Christoph Hellwigaf877292012-07-08 15:58:49 -04003019 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
Andy Grover5951146d2011-07-19 10:26:37 +00003020 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003021 struct se_tmr_req *tmr = cmd->se_tmr_req;
3022 int ret;
3023
3024 switch (tmr->function) {
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07003025 case TMR_ABORT_TASK:
Nicholas Bellinger3d289342012-02-13 02:38:14 -08003026 core_tmr_abort_task(dev, tmr, cmd->se_sess);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003027 break;
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07003028 case TMR_ABORT_TASK_SET:
3029 case TMR_CLEAR_ACA:
3030 case TMR_CLEAR_TASK_SET:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003031 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3032 break;
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07003033 case TMR_LUN_RESET:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003034 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3035 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
3036 TMR_FUNCTION_REJECTED;
3037 break;
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07003038 case TMR_TARGET_WARM_RESET:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003039 tmr->response = TMR_FUNCTION_REJECTED;
3040 break;
Nicholas Bellinger5c6cd612011-03-14 04:06:04 -07003041 case TMR_TARGET_COLD_RESET:
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003042 tmr->response = TMR_FUNCTION_REJECTED;
3043 break;
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003044 default:
Andy Grover6708bb22011-06-08 10:36:43 -07003045 pr_err("Uknown TMR function: 0x%02x.\n",
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003046 tmr->function);
3047 tmr->response = TMR_FUNCTION_REJECTED;
3048 break;
3049 }
3050
3051 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
Andy Grovere3d6f902011-07-19 08:55:10 +00003052 cmd->se_tfo->queue_tm_rsp(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003053
Christoph Hellwigb7b8bef2011-10-17 13:56:44 -04003054 transport_cmd_check_stop_to_fabric(cmd);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003055}
3056
Christoph Hellwigaf877292012-07-08 15:58:49 -04003057int transport_generic_handle_tmr(
3058 struct se_cmd *cmd)
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003059{
Nicholas Bellingerf15e9cd2014-06-09 23:13:20 +00003060 unsigned long flags;
3061
3062 spin_lock_irqsave(&cmd->t_state_lock, flags);
3063 cmd->transport_state |= CMD_T_ACTIVE;
3064 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3065
Christoph Hellwigaf877292012-07-08 15:58:49 -04003066 INIT_WORK(&cmd->work, target_tmr_work);
3067 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
Nicholas Bellingerc66ac9d2010-12-17 11:11:26 -08003068 return 0;
3069}
Christoph Hellwigaf877292012-07-08 15:58:49 -04003070EXPORT_SYMBOL(transport_generic_handle_tmr);
Christoph Hellwig814e5b42015-04-20 15:00:30 +02003071
3072bool
3073target_check_wce(struct se_device *dev)
3074{
3075 bool wce = false;
3076
3077 if (dev->transport->get_write_cache)
3078 wce = dev->transport->get_write_cache(dev);
3079 else if (dev->dev_attrib.emulate_write_cache > 0)
3080 wce = true;
3081
3082 return wce;
3083}
3084
3085bool
3086target_check_fua(struct se_device *dev)
3087{
3088 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3089}