blob: d4c6a318d4c25f4daa2693bf5a1517bcd845aec1 [file] [log] [blame]
/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <asm/unaligned.h>
26
27#include <scsi/scsi.h>
28#include <scsi/scsi_tcq.h>
29
30#include <target/target_core_base.h>
31#include <target/target_core_backend.h>
32#include <target/target_core_fabric.h>
33
34#include "target_core_internal.h"
Nicholas Bellingereba2ca42012-05-30 14:09:10 -070035#include "target_core_alua.h"
Christoph Hellwig88455ec2012-05-20 11:59:13 -040036#include "target_core_pr.h"
37#include "target_core_ua.h"
Nicholas Bellinger04b1b792013-08-22 12:29:59 -070038#include "target_core_xcopy.h"
Christoph Hellwig88455ec2012-05-20 11:59:13 -040039
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040040static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
41{
42 struct t10_alua_tg_pt_gp *tg_pt_gp;
43 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
44
45 /*
46 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
47 */
48 buf[5] = 0x80;
49
50 /*
Hannes Reinecke125d0112013-11-19 09:07:46 +010051 * Set TPGS field for explicit and/or implicit ALUA access type
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040052 * and opteration.
53 *
54 * See spc4r17 section 6.4.2 Table 135
55 */
56 if (!port)
57 return;
58 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
59 if (!tg_pt_gp_mem)
60 return;
61
62 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
63 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
64 if (tg_pt_gp)
65 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
66 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
67}
68
Hannes Reinecke0dfa1c52012-12-17 09:53:35 +010069sense_reason_t
70spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040071{
72 struct se_lun *lun = cmd->se_lun;
73 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerd2c53042014-04-02 13:27:43 -070074 struct se_session *sess = cmd->se_sess;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040075
76 /* Set RMB (removable media) for tape devices */
77 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
78 buf[1] = 0x80;
79
Christoph Hellwig48c25672012-10-10 17:37:17 -040080 buf[2] = 0x05; /* SPC-3 */
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040081
82 /*
83 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
84 *
85 * SPC4 says:
86 * A RESPONSE DATA FORMAT field set to 2h indicates that the
87 * standard INQUIRY data is in the format defined in this
88 * standard. Response data format values less than 2h are
89 * obsolete. Response data format values greater than 2h are
90 * reserved.
91 */
92 buf[3] = 2;
93
94 /*
95 * Enable SCCS and TPGS fields for Emulated ALUA
96 */
Christoph Hellwigc87fbd52012-10-10 17:37:16 -040097 spc_fill_alua_data(lun->lun_sep, buf);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040098
Nicholas Bellingerd397a442013-08-22 14:17:20 -070099 /*
100 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
101 */
102 if (dev->dev_attrib.emulate_3pc)
103 buf[5] |= 0x8;
Nicholas Bellingerbdbad2b2013-12-23 20:32:46 +0000104 /*
Nicholas Bellingerd2c53042014-04-02 13:27:43 -0700105 * Set Protection (PROTECT) bit when DIF has been enabled on the
106 * device, and the transport supports VERIFY + PASS.
Nicholas Bellingerbdbad2b2013-12-23 20:32:46 +0000107 */
Nicholas Bellingerd2c53042014-04-02 13:27:43 -0700108 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
109 if (dev->dev_attrib.pi_prot_type)
110 buf[5] |= 0x1;
111 }
Nicholas Bellingerd397a442013-08-22 14:17:20 -0700112
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400113 buf[7] = 0x2; /* CmdQue=1 */
114
Nicholas Bellingeree60bdd2013-07-24 16:15:08 -0700115 memcpy(&buf[8], "LIO-ORG ", 8);
116 memset(&buf[16], 0x20, 16);
117 memcpy(&buf[16], dev->t10_wwn.model,
118 min_t(size_t, strlen(dev->t10_wwn.model), 16));
119 memcpy(&buf[32], dev->t10_wwn.revision,
120 min_t(size_t, strlen(dev->t10_wwn.revision), 4));
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400121 buf[4] = 31; /* Set additional length to 31 */
122
123 return 0;
124}
Hannes Reinecke0dfa1c52012-12-17 09:53:35 +0100125EXPORT_SYMBOL(spc_emulate_inquiry_std);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400126
127/* unit serial number */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800128static sense_reason_t
129spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400130{
131 struct se_device *dev = cmd->se_dev;
132 u16 len = 0;
133
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400134 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400135 u32 unit_serial_len;
136
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400137 unit_serial_len = strlen(dev->t10_wwn.unit_serial);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400138 unit_serial_len++; /* For NULL Terminator */
139
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400140 len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400141 len++; /* Extra Byte for NULL Terminator */
142 buf[3] = len;
143 }
144 return 0;
145}
146
/*
 * Encode hex digits from the device's unit serial string into the
 * VENDOR SPECIFIC IDENTIFIER (+ EXTENSION) area of an NAA IEEE
 * Registered Extended designator.  The caller has already placed the
 * NAA nibble and company ID, leaving the low nibble of buf[0] free;
 * the first hex digit is OR'd into it, and subsequent digits are
 * packed high-nibble-first into the following bytes.
 */
void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
		unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	int cnt;
	bool next = true;	/* true: fill low nibble of buf[cnt] next */

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
	 * per device uniqueness.
	 */
	for (cnt = 0; *p && cnt < 13; p++) {
		/* hex_to_bin() returns -1 for non-hex characters */
		int val = hex_to_bin(*p);

		if (val < 0)
			continue;	/* skip separators such as '-' */

		if (next) {
			/* complete the byte whose high nibble is set */
			next = false;
			buf[cnt++] |= val;
		} else {
			/* start the next byte with the high nibble */
			next = true;
			buf[cnt] = val << 4;
		}
	}
}
177
/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 *
 * Emits, in order: an NAA IEEE Registered Extended designator (when a
 * unit serial is configured), a T10 vendor ID designator, and — when a
 * fabric port is attached — relative target port, target port group,
 * LU group, SCSI name string and target device designators.  'off'
 * tracks the write position and 'len' the accumulated PAGE LENGTH;
 * both must advance in lockstep with every descriptor emitted.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;	/* first descriptor starts after the 4-byte page header */

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 0)b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
	 */
	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);	/* 4-byte header + 4-byte desc header + 16-byte NAA */

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 *
	 * NOTE(review): prod_len is computed below but never read, and
	 * unit_serial_len likewise — both look like leftovers; confirm
	 * before removing.
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		/* IDENTIFIER payload: "<model>:<unit_serial>" after vendor */
		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				&dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	/* NOTE(review): copies 8 bytes from a 7-char literal — the trailing
	 * NUL of "LIO-ORG" pads the 8-byte T10 VENDOR IDENTIFICATION field */
	memcpy(&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);
	/*
	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
	 */
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = port->sep_tpg;
		/*
		 * Relative target port identifer, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifer */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
		buf[off++] = (port->sep_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (!tg_pt_gp_mem)
			goto check_lu_gp;

		/* snapshot the group id under the member lock */
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifer containing, $FABRIC_MOD
		 * dependent information. For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		/* off already advanced past the length byte, hence off-1 */
		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifer containing, $FABRIC_MOD
		 * dependent information. For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	/* finalize the big-endian PAGE LENGTH in the page header */
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400461
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800462static bool
463spc_check_dev_wce(struct se_device *dev)
464{
465 bool wce = false;
466
467 if (dev->transport->get_write_cache)
468 wce = dev->transport->get_write_cache(dev);
469 else if (dev->dev_attrib.emulate_write_cache > 0)
470 wce = true;
471
472 return wce;
473}
474
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400475/* Extended INQUIRY Data VPD Page */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800476static sense_reason_t
477spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400478{
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800479 struct se_device *dev = cmd->se_dev;
Nicholas Bellingerd2c53042014-04-02 13:27:43 -0700480 struct se_session *sess = cmd->se_sess;
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800481
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400482 buf[3] = 0x3c;
Nicholas Bellinger43bb95c2013-12-23 20:33:21 +0000483 /*
484 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
485 * only for TYPE3 protection.
486 */
Nicholas Bellingerd2c53042014-04-02 13:27:43 -0700487 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
488 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
489 buf[4] = 0x5;
490 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
491 buf[4] = 0x4;
492 }
Nicholas Bellinger43bb95c2013-12-23 20:33:21 +0000493
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400494 /* Set HEADSUP, ORDSUP, SIMPSUP */
495 buf[5] = 0x07;
496
497 /* If WriteCache emulation is enabled, set V_SUP */
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800498 if (spc_check_dev_wce(dev))
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400499 buf[6] = 0x01;
Hannes Reineckec66094b2013-12-17 09:18:49 +0100500 /* If an LBA map is present set R_SUP */
501 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
502 if (!list_empty(&dev->t10_alua.lba_map_list))
503 buf[8] = 0x10;
504 spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400505 return 0;
506}
507
508/* Block Limits VPD page */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800509static sense_reason_t
510spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400511{
512 struct se_device *dev = cmd->se_dev;
513 u32 max_sectors;
514 int have_tp = 0;
Andy Grover7f7caf62013-11-11 08:59:17 -0800515 int opt, min;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400516
517 /*
518 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
519 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
520 * different page length for Thin Provisioning.
521 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400522 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400523 have_tp = 1;
524
525 buf[0] = dev->transport->get_device_type(dev);
526 buf[3] = have_tp ? 0x3c : 0x10;
527
528 /* Set WSNZ to 1 */
529 buf[4] = 0x01;
Nicholas Bellinger0123a9e2013-08-20 14:24:09 -0700530 /*
531 * Set MAXIMUM COMPARE AND WRITE LENGTH
532 */
533 if (dev->dev_attrib.emulate_caw)
534 buf[5] = 0x01;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400535
536 /*
537 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
538 */
Andy Grover7f7caf62013-11-11 08:59:17 -0800539 if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
540 put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
541 else
542 put_unaligned_be16(1, &buf[6]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400543
544 /*
545 * Set MAXIMUM TRANSFER LENGTH
546 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400547 max_sectors = min(dev->dev_attrib.fabric_max_sectors,
548 dev->dev_attrib.hw_max_sectors);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400549 put_unaligned_be32(max_sectors, &buf[8]);
550
551 /*
552 * Set OPTIMAL TRANSFER LENGTH
553 */
Andy Grover7f7caf62013-11-11 08:59:17 -0800554 if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
555 put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
556 else
557 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400558
559 /*
560 * Exit now if we don't support TP.
561 */
562 if (!have_tp)
Nicholas Bellinger773cbaf2012-11-15 11:02:49 -0800563 goto max_write_same;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400564
565 /*
566 * Set MAXIMUM UNMAP LBA COUNT
567 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400568 put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400569
570 /*
571 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
572 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400573 put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400574 &buf[24]);
575
576 /*
577 * Set OPTIMAL UNMAP GRANULARITY
578 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400579 put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400580
581 /*
582 * UNMAP GRANULARITY ALIGNMENT
583 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400584 put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400585 &buf[32]);
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400586 if (dev->dev_attrib.unmap_granularity_alignment != 0)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400587 buf[32] |= 0x80; /* Set the UGAVALID bit */
588
Nicholas Bellinger773cbaf2012-11-15 11:02:49 -0800589 /*
590 * MAXIMUM WRITE SAME LENGTH
591 */
592max_write_same:
593 put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
594
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400595 return 0;
596}
597
598/* Block Device Characteristics VPD page */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800599static sense_reason_t
600spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400601{
602 struct se_device *dev = cmd->se_dev;
603
604 buf[0] = dev->transport->get_device_type(dev);
605 buf[3] = 0x3c;
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400606 buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400607
608 return 0;
609}
610
611/* Thin Provisioning VPD */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800612static sense_reason_t
613spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400614{
615 struct se_device *dev = cmd->se_dev;
616
617 /*
618 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
619 *
620 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
621 * zero, then the page length shall be set to 0004h. If the DP bit
622 * is set to one, then the page length shall be set to the value
623 * defined in table 162.
624 */
625 buf[0] = dev->transport->get_device_type(dev);
626
627 /*
628 * Set Hardcoded length mentioned above for DP=0
629 */
630 put_unaligned_be16(0x0004, &buf[2]);
631
632 /*
633 * The THRESHOLD EXPONENT field indicates the threshold set size in
634 * LBAs as a power of 2 (i.e., the threshold set size is equal to
635 * 2(threshold exponent)).
636 *
637 * Note that this is currently set to 0x00 as mkp says it will be
638 * changing again. We can enable this once it has settled in T10
639 * and is actually used by Linux/SCSI ML code.
640 */
641 buf[4] = 0x00;
642
643 /*
644 * A TPU bit set to one indicates that the device server supports
645 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
646 * that the device server does not support the UNMAP command.
647 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400648 if (dev->dev_attrib.emulate_tpu != 0)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400649 buf[5] = 0x80;
650
651 /*
652 * A TPWS bit set to one indicates that the device server supports
653 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
654 * A TPWS bit set to zero indicates that the device server does not
655 * support the use of the WRITE SAME (16) command to unmap LBAs.
656 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400657 if (dev->dev_attrib.emulate_tpws != 0)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400658 buf[5] |= 0x40;
659
660 return 0;
661}
662
Hannes Reineckec66094b2013-12-17 09:18:49 +0100663/* Referrals VPD page */
664static sense_reason_t
665spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
666{
667 struct se_device *dev = cmd->se_dev;
668
669 buf[0] = dev->transport->get_device_type(dev);
670 buf[3] = 0x0c;
671 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
672 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
673
674 return 0;
675}
676
/* forward declaration: page 0x00 reports the contents of the table below */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

/*
 * Dispatch table mapping INQUIRY EVPD page codes to their emulation
 * handlers.  spc_emulate_evpd_00() reports supported pages straight
 * from this table, in table order.
 */
static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};
693
694/* supported vital product data pages */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800695static sense_reason_t
696spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400697{
698 int p;
699
700 /*
701 * Only report the INQUIRY EVPD=1 pages after a valid NAA
702 * Registered Extended LUN WWN has been set via ConfigFS
703 * during device creation/restart.
704 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400705 if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400706 buf[3] = ARRAY_SIZE(evpd_handlers);
707 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
708 buf[p + 4] = evpd_handlers[p].page;
709 }
710
711 return 0;
712}
713
/*
 * Top-level INQUIRY handler: builds either standard INQUIRY data
 * (EVPD=0) or one of the supported VPD pages (EVPD=1) into a local
 * zeroed buffer, then copies the result into the command's data SGL
 * and completes the command on success.
 */
static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;

	/* staging buffer; the SGL is only mapped once at the end */
	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/* PERIPHERAL QUALIFIER/DEVICE TYPE byte */
	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
		buf[0] = 0x3f; /* Not connected */
	else
		buf[0] = dev->transport->get_device_type(dev);

	/* EVPD=0: standard INQUIRY data; PAGE CODE must then be zero */
	if (!(cdb[1] & 0x1)) {
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		goto out;
	}

	/* EVPD=1: dispatch on the requested VPD page code */
	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			goto out;
		}
	}

	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	/* copy whatever was built (even on error paths) into the SGL */
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
771
Roland Dreierd4b2b862012-10-31 09:16:48 -0700772static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400773{
774 p[0] = 0x01;
775 p[1] = 0x0a;
776
Roland Dreierd4b2b862012-10-31 09:16:48 -0700777 /* No changeable values for now */
778 if (pc == 1)
779 goto out;
780
781out:
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400782 return 12;
783}
784
Roland Dreierd4b2b862012-10-31 09:16:48 -0700785static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400786{
787 p[0] = 0x0a;
788 p[1] = 0x0a;
Roland Dreierd4b2b862012-10-31 09:16:48 -0700789
790 /* No changeable values for now */
791 if (pc == 1)
792 goto out;
793
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400794 p[2] = 2;
795 /*
796 * From spc4r23, 7.4.7 Control mode page
797 *
798 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
799 * restrictions on the algorithm used for reordering commands
800 * having the SIMPLE task attribute (see SAM-4).
801 *
802 * Table 368 -- QUEUE ALGORITHM MODIFIER field
803 * Code Description
804 * 0h Restricted reordering
805 * 1h Unrestricted reordering allowed
806 * 2h to 7h Reserved
807 * 8h to Fh Vendor specific
808 *
809 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
810 * the device server shall order the processing sequence of commands
811 * having the SIMPLE task attribute such that data integrity is maintained
812 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
813 * requests is halted at any time, the final value of all data observable
814 * on the medium shall be the same as if all the commands had been processed
815 * with the ORDERED task attribute).
816 *
817 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
818 * device server may reorder the processing sequence of commands having the
819 * SIMPLE task attribute in any manner. Any data integrity exposures related to
820 * command sequence order shall be explicitly handled by the application client
821 * through the selection of appropriate ommands and task attributes.
822 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400823 p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400824 /*
825 * From spc4r17, section 7.4.6 Control mode Page
826 *
827 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
828 *
829 * 00b: The logical unit shall clear any unit attention condition
830 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
831 * status and shall not establish a unit attention condition when a com-
832 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
833 * status.
834 *
835 * 10b: The logical unit shall not clear any unit attention condition
836 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
837 * status and shall not establish a unit attention condition when
838 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
839 * CONFLICT status.
840 *
841 * 11b a The logical unit shall not clear any unit attention condition
842 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
843 * status and shall establish a unit attention condition for the
844 * initiator port associated with the I_T nexus on which the BUSY,
845 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
846 * Depending on the status, the additional sense code shall be set to
847 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
848 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
849 * command, a unit attention condition shall be established only once
850 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
851 * to the number of commands completed with one of those status codes.
852 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400853 p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
854 (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400855 /*
856 * From spc4r17, section 7.4.6 Control mode Page
857 *
858 * Task Aborted Status (TAS) bit set to zero.
859 *
860 * A task aborted status (TAS) bit set to zero specifies that aborted
861 * tasks shall be terminated by the device server without any response
862 * to the application client. A TAS bit set to one specifies that tasks
863 * aborted by the actions of an I_T nexus other than the I_T nexus on
864 * which the command was received shall be completed with TASK ABORTED
865 * status (see SAM-4).
866 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400867 p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
Nicholas Bellinger0c30f422013-12-23 21:23:34 +0000868 /*
869 * From spc4r30, section 7.5.7 Control mode page
870 *
871 * Application Tag Owner (ATO) bit set to one.
872 *
873 * If the ATO bit is set to one the device server shall not modify the
874 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
875 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
876 * TAG field.
877 */
878 if (dev->dev_attrib.pi_prot_type)
879 p[5] |= 0x80;
880
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400881 p[8] = 0xff;
882 p[9] = 0xff;
883 p[11] = 30;
884
Roland Dreierd4b2b862012-10-31 09:16:48 -0700885out:
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400886 return 12;
887}
888
Roland Dreierd4b2b862012-10-31 09:16:48 -0700889static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400890{
891 p[0] = 0x08;
892 p[1] = 0x12;
Roland Dreierd4b2b862012-10-31 09:16:48 -0700893
894 /* No changeable values for now */
895 if (pc == 1)
896 goto out;
897
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800898 if (spc_check_dev_wce(dev))
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400899 p[2] = 0x04; /* Write Cache Enable */
900 p[12] = 0x20; /* Disabled Read Ahead */
901
Roland Dreierd4b2b862012-10-31 09:16:48 -0700902out:
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400903 return 20;
904}
905
Roland Dreier0f6d64c2012-10-31 09:16:49 -0700906static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
907{
908 p[0] = 0x1c;
909 p[1] = 0x0a;
910
911 /* No changeable values for now */
912 if (pc == 1)
913 goto out;
914
915out:
916 return 12;
917}
918
/*
 * Dispatch table mapping a (page, subpage) pair to its MODE SENSE /
 * MODE SELECT emulation handler.  For a MODE SENSE of page 0x3f
 * ("return all pages") the pages are emitted in table order, so keep
 * the entries sorted by page code.
 */
static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_device *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};
929
/*
 * Set the WP (write protect) bit, bit 7 of the DEVICE-SPECIFIC PARAMETER
 * byte in the mode parameter header.  The bit sits at the same position
 * for every device type we emulate, so there is nothing to dispatch on:
 * the original switch routed TYPE_DISK, TYPE_TAPE and default to the
 * very same statement.  @type is kept for interface compatibility.
 */
static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	buf[0] |= 0x80; /* WP bit */
}
944
945static void spc_modesense_dpofua(unsigned char *buf, int type)
946{
947 switch (type) {
948 case TYPE_DISK:
949 buf[0] |= 0x10; /* DPOFUA bit */
950 break;
951 default:
952 break;
953 }
954}
955
Roland Dreierd4b2b862012-10-31 09:16:48 -0700956static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
957{
958 *buf++ = 8;
959 put_unaligned_be32(min(blocks, 0xffffffffull), buf);
960 buf += 4;
961 put_unaligned_be32(block_size, buf);
962 return 9;
963}
964
965static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
966{
967 if (blocks <= 0xffffffff)
968 return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
969
970 *buf++ = 1; /* LONGLBA */
971 buf += 2;
972 *buf++ = 16;
973 put_unaligned_be64(blocks, buf);
974 buf += 12;
975 put_unaligned_be32(block_size, buf);
976
977 return 17;
978}
979
/*
 * Emulate MODE SENSE (6) / MODE SENSE (10).
 *
 * Builds the full response in a local buffer: mode parameter header,
 * an optional block descriptor (disk devices only, unless DBD is set),
 * then the requested page(s) via modesense_handlers[], and finally
 * copies as much as fits into the command's data buffer.
 */
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);			/* Disable Block Descriptors */
	bool llba = ten ? !!(cdb[1] & 0x10) : false;	/* LLBAA: only in the 10-byte CDB */
	u8 pc = cdb[2] >> 6;				/* page control (current/changeable/...) */
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER: WP for read-only LUN mappings */
	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
	    (cmd->se_deve &&
	     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
		spc_modesense_write_protect(&buf[length], type);

	/* DPOFUA is only advertised when both write cache and FUA writes are on */
	if ((spc_check_dev_wce(dev)) &&
	    (dev->dev_attrib.emulate_fua_write > 0))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
								       blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
								  blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		/* no descriptor: still account for the remaining header bytes */
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		/* 0x3f means "return all pages" (spc4r17 6.11) */
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
				/*
				 * MODE SENSE (6) reports MODE DATA LENGTH in a
				 * single byte: stop before the total overflows it.
				 */
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	/* MODE DATA LENGTH excludes itself: 2 bytes for (10), 1 byte for (6) */
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	/* a NULL mapping (e.g. zero-length transfer) still completes with GOOD */
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}
1102
Christoph Hellwigde103c92012-11-06 12:24:09 -08001103static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
Roland Dreier3a3c5e42012-10-31 09:16:50 -07001104{
1105 struct se_device *dev = cmd->se_dev;
1106 char *cdb = cmd->t_task_cdb;
1107 bool ten = cdb[0] == MODE_SELECT_10;
1108 int off = ten ? 8 : 4;
1109 bool pf = !!(cdb[1] & 0x10);
1110 u8 page, subpage;
1111 unsigned char *buf;
1112 unsigned char tbuf[SE_MODE_PAGE_BUF];
1113 int length;
1114 int ret = 0;
1115 int i;
1116
Roland Dreier71f41fe2013-02-08 15:18:40 -08001117 if (!cmd->data_length) {
1118 target_complete_cmd(cmd, GOOD);
1119 return 0;
1120 }
1121
1122 if (cmd->data_length < off + 2)
1123 return TCM_PARAMETER_LIST_LENGTH_ERROR;
1124
Roland Dreier3a3c5e42012-10-31 09:16:50 -07001125 buf = transport_kmap_data_sg(cmd);
Christoph Hellwigde103c92012-11-06 12:24:09 -08001126 if (!buf)
1127 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
Roland Dreier3a3c5e42012-10-31 09:16:50 -07001128
1129 if (!pf) {
Christoph Hellwigde103c92012-11-06 12:24:09 -08001130 ret = TCM_INVALID_CDB_FIELD;
Roland Dreier3a3c5e42012-10-31 09:16:50 -07001131 goto out;
1132 }
1133
1134 page = buf[off] & 0x3f;
1135 subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
1136
1137 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1138 if (modesense_handlers[i].page == page &&
1139 modesense_handlers[i].subpage == subpage) {
1140 memset(tbuf, 0, SE_MODE_PAGE_BUF);
1141 length = modesense_handlers[i].emulate(dev, 0, tbuf);
1142 goto check_contents;
1143 }
1144
Christoph Hellwigde103c92012-11-06 12:24:09 -08001145 ret = TCM_UNKNOWN_MODE_PAGE;
Roland Dreier3a3c5e42012-10-31 09:16:50 -07001146 goto out;
1147
1148check_contents:
Roland Dreier71f41fe2013-02-08 15:18:40 -08001149 if (cmd->data_length < off + length) {
1150 ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
1151 goto out;
1152 }
1153
Christoph Hellwigde103c92012-11-06 12:24:09 -08001154 if (memcmp(buf + off, tbuf, length))
1155 ret = TCM_INVALID_PARAMETER_LIST;
Roland Dreier3a3c5e42012-10-31 09:16:50 -07001156
1157out:
1158 transport_kunmap_data_sg(cmd);
1159
1160 if (!ret)
1161 target_complete_cmd(cmd, GOOD);
1162 return ret;
1163}
1164
Christoph Hellwigde103c92012-11-06 12:24:09 -08001165static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -04001166{
1167 unsigned char *cdb = cmd->t_task_cdb;
Paolo Bonzini32a88112012-09-07 17:30:36 +02001168 unsigned char *rbuf;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -04001169 u8 ua_asc = 0, ua_ascq = 0;
Paolo Bonzini32a88112012-09-07 17:30:36 +02001170 unsigned char buf[SE_SENSE_BUF];
1171
1172 memset(buf, 0, SE_SENSE_BUF);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -04001173
1174 if (cdb[1] & 0x01) {
1175 pr_err("REQUEST_SENSE description emulation not"
1176 " supported\n");
Christoph Hellwigde103c92012-11-06 12:24:09 -08001177 return TCM_INVALID_CDB_FIELD;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -04001178 }
1179
Paolo Bonzini32a88112012-09-07 17:30:36 +02001180 rbuf = transport_kmap_data_sg(cmd);
Christoph Hellwigde103c92012-11-06 12:24:09 -08001181 if (!rbuf)
1182 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1183
1184 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
Christoph Hellwig1fd032e2012-05-20 11:59:15 -04001185 /*
1186 * CURRENT ERROR, UNIT ATTENTION
1187 */
1188 buf[0] = 0x70;
1189 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
1190
Christoph Hellwig1fd032e2012-05-20 11:59:15 -04001191 /*
1192 * The Additional Sense Code (ASC) from the UNIT ATTENTION
1193 */
1194 buf[SPC_ASC_KEY_OFFSET] = ua_asc;
1195 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
1196 buf[7] = 0x0A;
1197 } else {
1198 /*
1199 * CURRENT ERROR, NO SENSE
1200 */
1201 buf[0] = 0x70;
1202 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
1203
Christoph Hellwig1fd032e2012-05-20 11:59:15 -04001204 /*
1205 * NO ADDITIONAL SENSE INFORMATION
1206 */
1207 buf[SPC_ASC_KEY_OFFSET] = 0x00;
1208 buf[7] = 0x0A;
1209 }
1210
Christoph Hellwigde103c92012-11-06 12:24:09 -08001211 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
1212 transport_kunmap_data_sg(cmd);
Paolo Bonzini32a88112012-09-07 17:30:36 +02001213
Christoph Hellwig1fd032e2012-05-20 11:59:15 -04001214 target_complete_cmd(cmd, GOOD);
1215 return 0;
1216}
1217
Christoph Hellwigde103c92012-11-06 12:24:09 -08001218sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
Christoph Hellwigd1b1f802012-10-07 10:55:51 -04001219{
1220 struct se_dev_entry *deve;
1221 struct se_session *sess = cmd->se_sess;
1222 unsigned char *buf;
1223 u32 lun_count = 0, offset = 8, i;
1224
1225 if (cmd->data_length < 16) {
1226 pr_warn("REPORT LUNS allocation length %u too small\n",
1227 cmd->data_length);
Christoph Hellwigde103c92012-11-06 12:24:09 -08001228 return TCM_INVALID_CDB_FIELD;
Christoph Hellwigd1b1f802012-10-07 10:55:51 -04001229 }
1230
1231 buf = transport_kmap_data_sg(cmd);
1232 if (!buf)
Christoph Hellwigde103c92012-11-06 12:24:09 -08001233 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
Christoph Hellwigd1b1f802012-10-07 10:55:51 -04001234
1235 /*
1236 * If no struct se_session pointer is present, this struct se_cmd is
1237 * coming via a target_core_mod PASSTHROUGH op, and not through
1238 * a $FABRIC_MOD. In that case, report LUN=0 only.
1239 */
1240 if (!sess) {
1241 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
1242 lun_count = 1;
1243 goto done;
1244 }
1245
1246 spin_lock_irq(&sess->se_node_acl->device_list_lock);
1247 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
1248 deve = sess->se_node_acl->device_list[i];
1249 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
1250 continue;
1251 /*
1252 * We determine the correct LUN LIST LENGTH even once we
1253 * have reached the initial allocation length.
1254 * See SPC2-R20 7.19.
1255 */
1256 lun_count++;
1257 if ((offset + 8) > cmd->data_length)
1258 continue;
1259
1260 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
1261 offset += 8;
1262 }
1263 spin_unlock_irq(&sess->se_node_acl->device_list_lock);
1264
1265 /*
1266 * See SPC3 r07, page 159.
1267 */
1268done:
1269 lun_count *= 8;
1270 buf[0] = ((lun_count >> 24) & 0xff);
1271 buf[1] = ((lun_count >> 16) & 0xff);
1272 buf[2] = ((lun_count >> 8) & 0xff);
1273 buf[3] = (lun_count & 0xff);
1274 transport_kunmap_data_sg(cmd);
1275
1276 target_complete_cmd(cmd, GOOD);
1277 return 0;
1278}
Christoph Hellwig8de530a2012-10-07 10:55:52 -04001279EXPORT_SYMBOL(spc_emulate_report_luns);
Christoph Hellwigd1b1f802012-10-07 10:55:51 -04001280
/*
 * Emulate TEST UNIT READY: a configured backend device is always
 * reported ready, so unconditionally complete with GOOD status.
 */
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}
1287
/*
 * Parse the SPC opcodes common to all SCSI device types.
 *
 * Sets *size to the expected data transfer length derived from the CDB
 * and, for opcodes emulated locally, points cmd->execute_cmd at the
 * matching handler (opcodes such as LOG SENSE or WRITE BUFFER get a
 * size only and are left for the backend/passthrough to execute).
 * Returns 0 on success or TCM_UNSUPPORTED_SCSI_OPCODE for anything
 * unrecognized.
 */
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = (cdb[7] << 8) + cdb[8];
		break;
	case PERSISTENT_RESERVE_IN:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		/* SPC-2 RELEASE(6) carries no length; RELEASE(10) does. */
		if (cdb[0] == RELEASE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = (cdb[3] << 8) + cdb[4];

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		break;
	case EXTENDED_COPY:
		/* EXTENDED COPY (LID1/XCOPY) handled by the local copy engine */
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = (cdb[3] << 8) | cdb[4];
		break;
	case WRITE_BUFFER:
		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 * (same opcode byte as MAINTENANCE_IN on TYPE_ROM)
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 * (same opcode byte as MAINTENANCE_OUT on TYPE_ROM)
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);