blob: 4178c2a0f2104ea1955d36b88be4ac978581633c [file] [log] [blame]
/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <asm/unaligned.h>
26
27#include <scsi/scsi.h>
28#include <scsi/scsi_tcq.h>
29
30#include <target/target_core_base.h>
31#include <target/target_core_backend.h>
32#include <target/target_core_fabric.h>
33
34#include "target_core_internal.h"
Nicholas Bellingereba2ca42012-05-30 14:09:10 -070035#include "target_core_alua.h"
Christoph Hellwig88455ec2012-05-20 11:59:13 -040036#include "target_core_pr.h"
37#include "target_core_ua.h"
Nicholas Bellinger04b1b792013-08-22 12:29:59 -070038#include "target_core_xcopy.h"
Christoph Hellwig88455ec2012-05-20 11:59:13 -040039
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040040static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
41{
42 struct t10_alua_tg_pt_gp *tg_pt_gp;
43 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
44
45 /*
46 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
47 */
48 buf[5] = 0x80;
49
50 /*
Hannes Reinecke125d0112013-11-19 09:07:46 +010051 * Set TPGS field for explicit and/or implicit ALUA access type
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040052 * and opteration.
53 *
54 * See spc4r17 section 6.4.2 Table 135
55 */
56 if (!port)
57 return;
58 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
59 if (!tg_pt_gp_mem)
60 return;
61
62 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
63 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
64 if (tg_pt_gp)
65 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
66 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
67}
68
Hannes Reinecke0dfa1c52012-12-17 09:53:35 +010069sense_reason_t
70spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040071{
72 struct se_lun *lun = cmd->se_lun;
73 struct se_device *dev = cmd->se_dev;
74
75 /* Set RMB (removable media) for tape devices */
76 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
77 buf[1] = 0x80;
78
Christoph Hellwig48c25672012-10-10 17:37:17 -040079 buf[2] = 0x05; /* SPC-3 */
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040080
81 /*
82 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
83 *
84 * SPC4 says:
85 * A RESPONSE DATA FORMAT field set to 2h indicates that the
86 * standard INQUIRY data is in the format defined in this
87 * standard. Response data format values less than 2h are
88 * obsolete. Response data format values greater than 2h are
89 * reserved.
90 */
91 buf[3] = 2;
92
93 /*
94 * Enable SCCS and TPGS fields for Emulated ALUA
95 */
Christoph Hellwigc87fbd52012-10-10 17:37:16 -040096 spc_fill_alua_data(lun->lun_sep, buf);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -040097
Nicholas Bellingerd397a442013-08-22 14:17:20 -070098 /*
99 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
100 */
101 if (dev->dev_attrib.emulate_3pc)
102 buf[5] |= 0x8;
Nicholas Bellingerbdbad2b2013-12-23 20:32:46 +0000103 /*
104 * Set Protection (PROTECT) bit when DIF has been enabled.
105 */
106 if (dev->dev_attrib.pi_prot_type)
107 buf[5] |= 0x1;
Nicholas Bellingerd397a442013-08-22 14:17:20 -0700108
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400109 buf[7] = 0x2; /* CmdQue=1 */
110
Nicholas Bellingeree60bdd2013-07-24 16:15:08 -0700111 memcpy(&buf[8], "LIO-ORG ", 8);
112 memset(&buf[16], 0x20, 16);
113 memcpy(&buf[16], dev->t10_wwn.model,
114 min_t(size_t, strlen(dev->t10_wwn.model), 16));
115 memcpy(&buf[32], dev->t10_wwn.revision,
116 min_t(size_t, strlen(dev->t10_wwn.revision), 4));
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400117 buf[4] = 31; /* Set additional length to 31 */
118
119 return 0;
120}
Hannes Reinecke0dfa1c52012-12-17 09:53:35 +0100121EXPORT_SYMBOL(spc_emulate_inquiry_std);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400122
123/* unit serial number */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800124static sense_reason_t
125spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400126{
127 struct se_device *dev = cmd->se_dev;
128 u16 len = 0;
129
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400130 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400131 u32 unit_serial_len;
132
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400133 unit_serial_len = strlen(dev->t10_wwn.unit_serial);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400134 unit_serial_len++; /* For NULL Terminator */
135
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400136 len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400137 len++; /* Extra Byte for NULL Terminator */
138 buf[3] = len;
139 }
140 return 0;
141}
142
Nicholas Bellinger68366022013-08-20 17:02:21 -0700143void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
144 unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400145{
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400146 unsigned char *p = &dev->t10_wwn.unit_serial[0];
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400147 int cnt;
148 bool next = true;
149
150 /*
151 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
152 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
153 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
154 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
155 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
156 * per device uniqeness.
157 */
158 for (cnt = 0; *p && cnt < 13; p++) {
159 int val = hex_to_bin(*p);
160
161 if (val < 0)
162 continue;
163
164 if (next) {
165 next = false;
166 buf[cnt++] |= val;
167 } else {
168 next = true;
169 buf[cnt] = val << 4;
170 }
171 }
172}
173
174/*
175 * Device identification VPD, for a complete list of
176 * DESIGNATOR TYPEs see spc4r17 Table 459.
177 */
Hannes Reinecke0dfa1c52012-12-17 09:53:35 +0100178sense_reason_t
Christoph Hellwigde103c92012-11-06 12:24:09 -0800179spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400180{
181 struct se_device *dev = cmd->se_dev;
182 struct se_lun *lun = cmd->se_lun;
183 struct se_port *port = NULL;
184 struct se_portal_group *tpg = NULL;
185 struct t10_alua_lu_gp_member *lu_gp_mem;
186 struct t10_alua_tg_pt_gp *tg_pt_gp;
187 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400188 unsigned char *prod = &dev->t10_wwn.model[0];
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400189 u32 prod_len;
190 u32 unit_serial_len, off = 0;
191 u16 len = 0, id_len;
192
193 off = 4;
194
195 /*
196 * NAA IEEE Registered Extended Assigned designator format, see
197 * spc4r17 section 7.7.3.6.5
198 *
199 * We depend upon a target_core_mod/ConfigFS provided
200 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
201 * value in order to return the NAA id.
202 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400203 if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400204 goto check_t10_vend_desc;
205
206 /* CODE SET == Binary */
207 buf[off++] = 0x1;
208
209 /* Set ASSOCIATION == addressed logical unit: 0)b */
210 buf[off] = 0x00;
211
212 /* Identifier/Designator type == NAA identifier */
213 buf[off++] |= 0x3;
214 off++;
215
216 /* Identifier/Designator length */
217 buf[off++] = 0x10;
218
219 /*
220 * Start NAA IEEE Registered Extended Identifier/Designator
221 */
222 buf[off++] = (0x6 << 4);
223
224 /*
225 * Use OpenFabrics IEEE Company ID: 00 14 05
226 */
227 buf[off++] = 0x01;
228 buf[off++] = 0x40;
229 buf[off] = (0x5 << 4);
230
231 /*
232 * Return ConfigFS Unit Serial Number information for
233 * VENDOR_SPECIFIC_IDENTIFIER and
234 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
235 */
236 spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
237
238 len = 20;
239 off = (len + 4);
240
241check_t10_vend_desc:
242 /*
243 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
244 */
245 id_len = 8; /* For Vendor field */
246 prod_len = 4; /* For VPD Header */
247 prod_len += 8; /* For Vendor field */
248 prod_len += strlen(prod);
249 prod_len++; /* For : */
250
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400251 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
252 unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400253 unit_serial_len++; /* For NULL Terminator */
254
255 id_len += sprintf(&buf[off+12], "%s:%s", prod,
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400256 &dev->t10_wwn.unit_serial[0]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400257 }
258 buf[off] = 0x2; /* ASCII */
259 buf[off+1] = 0x1; /* T10 Vendor ID */
260 buf[off+2] = 0x0;
261 memcpy(&buf[off+4], "LIO-ORG", 8);
262 /* Extra Byte for NULL Terminator */
263 id_len++;
264 /* Identifier Length */
265 buf[off+3] = id_len;
266 /* Header size for Designation descriptor */
267 len += (id_len + 4);
268 off += (id_len + 4);
269 /*
270 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
271 */
272 port = lun->lun_sep;
273 if (port) {
274 struct t10_alua_lu_gp *lu_gp;
Hannes Reineckefbfe8582013-12-17 09:18:48 +0100275 u32 padding, scsi_name_len, scsi_target_len;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400276 u16 lu_gp_id = 0;
277 u16 tg_pt_gp_id = 0;
278 u16 tpgt;
279
280 tpg = port->sep_tpg;
281 /*
282 * Relative target port identifer, see spc4r17
283 * section 7.7.3.7
284 *
285 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
286 * section 7.5.1 Table 362
287 */
288 buf[off] =
289 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
290 buf[off++] |= 0x1; /* CODE SET == Binary */
291 buf[off] = 0x80; /* Set PIV=1 */
292 /* Set ASSOCIATION == target port: 01b */
293 buf[off] |= 0x10;
294 /* DESIGNATOR TYPE == Relative target port identifer */
295 buf[off++] |= 0x4;
296 off++; /* Skip over Reserved */
297 buf[off++] = 4; /* DESIGNATOR LENGTH */
298 /* Skip over Obsolete field in RTPI payload
299 * in Table 472 */
300 off += 2;
301 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
302 buf[off++] = (port->sep_rtpi & 0xff);
303 len += 8; /* Header size + Designation descriptor */
304 /*
305 * Target port group identifier, see spc4r17
306 * section 7.7.3.8
307 *
308 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
309 * section 7.5.1 Table 362
310 */
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400311 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
312 if (!tg_pt_gp_mem)
313 goto check_lu_gp;
314
315 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
316 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
317 if (!tg_pt_gp) {
318 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
319 goto check_lu_gp;
320 }
321 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
322 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
323
324 buf[off] =
325 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
326 buf[off++] |= 0x1; /* CODE SET == Binary */
327 buf[off] = 0x80; /* Set PIV=1 */
328 /* Set ASSOCIATION == target port: 01b */
329 buf[off] |= 0x10;
330 /* DESIGNATOR TYPE == Target port group identifier */
331 buf[off++] |= 0x5;
332 off++; /* Skip over Reserved */
333 buf[off++] = 4; /* DESIGNATOR LENGTH */
334 off += 2; /* Skip over Reserved Field */
335 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
336 buf[off++] = (tg_pt_gp_id & 0xff);
337 len += 8; /* Header size + Designation descriptor */
338 /*
339 * Logical Unit Group identifier, see spc4r17
340 * section 7.7.3.8
341 */
342check_lu_gp:
343 lu_gp_mem = dev->dev_alua_lu_gp_mem;
344 if (!lu_gp_mem)
345 goto check_scsi_name;
346
347 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
348 lu_gp = lu_gp_mem->lu_gp;
349 if (!lu_gp) {
350 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
351 goto check_scsi_name;
352 }
353 lu_gp_id = lu_gp->lu_gp_id;
354 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
355
356 buf[off++] |= 0x1; /* CODE SET == Binary */
357 /* DESIGNATOR TYPE == Logical Unit Group identifier */
358 buf[off++] |= 0x6;
359 off++; /* Skip over Reserved */
360 buf[off++] = 4; /* DESIGNATOR LENGTH */
361 off += 2; /* Skip over Reserved Field */
362 buf[off++] = ((lu_gp_id >> 8) & 0xff);
363 buf[off++] = (lu_gp_id & 0xff);
364 len += 8; /* Header size + Designation descriptor */
365 /*
366 * SCSI name string designator, see spc4r17
367 * section 7.7.3.11
368 *
369 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
370 * section 7.5.1 Table 362
371 */
372check_scsi_name:
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400373 buf[off] =
374 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
375 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
376 buf[off] = 0x80; /* Set PIV=1 */
377 /* Set ASSOCIATION == target port: 01b */
378 buf[off] |= 0x10;
379 /* DESIGNATOR TYPE == SCSI name string */
380 buf[off++] |= 0x8;
381 off += 2; /* Skip over Reserved and length */
382 /*
383 * SCSI name string identifer containing, $FABRIC_MOD
384 * dependent information. For LIO-Target and iSCSI
385 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
386 * UTF-8 encoding.
387 */
388 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
389 scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
390 tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
391 scsi_name_len += 1 /* Include NULL terminator */;
392 /*
393 * The null-terminated, null-padded (see 4.4.2) SCSI
394 * NAME STRING field contains a UTF-8 format string.
395 * The number of bytes in the SCSI NAME STRING field
396 * (i.e., the value in the DESIGNATOR LENGTH field)
397 * shall be no larger than 256 and shall be a multiple
398 * of four.
399 */
Hannes Reinecke03ba84c2013-12-17 09:18:47 +0100400 padding = ((-scsi_name_len) & 3);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400401 if (padding)
402 scsi_name_len += padding;
Hannes Reinecke03ba84c2013-12-17 09:18:47 +0100403 if (scsi_name_len > 256)
404 scsi_name_len = 256;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400405
406 buf[off-1] = scsi_name_len;
407 off += scsi_name_len;
408 /* Header size + Designation descriptor */
409 len += (scsi_name_len + 4);
Hannes Reineckefbfe8582013-12-17 09:18:48 +0100410
411 /*
412 * Target device designator
413 */
414 buf[off] =
415 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
416 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
417 buf[off] = 0x80; /* Set PIV=1 */
418 /* Set ASSOCIATION == target device: 10b */
419 buf[off] |= 0x20;
420 /* DESIGNATOR TYPE == SCSI name string */
421 buf[off++] |= 0x8;
422 off += 2; /* Skip over Reserved and length */
423 /*
424 * SCSI name string identifer containing, $FABRIC_MOD
425 * dependent information. For LIO-Target and iSCSI
426 * Target Port, this means "<iSCSI name>" in
427 * UTF-8 encoding.
428 */
429 scsi_target_len = sprintf(&buf[off], "%s",
430 tpg->se_tpg_tfo->tpg_get_wwn(tpg));
431 scsi_target_len += 1 /* Include NULL terminator */;
432 /*
433 * The null-terminated, null-padded (see 4.4.2) SCSI
434 * NAME STRING field contains a UTF-8 format string.
435 * The number of bytes in the SCSI NAME STRING field
436 * (i.e., the value in the DESIGNATOR LENGTH field)
437 * shall be no larger than 256 and shall be a multiple
438 * of four.
439 */
440 padding = ((-scsi_target_len) & 3);
441 if (padding)
442 scsi_target_len += padding;
443 if (scsi_name_len > 256)
444 scsi_name_len = 256;
445
446 buf[off-1] = scsi_target_len;
447 off += scsi_target_len;
448
449 /* Header size + Designation descriptor */
450 len += (scsi_target_len + 4);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400451 }
452 buf[2] = ((len >> 8) & 0xff);
453 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
454 return 0;
455}
Hannes Reinecke0dfa1c52012-12-17 09:53:35 +0100456EXPORT_SYMBOL(spc_emulate_evpd_83);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400457
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800458static bool
459spc_check_dev_wce(struct se_device *dev)
460{
461 bool wce = false;
462
463 if (dev->transport->get_write_cache)
464 wce = dev->transport->get_write_cache(dev);
465 else if (dev->dev_attrib.emulate_write_cache > 0)
466 wce = true;
467
468 return wce;
469}
470
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400471/* Extended INQUIRY Data VPD Page */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800472static sense_reason_t
473spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400474{
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800475 struct se_device *dev = cmd->se_dev;
476
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400477 buf[3] = 0x3c;
478 /* Set HEADSUP, ORDSUP, SIMPSUP */
479 buf[5] = 0x07;
480
481 /* If WriteCache emulation is enabled, set V_SUP */
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800482 if (spc_check_dev_wce(dev))
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400483 buf[6] = 0x01;
Hannes Reineckec66094b2013-12-17 09:18:49 +0100484 /* If an LBA map is present set R_SUP */
485 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
486 if (!list_empty(&dev->t10_alua.lba_map_list))
487 buf[8] = 0x10;
488 spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400489 return 0;
490}
491
492/* Block Limits VPD page */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800493static sense_reason_t
494spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400495{
496 struct se_device *dev = cmd->se_dev;
497 u32 max_sectors;
498 int have_tp = 0;
Andy Grover7f7caf62013-11-11 08:59:17 -0800499 int opt, min;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400500
501 /*
502 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
503 * emulate_tpu=1 or emulate_tpws=1 we will be expect a
504 * different page length for Thin Provisioning.
505 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400506 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400507 have_tp = 1;
508
509 buf[0] = dev->transport->get_device_type(dev);
510 buf[3] = have_tp ? 0x3c : 0x10;
511
512 /* Set WSNZ to 1 */
513 buf[4] = 0x01;
Nicholas Bellinger0123a9e2013-08-20 14:24:09 -0700514 /*
515 * Set MAXIMUM COMPARE AND WRITE LENGTH
516 */
517 if (dev->dev_attrib.emulate_caw)
518 buf[5] = 0x01;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400519
520 /*
521 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
522 */
Andy Grover7f7caf62013-11-11 08:59:17 -0800523 if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
524 put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
525 else
526 put_unaligned_be16(1, &buf[6]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400527
528 /*
529 * Set MAXIMUM TRANSFER LENGTH
530 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400531 max_sectors = min(dev->dev_attrib.fabric_max_sectors,
532 dev->dev_attrib.hw_max_sectors);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400533 put_unaligned_be32(max_sectors, &buf[8]);
534
535 /*
536 * Set OPTIMAL TRANSFER LENGTH
537 */
Andy Grover7f7caf62013-11-11 08:59:17 -0800538 if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
539 put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
540 else
541 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400542
543 /*
544 * Exit now if we don't support TP.
545 */
546 if (!have_tp)
Nicholas Bellinger773cbaf2012-11-15 11:02:49 -0800547 goto max_write_same;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400548
549 /*
550 * Set MAXIMUM UNMAP LBA COUNT
551 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400552 put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400553
554 /*
555 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
556 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400557 put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400558 &buf[24]);
559
560 /*
561 * Set OPTIMAL UNMAP GRANULARITY
562 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400563 put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400564
565 /*
566 * UNMAP GRANULARITY ALIGNMENT
567 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400568 put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400569 &buf[32]);
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400570 if (dev->dev_attrib.unmap_granularity_alignment != 0)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400571 buf[32] |= 0x80; /* Set the UGAVALID bit */
572
Nicholas Bellinger773cbaf2012-11-15 11:02:49 -0800573 /*
574 * MAXIMUM WRITE SAME LENGTH
575 */
576max_write_same:
577 put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
578
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400579 return 0;
580}
581
582/* Block Device Characteristics VPD page */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800583static sense_reason_t
584spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400585{
586 struct se_device *dev = cmd->se_dev;
587
588 buf[0] = dev->transport->get_device_type(dev);
589 buf[3] = 0x3c;
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400590 buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400591
592 return 0;
593}
594
595/* Thin Provisioning VPD */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800596static sense_reason_t
597spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400598{
599 struct se_device *dev = cmd->se_dev;
600
601 /*
602 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
603 *
604 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
605 * zero, then the page length shall be set to 0004h. If the DP bit
606 * is set to one, then the page length shall be set to the value
607 * defined in table 162.
608 */
609 buf[0] = dev->transport->get_device_type(dev);
610
611 /*
612 * Set Hardcoded length mentioned above for DP=0
613 */
614 put_unaligned_be16(0x0004, &buf[2]);
615
616 /*
617 * The THRESHOLD EXPONENT field indicates the threshold set size in
618 * LBAs as a power of 2 (i.e., the threshold set size is equal to
619 * 2(threshold exponent)).
620 *
621 * Note that this is currently set to 0x00 as mkp says it will be
622 * changing again. We can enable this once it has settled in T10
623 * and is actually used by Linux/SCSI ML code.
624 */
625 buf[4] = 0x00;
626
627 /*
628 * A TPU bit set to one indicates that the device server supports
629 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
630 * that the device server does not support the UNMAP command.
631 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400632 if (dev->dev_attrib.emulate_tpu != 0)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400633 buf[5] = 0x80;
634
635 /*
636 * A TPWS bit set to one indicates that the device server supports
637 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
638 * A TPWS bit set to zero indicates that the device server does not
639 * support the use of the WRITE SAME (16) command to unmap LBAs.
640 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400641 if (dev->dev_attrib.emulate_tpws != 0)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400642 buf[5] |= 0x40;
643
644 return 0;
645}
646
Hannes Reineckec66094b2013-12-17 09:18:49 +0100647/* Referrals VPD page */
648static sense_reason_t
649spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
650{
651 struct se_device *dev = cmd->se_dev;
652
653 buf[0] = dev->transport->get_device_type(dev);
654 buf[3] = 0x0c;
655 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
656 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);
657
658 return 0;
659}
660
Christoph Hellwigde103c92012-11-06 12:24:09 -0800661static sense_reason_t
662spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400663
664static struct {
665 uint8_t page;
Christoph Hellwigde103c92012-11-06 12:24:09 -0800666 sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400667} evpd_handlers[] = {
668 { .page = 0x00, .emulate = spc_emulate_evpd_00 },
669 { .page = 0x80, .emulate = spc_emulate_evpd_80 },
670 { .page = 0x83, .emulate = spc_emulate_evpd_83 },
671 { .page = 0x86, .emulate = spc_emulate_evpd_86 },
672 { .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
673 { .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
674 { .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
Hannes Reineckec66094b2013-12-17 09:18:49 +0100675 { .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400676};
677
678/* supported vital product data pages */
Christoph Hellwigde103c92012-11-06 12:24:09 -0800679static sense_reason_t
680spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400681{
682 int p;
683
684 /*
685 * Only report the INQUIRY EVPD=1 pages after a valid NAA
686 * Registered Extended LUN WWN has been set via ConfigFS
687 * during device creation/restart.
688 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400689 if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400690 buf[3] = ARRAY_SIZE(evpd_handlers);
691 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
692 buf[p + 4] = evpd_handlers[p].page;
693 }
694
695 return 0;
696}
697
Christoph Hellwigde103c92012-11-06 12:24:09 -0800698static sense_reason_t
699spc_emulate_inquiry(struct se_cmd *cmd)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400700{
701 struct se_device *dev = cmd->se_dev;
702 struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
Paolo Bonziniffe7b0e2012-09-07 17:30:38 +0200703 unsigned char *rbuf;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400704 unsigned char *cdb = cmd->t_task_cdb;
Nicholas Bellingerf82f3202013-12-19 14:13:28 -0800705 unsigned char *buf;
Christoph Hellwigde103c92012-11-06 12:24:09 -0800706 sense_reason_t ret;
707 int p;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400708
Nicholas Bellingerf82f3202013-12-19 14:13:28 -0800709 buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
710 if (!buf) {
711 pr_err("Unable to allocate response buffer for INQUIRY\n");
712 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
713 }
Nicholas Bellingerdea5f092012-10-31 22:04:26 -0700714
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400715 if (dev == tpg->tpg_virt_lun0.lun_se_dev)
716 buf[0] = 0x3f; /* Not connected */
717 else
718 buf[0] = dev->transport->get_device_type(dev);
719
720 if (!(cdb[1] & 0x1)) {
721 if (cdb[2]) {
722 pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
723 cdb[2]);
Christoph Hellwigde103c92012-11-06 12:24:09 -0800724 ret = TCM_INVALID_CDB_FIELD;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400725 goto out;
726 }
727
728 ret = spc_emulate_inquiry_std(cmd, buf);
729 goto out;
730 }
731
732 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
733 if (cdb[2] == evpd_handlers[p].page) {
734 buf[1] = cdb[2];
735 ret = evpd_handlers[p].emulate(cmd, buf);
736 goto out;
737 }
738 }
739
740 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
Christoph Hellwigde103c92012-11-06 12:24:09 -0800741 ret = TCM_INVALID_CDB_FIELD;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400742
743out:
Paolo Bonziniffe7b0e2012-09-07 17:30:38 +0200744 rbuf = transport_kmap_data_sg(cmd);
Nicholas Bellinger49df9fc2013-01-29 13:33:05 -0800745 if (rbuf) {
Nicholas Bellingerf82f3202013-12-19 14:13:28 -0800746 memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
Nicholas Bellinger49df9fc2013-01-29 13:33:05 -0800747 transport_kunmap_data_sg(cmd);
748 }
Nicholas Bellingerf82f3202013-12-19 14:13:28 -0800749 kfree(buf);
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400750
751 if (!ret)
752 target_complete_cmd(cmd, GOOD);
753 return ret;
754}
755
Roland Dreierd4b2b862012-10-31 09:16:48 -0700756static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400757{
758 p[0] = 0x01;
759 p[1] = 0x0a;
760
Roland Dreierd4b2b862012-10-31 09:16:48 -0700761 /* No changeable values for now */
762 if (pc == 1)
763 goto out;
764
765out:
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400766 return 12;
767}
768
Roland Dreierd4b2b862012-10-31 09:16:48 -0700769static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400770{
771 p[0] = 0x0a;
772 p[1] = 0x0a;
Roland Dreierd4b2b862012-10-31 09:16:48 -0700773
774 /* No changeable values for now */
775 if (pc == 1)
776 goto out;
777
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400778 p[2] = 2;
779 /*
780 * From spc4r23, 7.4.7 Control mode page
781 *
782 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
783 * restrictions on the algorithm used for reordering commands
784 * having the SIMPLE task attribute (see SAM-4).
785 *
786 * Table 368 -- QUEUE ALGORITHM MODIFIER field
787 * Code Description
788 * 0h Restricted reordering
789 * 1h Unrestricted reordering allowed
790 * 2h to 7h Reserved
791 * 8h to Fh Vendor specific
792 *
793 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
794 * the device server shall order the processing sequence of commands
795 * having the SIMPLE task attribute such that data integrity is maintained
796 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
797 * requests is halted at any time, the final value of all data observable
798 * on the medium shall be the same as if all the commands had been processed
799 * with the ORDERED task attribute).
800 *
801 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
802 * device server may reorder the processing sequence of commands having the
803 * SIMPLE task attribute in any manner. Any data integrity exposures related to
804 * command sequence order shall be explicitly handled by the application client
805 * through the selection of appropriate ommands and task attributes.
806 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400807 p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400808 /*
809 * From spc4r17, section 7.4.6 Control mode Page
810 *
811 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
812 *
813 * 00b: The logical unit shall clear any unit attention condition
814 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
815 * status and shall not establish a unit attention condition when a com-
816 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
817 * status.
818 *
819 * 10b: The logical unit shall not clear any unit attention condition
820 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
821 * status and shall not establish a unit attention condition when
822 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
823 * CONFLICT status.
824 *
825 * 11b a The logical unit shall not clear any unit attention condition
826 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
827 * status and shall establish a unit attention condition for the
828 * initiator port associated with the I_T nexus on which the BUSY,
829 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
830 * Depending on the status, the additional sense code shall be set to
831 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
832 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
833 * command, a unit attention condition shall be established only once
834 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
835 * to the number of commands completed with one of those status codes.
836 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400837 p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
838 (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400839 /*
840 * From spc4r17, section 7.4.6 Control mode Page
841 *
842 * Task Aborted Status (TAS) bit set to zero.
843 *
844 * A task aborted status (TAS) bit set to zero specifies that aborted
845 * tasks shall be terminated by the device server without any response
846 * to the application client. A TAS bit set to one specifies that tasks
847 * aborted by the actions of an I_T nexus other than the I_T nexus on
848 * which the command was received shall be completed with TASK ABORTED
849 * status (see SAM-4).
850 */
Christoph Hellwig0fd97cc2012-10-08 00:03:19 -0400851 p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400852 p[8] = 0xff;
853 p[9] = 0xff;
854 p[11] = 30;
855
Roland Dreierd4b2b862012-10-31 09:16:48 -0700856out:
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400857 return 12;
858}
859
Roland Dreierd4b2b862012-10-31 09:16:48 -0700860static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400861{
862 p[0] = 0x08;
863 p[1] = 0x12;
Roland Dreierd4b2b862012-10-31 09:16:48 -0700864
865 /* No changeable values for now */
866 if (pc == 1)
867 goto out;
868
Nicholas Bellingerd0c8b252013-01-29 22:10:06 -0800869 if (spc_check_dev_wce(dev))
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400870 p[2] = 0x04; /* Write Cache Enable */
871 p[12] = 0x20; /* Disabled Read Ahead */
872
Roland Dreierd4b2b862012-10-31 09:16:48 -0700873out:
Christoph Hellwig1fd032e2012-05-20 11:59:15 -0400874 return 20;
875}
876
Roland Dreier0f6d64c2012-10-31 09:16:49 -0700877static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
878{
879 p[0] = 0x1c;
880 p[1] = 0x0a;
881
882 /* No changeable values for now */
883 if (pc == 1)
884 goto out;
885
886out:
887 return 12;
888}
889
/*
 * Table of emulated mode pages, keyed by (PAGE CODE, SUBPAGE CODE).
 * Each handler fills its page into the supplied buffer and returns the
 * number of bytes written; the u8 argument is the PAGE CONTROL value
 * (handlers treat 1, i.e. "changeable values", specially).
 */
static struct {
	uint8_t page;		/* PAGE CODE */
	uint8_t subpage;	/* SUBPAGE CODE */
	int (*emulate)(struct se_device *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};
900
static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * Set the WP (write protect) bit, bit 7 of the DEVICE-SPECIFIC
	 * PARAMETER byte of the mode parameter header.  The original code
	 * used a switch on @type in which every case (TYPE_DISK, TYPE_TAPE
	 * and default) executed the same statement; the bit lives in the
	 * same place for all device types handled here, so set it
	 * unconditionally.  @type is kept for interface compatibility and
	 * symmetry with spc_modesense_dpofua().
	 */
	buf[0] |= 0x80;	/* WP bit */
}
915
916static void spc_modesense_dpofua(unsigned char *buf, int type)
917{
918 switch (type) {
919 case TYPE_DISK:
920 buf[0] |= 0x10; /* DPOFUA bit */
921 break;
922 default:
923 break;
924 }
925}
926
Roland Dreierd4b2b862012-10-31 09:16:48 -0700927static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
928{
929 *buf++ = 8;
930 put_unaligned_be32(min(blocks, 0xffffffffull), buf);
931 buf += 4;
932 put_unaligned_be32(block_size, buf);
933 return 9;
934}
935
936static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
937{
938 if (blocks <= 0xffffffff)
939 return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
940
941 *buf++ = 1; /* LONGLBA */
942 buf += 2;
943 *buf++ = 16;
944 put_unaligned_be64(blocks, buf);
945 buf += 12;
946 put_unaligned_be32(block_size, buf);
947
948 return 17;
949}
950
/*
 * Emulate MODE SENSE(6) and MODE SENSE(10).
 *
 * Builds the complete response in a local buffer: mode parameter header,
 * an optional block descriptor (disk devices only; short or long-LBA
 * form), then the requested mode page(s) from modesense_handlers[].
 * PAGE CODE 0x3f requests all pages; SUBPAGE CODE 0xff all subpages.
 * The result is copied to the command's data buffer, truncated to the
 * allocation length, and the command is completed with GOOD status.
 */
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);		/* Disable Block Descriptors */
	bool llba = ten ? !!(cdb[1] & 0x10) : false;	/* LLBAA, MODE SENSE(10) only */
	u8 pc = cdb[2] >> 6;			/* PAGE CONTROL field */
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
	    (cmd->se_deve &&
	     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
		spc_modesense_write_protect(&buf[length], type);

	/* Report DPO/FUA only with write cache plus FUA write emulation on */
	if ((spc_check_dev_wce(dev)) &&
	    (dev->dev_attrib.emulate_fua_write > 0))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
						blocks, block_size);
			} else {
				/* LONGLBA byte + reserved + BD LENGTH high byte */
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
						blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
					block_size);
		}
	} else {
		/* No descriptor: just skip the remaining header bytes */
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		/* Return all pages: only subpage 00h or "all" is valid here */
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
				/*
				 * MODE SENSE(6) carries MODE DATA LENGTH in a
				 * single byte: stop before it can overflow.
				 */
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	/* Single page requested: exact (page, subpage) match */
	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	/* MODE DATA LENGTH excludes itself: 2 bytes for (10), 1 for (6) */
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}
1073
/*
 * Emulate MODE SELECT(6) and MODE SELECT(10).
 *
 * All mode pages emulated here are effectively read-only, so a MODE
 * SELECT is accepted only when its parameter data is byte-for-byte
 * identical to what we would report for that page (re-generated into
 * tbuf below); any difference is rejected as TCM_INVALID_PARAMETER_LIST.
 */
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;		/* mode parameter header size */
	bool pf = !!(cdb[1] & 0x10);	/* PF: standard page format */
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	int ret = 0;
	int i;

	/* Zero parameter list length: nothing to check, complete with GOOD */
	if (!cmd->data_length) {
		target_complete_cmd(cmd, GOOD);
		return 0;
	}

	/* Need at least the header plus the page code/length bytes */
	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* Only the standard (PF=1) page format is supported */
	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	/* Bit 6 of the page code byte (SPF) selects the subpage format */
	page = buf[off] & 0x3f;
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	/* Regenerate the current page contents for comparison */
	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(dev, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	/* Accept only data identical to the current (read-only) page */
	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
1135
/*
 * Emulate REQUEST SENSE.
 *
 * Returns fixed-format (0x70) sense data: either a pending UNIT
 * ATTENTION for this I_T nexus (cleared by the lookup) or NO SENSE.
 * Descriptor-format sense (DESC bit, cdb[1] & 0x01) is not supported
 * and is rejected as an invalid CDB field.
 */
static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];

	memset(buf, 0, SE_SENSE_BUF);

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
			" supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
		/*
		 * CURRENT ERROR, UNIT ATTENTION
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;

		/*
		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
		 */
		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
		buf[7] = 0x0A;	/* ADDITIONAL SENSE LENGTH */
	} else {
		/*
		 * CURRENT ERROR, NO SENSE
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;

		/*
		 * NO ADDITIONAL SENSE INFORMATION
		 */
		buf[SPC_ASC_KEY_OFFSET] = 0x00;
		buf[7] = 0x0A;	/* ADDITIONAL SENSE LENGTH */
	}

	/* Copy out at most the allocation length */
	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
1188
/*
 * Emulate REPORT LUNS.
 *
 * Walks the session node ACL's device list under device_list_lock and
 * reports every LUN this initiator may access.  The LUN LIST LENGTH
 * header field always reflects the full count, even when the allocation
 * length (cmd->data_length) truncates the returned list.  Without a
 * session (passthrough op), only LUN 0 is reported.
 */
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	/* Minimum allocation length per SPC: 8-byte header + one LUN entry */
	if (cmd->data_length < 16) {
		pr_warn("REPORT LUNS allocation length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD. In that case, report LUN=0 only.
	 */
	if (!sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		/* Past the allocation length: keep counting, stop writing */
		if ((offset + 8) > cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	/* LUN LIST LENGTH in bytes (8 per LUN), big-endian in header bytes 0-3 */
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);
Christoph Hellwigd1b1f802012-10-07 10:55:51 -04001251
/*
 * Emulate TEST UNIT READY: the emulated backend is always considered
 * ready, so unconditionally complete the command with GOOD status.
 */
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}
1258
/*
 * Parse the SPC commands common to all backend device types.
 *
 * Decodes the opcode in cmd->t_task_cdb, stores the expected transfer
 * length in *size and, where this file (or the PR/ALUA/xcopy code)
 * emulates the command, points cmd->execute_cmd at the handler.
 * Returns 0 on success or TCM_UNSUPPORTED_SCSI_OPCODE for opcodes not
 * handled here.
 */
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		/* Only the transfer length is decoded; no handler is set here */
		*size = (cdb[7] << 8) + cdb[8];
		break;
	case PERSISTENT_RESERVE_IN:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = (cdb[3] << 8) + cdb[4];

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = (cdb[3] << 8) | cdb[4];
		break;
	case WRITE_BUFFER:
		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);