/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

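/*
 * Complete the command with GOOD status without doing any work.  Used for
 * commands that only need to be acknowledged, e.g. VERIFY and the legacy
 * SCSI-2 opcodes handled further down in sbc_parse_cdb().
 */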
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

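/*
 * Check that the LBA plus the transfer length implied by cmd->data_length
 * does not run past the last logical block reported by the backend.
 */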
static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

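/*
 * The following helpers extract the big-endian TRANSFER LENGTH field from
 * its fixed offset in the 10, 12, 16 and 32 byte CDB formats.
 */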
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
	       (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
	       (cdb[30] << 8) + cdb[31];
}

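/*
 * Extract the starting LOGICAL BLOCK ADDRESS from the 6, 10/12 and 16 byte
 * CDB formats.  The 6 byte format only carries a 21-bit LBA split across
 * bytes 1-3 of the CDB.
 */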
static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

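/*
 * Validate the WRITE_SAME flags byte and wire up the matching backend
 * handler, routing to ->execute_write_same_unmap when the UNMAP bit is set.
 */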
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

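/*
 * Default ->execute_cmd handler for READ/WRITE: hand the command's data
 * scatterlist to the backend's ->execute_rw callback in the negotiated
 * data direction.
 */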
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

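/*
 * Completion callback for the WRITE phase of COMPARE AND WRITE: mark the
 * command for the response fall-through path and release dev->caw_sem.
 */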
static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}

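/*
 * Completion callback for the READ phase of COMPARE AND WRITE: compare the
 * blocks just read against the verify payload and, on a match, rebuild
 * cmd->t_data_sg to point at the write portion of the data-out buffer and
 * re-dispatch the command as a WRITE.
 */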
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet.
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

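/*
 * Entry point for COMPARE AND WRITE: take dev->caw_sem to serialize the
 * emulation, then submit the READ of the existing blocks.  The compare and
 * the subsequent WRITE are handled in compare_and_write_callback().
 */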
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg.
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

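/*
 * Parse an SBC CDB on behalf of a backend driver: decode the starting LBA
 * and sector count, select the ->execute_cmd handler from *ops, and check
 * the request against the device limits before the final size check in
 * target_cmd_size_check().
 */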
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

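/*
 * Walk the UNMAP parameter list and invoke the backend supplied do_unmap_fn
 * callback once per block descriptor (LBA + range), validating each
 * descriptor against the device's unmap limits along the way.
 */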
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);