/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * Refer to the SCSI-NVMe Translation spec for details on how
 * each command is translated.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>


static int sg_version_num = 30534;	/* 2 digits for each component */

#define SNTI_TRANSLATION_SUCCESS			0
#define SNTI_INTERNAL_ERROR				1

/* VPD Page Codes */
#define VPD_SUPPORTED_PAGES				0x00
#define VPD_SERIAL_NUMBER				0x80
#define VPD_DEVICE_IDENTIFIERS				0x83
#define VPD_EXTENDED_INQUIRY				0x86
#define VPD_BLOCK_DEV_CHARACTERISTICS			0xB1

/* CDB offsets */
#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET		6
#define REPORT_LUNS_SR_OFFSET				2
#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET		10
#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET		4
#define REQUEST_SENSE_DESC_OFFSET			1
#define REQUEST_SENSE_DESC_MASK				0x01
#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE		1
#define INQUIRY_EVPD_BYTE_OFFSET			1
#define INQUIRY_PAGE_CODE_BYTE_OFFSET			2
#define INQUIRY_EVPD_BIT_MASK				1
#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET		3
#define START_STOP_UNIT_CDB_IMMED_OFFSET		1
#define START_STOP_UNIT_CDB_IMMED_MASK			0x1
#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET	3
#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK		0xF
#define START_STOP_UNIT_CDB_POWER_COND_OFFSET		4
#define START_STOP_UNIT_CDB_POWER_COND_MASK		0xF0
#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET		4
#define START_STOP_UNIT_CDB_NO_FLUSH_MASK		0x4
#define START_STOP_UNIT_CDB_START_OFFSET		4
#define START_STOP_UNIT_CDB_START_MASK			0x1
#define WRITE_BUFFER_CDB_MODE_OFFSET			1
#define WRITE_BUFFER_CDB_MODE_MASK			0x1F
#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET		2
#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET		3
#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET	6
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET		1
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK		0xC0
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT		6
#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET		1
#define FORMAT_UNIT_CDB_LONG_LIST_MASK			0x20
#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET		1
#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK		0x10
#define FORMAT_UNIT_SHORT_PARM_LIST_LEN			4
#define FORMAT_UNIT_LONG_PARM_LIST_LEN			8
#define FORMAT_UNIT_PROT_INT_OFFSET			3
#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET		0
#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK		0x07

/* Misc. defines */
#define NIBBLE_SHIFT					4
#define FIXED_SENSE_DATA				0x70
#define DESC_FORMAT_SENSE_DATA				0x72
#define FIXED_SENSE_DATA_ADD_LENGTH			10
#define LUN_ENTRY_SIZE					8
#define LUN_DATA_HEADER_SIZE				8
#define ALL_LUNS_RETURNED				0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED			0x01
#define RESTRICTED_LUNS_RETURNED			0x00
#define NVME_POWER_STATE_START_VALID			0x00
#define NVME_POWER_STATE_ACTIVE				0x01
#define NVME_POWER_STATE_IDLE				0x02
#define NVME_POWER_STATE_STANDBY			0x03
#define NVME_POWER_STATE_LU_CONTROL			0x07
#define POWER_STATE_0					0
#define POWER_STATE_1					1
#define POWER_STATE_2					2
#define POWER_STATE_3					3
#define DOWNLOAD_SAVE_ACTIVATE				0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE			0x0E
#define ACTIVATE_DEFERRED_MICROCODE			0x0F
#define FORMAT_UNIT_IMMED_MASK				0x2
#define FORMAT_UNIT_IMMED_OFFSET			1
#define KELVIN_TEMP_FACTOR				273
#define FIXED_FMT_SENSE_DATA_SIZE			18
#define DESC_FMT_SENSE_DATA_SIZE			8

/* SCSI/NVMe defines and bit masks */
#define INQ_STANDARD_INQUIRY_PAGE			0x00
#define INQ_SUPPORTED_VPD_PAGES_PAGE			0x00
#define INQ_UNIT_SERIAL_NUMBER_PAGE			0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE			0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE			0x86
#define INQ_BDEV_CHARACTERISTICS_PAGE			0xB1
#define INQ_SERIAL_NUMBER_LENGTH			0x14
#define INQ_NUM_SUPPORTED_VPD_PAGES			5
#define VERSION_SPC_4					0x06
#define ACA_UNSUPPORTED					0
#define STANDARD_INQUIRY_LENGTH				36
#define ADDITIONAL_STD_INQ_LENGTH			31
#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH		0x3C
#define RESERVED_FIELD					0

/* SCSI READ/WRITE Defines */
#define IO_CDB_WP_MASK					0xE0
#define IO_CDB_WP_SHIFT					5
#define IO_CDB_FUA_MASK					0x8
#define IO_6_CDB_LBA_OFFSET				0
#define IO_6_CDB_LBA_MASK				0x001FFFFF
#define IO_6_CDB_TX_LEN_OFFSET				4
#define IO_6_DEFAULT_TX_LEN				256
#define IO_10_CDB_LBA_OFFSET				2
#define IO_10_CDB_TX_LEN_OFFSET				7
#define IO_10_CDB_WP_OFFSET				1
#define IO_10_CDB_FUA_OFFSET				1
#define IO_12_CDB_LBA_OFFSET				2
#define IO_12_CDB_TX_LEN_OFFSET				6
#define IO_12_CDB_WP_OFFSET				1
#define IO_12_CDB_FUA_OFFSET				1
#define IO_16_CDB_FUA_OFFSET				1
#define IO_16_CDB_WP_OFFSET				1
#define IO_16_CDB_LBA_OFFSET				2
#define IO_16_CDB_TX_LEN_OFFSET				10

/* Mode Sense/Select defines */
#define MODE_PAGE_INFO_EXCEP				0x1C
#define MODE_PAGE_CACHING				0x08
#define MODE_PAGE_CONTROL				0x0A
#define MODE_PAGE_POWER_CONDITION			0x1A
#define MODE_PAGE_RETURN_ALL				0x3F
#define MODE_PAGE_BLK_DES_LEN				0x08
#define MODE_PAGE_LLBAA_BLK_DES_LEN			0x10
#define MODE_PAGE_CACHING_LEN				0x14
#define MODE_PAGE_CONTROL_LEN				0x0C
#define MODE_PAGE_POW_CND_LEN				0x28
#define MODE_PAGE_INF_EXC_LEN				0x0C
#define MODE_PAGE_ALL_LEN				0x54
#define MODE_SENSE6_MPH_SIZE				4
#define MODE_SENSE6_ALLOC_LEN_OFFSET			4
#define MODE_SENSE_PAGE_CONTROL_OFFSET			2
#define MODE_SENSE_PAGE_CONTROL_MASK			0xC0
#define MODE_SENSE_PAGE_CODE_OFFSET			2
#define MODE_SENSE_PAGE_CODE_MASK			0x3F
#define MODE_SENSE_LLBAA_OFFSET				1
#define MODE_SENSE_LLBAA_MASK				0x10
#define MODE_SENSE_LLBAA_SHIFT				4
#define MODE_SENSE_DBD_OFFSET				1
#define MODE_SENSE_DBD_MASK				8
#define MODE_SENSE_DBD_SHIFT				3
#define MODE_SENSE10_MPH_SIZE				8
#define MODE_SENSE10_ALLOC_LEN_OFFSET			7
#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET		1
#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET		1
#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET	4
#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET	7
#define MODE_SELECT_CDB_PAGE_FORMAT_MASK		0x10
#define MODE_SELECT_CDB_SAVE_PAGES_MASK			0x1
#define MODE_SELECT_6_BD_OFFSET				3
#define MODE_SELECT_10_BD_OFFSET			6
#define MODE_SELECT_10_LLBAA_OFFSET			4
#define MODE_SELECT_10_LLBAA_MASK			1
#define MODE_SELECT_6_MPH_SIZE				4
#define MODE_SELECT_10_MPH_SIZE				8
#define CACHING_MODE_PAGE_WCE_MASK			0x04
#define MODE_SENSE_BLK_DESC_ENABLED			0
#define MODE_SENSE_BLK_DESC_COUNT			1
#define MODE_SELECT_PAGE_CODE_MASK			0x3F
#define SHORT_DESC_BLOCK				8
#define LONG_DESC_BLOCK					16
#define MODE_PAGE_POW_CND_LEN_FIELD			0x26
#define MODE_PAGE_INF_EXC_LEN_FIELD			0x0A
#define MODE_PAGE_CACHING_LEN_FIELD			0x12
#define MODE_PAGE_CONTROL_LEN_FIELD			0x0A
#define MODE_SENSE_PC_CURRENT_VALUES			0

/* Log Sense defines */
#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE		0x00
#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH		0x07
#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE		0x2F
#define LOG_PAGE_TEMPERATURE_PAGE			0x0D
#define LOG_SENSE_CDB_SP_OFFSET				1
#define LOG_SENSE_CDB_SP_NOT_ENABLED			0
#define LOG_SENSE_CDB_PC_OFFSET				2
#define LOG_SENSE_CDB_PC_MASK				0xC0
#define LOG_SENSE_CDB_PC_SHIFT				6
#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES		1
#define LOG_SENSE_CDB_PAGE_CODE_MASK			0x3F
#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET		7
#define REMAINING_INFO_EXCP_PAGE_LENGTH			0x8
#define LOG_INFO_EXCP_PAGE_LENGTH			0xC
#define REMAINING_TEMP_PAGE_LENGTH			0xC
#define LOG_TEMP_PAGE_LENGTH				0x10
#define LOG_TEMP_UNKNOWN				0xFF
#define SUPPORTED_LOG_PAGES_PAGE_LENGTH			0x3

/* Read Capacity defines */
#define READ_CAP_10_RESP_SIZE				8
#define READ_CAP_16_RESP_SIZE				32

/* NVMe Namespace and Command Defines */
#define NVME_GET_SMART_LOG_PAGE				0x02
#define NVME_GET_FEAT_TEMP_THRESH			0x04
#define BYTES_TO_DWORDS					4
#define NVME_MAX_FIRMWARE_SLOT				7

/* Report LUNs defines */
#define REPORT_LUNS_FIRST_LUN_OFFSET			8

/* SCSI ADDITIONAL SENSE Codes */

#define SCSI_ASC_NO_SENSE				0x00
#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT		0x03
#define SCSI_ASC_LUN_NOT_READY				0x04
#define SCSI_ASC_WARNING				0x0B
#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED		0x10
#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED		0x10
#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED		0x10
#define SCSI_ASC_UNRECOVERED_READ_ERROR			0x11
#define SCSI_ASC_MISCOMPARE_DURING_VERIFY		0x1D
#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID		0x20
#define SCSI_ASC_ILLEGAL_COMMAND			0x20
#define SCSI_ASC_ILLEGAL_BLOCK				0x21
#define SCSI_ASC_INVALID_CDB				0x24
#define SCSI_ASC_INVALID_LUN				0x25
#define SCSI_ASC_INVALID_PARAMETER			0x26
#define SCSI_ASC_FORMAT_COMMAND_FAILED			0x31
#define SCSI_ASC_INTERNAL_TARGET_FAILURE		0x44

/* SCSI ADDITIONAL SENSE Code Qualifiers */

#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE			0x00
#define SCSI_ASCQ_FORMAT_COMMAND_FAILED			0x01
#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED		0x01
#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED		0x02
#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED		0x03
#define SCSI_ASCQ_FORMAT_IN_PROGRESS			0x04
#define SCSI_ASCQ_POWER_LOSS_EXPECTED			0x08
#define SCSI_ASCQ_INVALID_LUN_ID			0x09

/*
 * DEVICE_SPECIFIC_PARAMETER in the mode parameter header (see sbc2r16):
 * a value of 0x10 would enable DPOFUA support.
 */
#define DEVICE_SPECIFIC_PARAMETER			0
#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)

/* MACROs to extract information from CDBs */

#define GET_OPCODE(cdb)		cdb[0]

#define GET_U8_FROM_CDB(cdb, index)	(cdb[index] << 0)

#define GET_U16_FROM_CDB(cdb, index)	((cdb[index] << 8) | (cdb[index + 1] << 0))

#define GET_U24_FROM_CDB(cdb, index)	((cdb[index] << 16) | \
					(cdb[index + 1] << 8) | \
					(cdb[index + 2] << 0))

#define GET_U32_FROM_CDB(cdb, index)	((cdb[index] << 24) | \
					(cdb[index + 1] << 16) | \
					(cdb[index + 2] << 8) | \
					(cdb[index + 3] << 0))

#define GET_U64_FROM_CDB(cdb, index)	((((u64)cdb[index]) << 56) | \
					(((u64)cdb[index + 1]) << 48) | \
					(((u64)cdb[index + 2]) << 40) | \
					(((u64)cdb[index + 3]) << 32) | \
					(((u64)cdb[index + 4]) << 24) | \
					(((u64)cdb[index + 5]) << 16) | \
					(((u64)cdb[index + 6]) << 8) | \
					(((u64)cdb[index + 7]) << 0))

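/*
 * The macros above assemble big-endian CDB fields into host byte order.
 * For example, given a READ(10) CDB of
 * { 0x28, 0x00, 0x00, 0x12, 0x34, 0x56, 0x00, 0x00, 0x08, 0x00 },
 * GET_U32_FROM_CDB(cdb, 2) yields the LBA 0x00123456 and
 * GET_U16_FROM_CDB(cdb, 7) yields the transfer length 0x0008.
 */
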
/* Inquiry Helper Macros */
#define GET_INQ_EVPD_BIT(cdb)					\
	((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) &	\
	INQUIRY_EVPD_BIT_MASK) ? 1 : 0)

#define GET_INQ_PAGE_CODE(cdb)					\
	(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))

#define GET_INQ_ALLOC_LENGTH(cdb)				\
	(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))

/* Report LUNs Helper Macros */
#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb)			\
	(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))

/* Read Capacity Helper Macros */
#define GET_READ_CAP_16_ALLOC_LENGTH(cdb)			\
	(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))

#define IS_READ_CAP_16(cdb)					\
	((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)

/* Request Sense Helper Macros */
#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb)			\
	(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))

/* Mode Sense Helper Macros */
#define GET_MODE_SENSE_DBD(cdb)					\
	((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \
	MODE_SENSE_DBD_SHIFT)

#define GET_MODE_SENSE_LLBAA(cdb)				\
	((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) &	\
	MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)

#define GET_MODE_SENSE_MPH_SIZE(cdb10)				\
	(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)


/*
 * Struct to gather data that needs to be extracted from a SCSI CDB.
 * Not conforming to any particular CDB variant, but compatible with all.
 */
struct nvme_trans_io_cdb {
	u8 fua;
	u8 prot_info;
	u64 lba;
	u32 xfer_len;
};


/* Internal Helper Functions */


/* Copy data to userspace memory */

static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
							unsigned long n)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	unsigned long not_copied;
	int i;
	void *index = from;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
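		/*
		 * The user buffer is a scatter list: pull each sg_iovec
		 * descriptor in from user space, then fill it from the
		 * contiguous kernel buffer until the source is exhausted.
		 */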
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			not_copied = copy_to_user(sgl.iov_base, index,
								xfer_len);
			if (not_copied) {
				res = -EFAULT;
				break;
			}
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return res;
	}
	not_copied = copy_to_user(hdr->dxferp, from, n);
	if (not_copied)
		res = -EFAULT;
	return res;
}

/* Copy data from userspace memory */

static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
							unsigned long n)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	unsigned long not_copied;
	int i;
	void *index = to;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			not_copied = copy_from_user(index, sgl.iov_base,
								xfer_len);
			if (not_copied) {
				res = -EFAULT;
				break;
			}
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return res;
	}

	not_copied = copy_from_user(to, hdr->dxferp, n);
	if (not_copied)
		res = -EFAULT;
	return res;
}

/* Status/Sense Buffer Writeback */

static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
				u8 asc, u8 ascq)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 xfer_len;
	u8 resp[DESC_FMT_SENSE_DATA_SIZE];

	if (scsi_status_is_good(status)) {
		hdr->status = SAM_STAT_GOOD;
		hdr->masked_status = GOOD;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;
		hdr->sb_len_wr = 0;
	} else {
		hdr->status = status;
		hdr->masked_status = status >> 1;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;

		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
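		/*
		 * Descriptor-format sense data (SPC-4 4.5.2): byte 0 is the
		 * response code (0x72, current errors), byte 1 the sense key,
		 * bytes 2 and 3 the ASC/ASCQ pair; no descriptors follow.
		 */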
		resp[0] = DESC_FORMAT_SENSE_DATA;
		resp[1] = sense_key;
		resp[2] = asc;
		resp[3] = ascq;

		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
		hdr->sb_len_wr = xfer_len;
		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
			res = -EFAULT;
	}

	return res;
}

static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
{
	u8 status, sense_key, asc, ascq;
	int res = SNTI_TRANSLATION_SUCCESS;

	/* For non-nvme (Linux) errors, simply return the error code */
	if (nvme_sc < 0)
		return nvme_sc;

	/* Mask DNR, More, and reserved fields */
	nvme_sc &= 0x7FF;
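	/*
	 * In this representation the Status Code occupies bits 7:0 and the
	 * Status Code Type bits 10:8; DNR, More, and the reserved bits sit
	 * above bit 10, so 0x7FF keeps exactly the SCT/SC pair matched below.
	 */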

	switch (nvme_sc) {
	/* Generic Command Status */
	case NVME_SC_SUCCESS:
		status = SAM_STAT_GOOD;
		sense_key = NO_SENSE;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_OPCODE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_COMMAND;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_FIELD:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_DATA_XFER_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_POWER_LOSS:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_WARNING;
		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
		break;
	case NVME_SC_INTERNAL:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = HARDWARE_ERROR;
		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_REQ:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_QUEUE:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_FAIL:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_MISSING:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_NS:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;
	case NVME_SC_LBA_RANGE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_BLOCK;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_CAP_EXCEEDED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_NS_NOT_READY:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = NOT_READY;
		asc = SCSI_ASC_LUN_NOT_READY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Command Specific Status */
	case NVME_SC_INVALID_FORMAT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
		break;
	case NVME_SC_BAD_ATTRIBUTES:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Media Errors */
	case NVME_SC_WRITE_FAULT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_READ_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_GUARD_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
		break;
	case NVME_SC_APPTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
		break;
	case NVME_SC_REFTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
		break;
	case NVME_SC_COMPARE_FAILED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MISCOMPARE;
		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ACCESS_DENIED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;

	/* Unspecified/Default */
	case NVME_SC_CMDID_CONFLICT:
	case NVME_SC_CMD_SEQ_ERROR:
	case NVME_SC_CQ_INVALID:
	case NVME_SC_QID_INVALID:
	case NVME_SC_QUEUE_SIZE:
	case NVME_SC_ABORT_LIMIT:
	case NVME_SC_ABORT_MISSING:
	case NVME_SC_ASYNC_LIMIT:
	case NVME_SC_FIRMWARE_SLOT:
	case NVME_SC_FIRMWARE_IMAGE:
	case NVME_SC_INVALID_VECTOR:
	case NVME_SC_INVALID_LOG_PAGE:
	default:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	}

	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);

	return res;
}

/* INQUIRY Helper Functions */

static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	int xfer_len;
	u8 resp_data_format = 0x02;
	u8 protect;
	u8 cmdque = 0x01 << 1;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
				&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme ns identify - use DPS value for PROTECT field */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	/*
	 * If nvme_sc was negative, res is negative here.
	 * If nvme_sc was positive, the status has been translated and res
	 * can only be 0 or negative.
	 *  - If res == 0 and nvme_sc > 0, fall into the next if, where res
	 *    takes the value of nvme_sc.
	 *  - If res is negative, return it: it's a Linux error.
	 */
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ns = mem;
	protect = id_ns->dps ? 0x01 : 0;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[2] = VERSION_SPC_4;
	inq_response[3] = resp_data_format;	/*normaca=0 | hisup=0 */
	inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
	inq_response[5] = protect;	/* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
	strncpy(&inq_response[8], "NVMe    ", 8);
	strncpy(&inq_response[16], dev->model, 16);
	strncpy(&inq_response[32], dev->firmware_rev, 4);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out_dma:
	return res;
}

static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
	inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
	inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
	inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	return res;
}

static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
	inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
	strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	return res;
}

static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	u8 ieee[4];
	int xfer_len;
	__be32 tmp_id = cpu_to_be32(ns->ns_id);

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme controller identify */
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ctrl = mem;

	/* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
	ieee[0] = id_ctrl->ieee[0] << 4;
	ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
	ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
	ieee[3] = id_ctrl->ieee[2] >> 4;
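	/*
	 * The shuffle above shifts the 24-bit IEEE OUI left one nibble so it
	 * can share byte 8 with the 4-bit NAA field.  For an illustrative OUI
	 * of 0xABCDEF (stored little-endian as {0xEF, 0xCD, 0xAB}), ieee[]
	 * becomes {0xF0, 0xDE, 0xBC, 0x0A} and bytes 8-11 of the descriptor
	 * read 0x6A 0xBC 0xDE 0xF0: NAA = 6h followed by the OUI.
	 */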

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
	inq_response[3] = 20;      /* Page Length */
	/* Designation Descriptor start */
	inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
	inq_response[5] = 0x03;    /* PIV=0b | Asso=00b | Designator Type=3h */
	inq_response[6] = 0x00;    /* Rsvd */
	inq_response[7] = 16;      /* Designator Length */
	/* Designator start */
	inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
	inq_response[9] = ieee[2];  /* IEEE ID */
	inq_response[10] = ieee[1]; /* IEEE ID */
	inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */
	inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
	inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
	inq_response[14] = dev->serial[0];
	inq_response[15] = dev->serial[1];
	inq_response[16] = dev->model[0];
	inq_response[17] = dev->model[1];
	memcpy(&inq_response[18], &tmp_id, sizeof(u32));
	/* Last 2 bytes are zero */

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out_dma:
	return res;
}

static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	struct nvme_id_ns *id_ns;
	int xfer_len;
	u8 microcode = 0x80;
	u8 spt;
	u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
	u8 grd_chk, app_chk, ref_chk, protect;
	u8 uask_sup = 0x20;
	u8 v_sup;
	u8 luiclr = 0x01;

	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ns = mem;
	spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
	protect = id_ns->dps ? 0x01 : 0;
	grd_chk = protect << 2;
	app_chk = protect << 1;
	ref_chk = protect;

	/* nvme controller identify */
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ctrl = mem;
	v_sup = id_ctrl->vwc;

	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
	inq_response[5] = uask_sup;
	inq_response[6] = v_sup;
	inq_response[7] = luiclr;
	inq_response[8] = 0;
	inq_response[9] = 0;

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out_dma:
	kfree(inq_response);
 out_mem:
	return res;
}

static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
	inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
	inq_response[6] = 0x00;    /* Form Factor */

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	kfree(inq_response);
 out_mem:
	return res;
}

/* LOG SENSE Helper Functions */

static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;

	log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}
	memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);

	log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
	log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;

	xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	kfree(log_response);
 out_mem:
	return res;
}

static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u8 temp_c;
	u16 temp_k;

	log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}
	memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH);

	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
	c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
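	/*
	 * Get Log Page CDW10 as built here: transfer length in dwords in the
	 * upper 16 bits, log page identifier (0x02 = SMART / Health
	 * Information) in the low byte.
	 */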
	res = nvme_submit_admin_cmd(dev, &c, NULL);
	if (res != NVME_SC_SUCCESS) {
		temp_c = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c = temp_k - KELVIN_TEMP_FACTOR;
	}

	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
	/* Informational Exceptions Log Parameter 1 Start */
	/* Parameter Code=0x0000 bytes 4,5 */
	log_response[6] = 0x23;	/* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
	log_response[7] = 0x04;	/* PARAMETER LENGTH */
	/* Add sense Code and qualifier = 0x00 each */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[10] = temp_c;

	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
			  mem, dma_addr);
 out_dma:
	kfree(log_response);
 out_mem:
	return res;
}

static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u32 feature_resp;
	u8 temp_c_cur, temp_c_thresh;
	u16 temp_k;

	log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}
	memset(log_response, 0, LOG_TEMP_PAGE_LENGTH);

	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
	c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE);
	res = nvme_submit_admin_cmd(dev, &c, NULL);
	if (res != NVME_SC_SUCCESS) {
		temp_c_cur = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
	}

	/* Get Features for Temp Threshold */
	res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
								&feature_resp);
	if (res != NVME_SC_SUCCESS)
		temp_c_thresh = LOG_TEMP_UNKNOWN;
	else
		temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;

	log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
	/* Temperature Log Parameter 1 (Temperature) Start */
	/* Parameter Code = 0x0000 */
	log_response[6] = 0x01;		/* Format and Linking = 01b */
	log_response[7] = 0x02;		/* Parameter Length */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[9] = temp_c_cur;
	/* Temperature Log Parameter 2 (Reference Temperature) Start */
	log_response[11] = 0x01;	/* Parameter Code = 0x0001 */
	log_response[12] = 0x01;	/* Format and Linking = 01b */
	log_response[13] = 0x02;	/* Parameter Length */
	/* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
	log_response[15] = temp_c_thresh;

	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
			  mem, dma_addr);
 out_dma:
	kfree(log_response);
 out_mem:
	return res;
}

/* MODE SENSE Helper Functions */

static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
					u16 mode_data_length, u16 blk_desc_len)
{
	/* Quick check to make sure I don't stomp on my own memory... */
	if ((cdb10 && len < 8) || (!cdb10 && len < 4))
		return SNTI_INTERNAL_ERROR;

	if (cdb10) {
		resp[0] = (mode_data_length & 0xFF00) >> 8;
		resp[1] = (mode_data_length & 0x00FF);
		/* resp[2] and [3] are zero */
		resp[4] = llbaa;
		resp[5] = RESERVED_FIELD;
		resp[6] = (blk_desc_len & 0xFF00) >> 8;
		resp[7] = (blk_desc_len & 0x00FF);
	} else {
		resp[0] = (mode_data_length & 0x00FF);
		/* resp[1] and [2] are zero */
		resp[3] = (blk_desc_len & 0x00FF);
	}

	return SNTI_TRANSLATION_SUCCESS;
}

static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *resp, int len, u8 llbaa)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 flbas;
	u32 lba_length;

	if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
		return SNTI_INTERNAL_ERROR;
	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
		return SNTI_INTERNAL_ERROR;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ns = mem;
	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));
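	/*
	 * LBADS is a power-of-two exponent: ds = 9 gives 512-byte blocks,
	 * ds = 12 gives 4096-byte blocks.
	 */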

	if (llbaa == 0) {
		__be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
		/* Byte 4 is reserved */
		__be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);

		memcpy(resp, &tmp_cap, sizeof(u32));
		memcpy(&resp[4], &tmp_len, sizeof(u32));
	} else {
		__be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
		__be32 tmp_len = cpu_to_be32(lba_length);

		memcpy(resp, &tmp_cap, sizeof(u64));
		/* Bytes 8, 9, 10, 11 are reserved */
		memcpy(&resp[12], &tmp_len, sizeof(u32));
	}

 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out:
	return res;
}

static int nvme_trans_fill_control_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	if (len < MODE_PAGE_CONTROL_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_CONTROL;
	resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
	resp[2] = 0x0E;		/* TST=000b, TMF_ONLY=0, DPICZ=1,
				 * D_SENSE=1, GLTSD=1, RLEC=0 */
	resp[3] = 0x12;		/* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
	/* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */
	resp[5] = 0x40;		/* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
	/* resp[6] and [7] are obsolete, thus zero */
	resp[8] = 0xFF;		/* Busy timeout period = 0xffff */
	resp[9] = 0xFF;
	/* Bytes 10,11: Extended selftest completion time = 0x0000 */

	return SNTI_TRANSLATION_SUCCESS;
}

static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *resp, int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u32 feature_resp;
	u8 vwc;

	if (len < MODE_PAGE_CACHING_LEN)
		return SNTI_INTERNAL_ERROR;

	nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
								&feature_resp);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out;
	if (nvme_sc) {
		res = nvme_sc;
		goto out;
	}
	vwc = feature_resp & 0x00000001;

	resp[0] = MODE_PAGE_CACHING;
	resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
	resp[2] = vwc << 2;

 out:
	return res;
}

static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;

	if (len < MODE_PAGE_POW_CND_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_POWER_CONDITION;
	resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
	/* All other bytes are zero */

	return res;
}

static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;

	if (len < MODE_PAGE_INF_EXC_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_INFO_EXCEP;
	resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
	resp[2] = 0x88;
	/* All other bytes are zero */

	return res;
}

static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *resp, int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 mode_pages_offset_1 = 0;
	u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;

	mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
	mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
	mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;

	res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
					MODE_PAGE_CACHING_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
					MODE_PAGE_CONTROL_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
					MODE_PAGE_POW_CND_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
					MODE_PAGE_INF_EXC_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

 out:
	return res;
}

static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
{
	if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
		/* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
		return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
	} else {
		return 0;
	}
}

static int nvme_trans_mode_page_create(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *cmd,
					u16 alloc_len, u8 cdb10,
					int (*mode_page_fill_func)
					(struct nvme_ns *,
					struct sg_io_hdr *hdr, u8 *, int),
					u16 mode_pages_tot_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *response;
	u8 dbd, llbaa;
	u16 resp_size;
	int mph_size;
	u16 mode_pages_offset_1;
	u16 blk_desc_len, blk_desc_offset, mode_data_length;

	dbd = GET_MODE_SENSE_DBD(cmd);
	llbaa = GET_MODE_SENSE_LLBAA(cmd);
	mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
	blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);

	resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
	/* Refer spc4r34 Table 440 for calculation of Mode data Length field */
	mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
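	/*
	 * Worked example: MODE SENSE(10) (cdb10 = 1) for all pages with one
	 * short block descriptor gives 3 + 3 + 8 + 0x54 = 98; the field
	 * excludes the two length bytes of its own 8-byte header.
	 */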

	blk_desc_offset = mph_size;
	mode_pages_offset_1 = blk_desc_offset + blk_desc_len;

	response = kmalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}
	memset(response, 0, resp_size);

	res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
					llbaa, mode_data_length, blk_desc_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_free;
	if (blk_desc_len > 0) {
		res = nvme_trans_fill_blk_desc(ns, hdr,
					       &response[blk_desc_offset],
					       blk_desc_len, llbaa);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out_free;
	}
	res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
					mode_pages_tot_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_free;

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

 out_free:
	kfree(response);
 out_mem:
	return res;
}

/* Read Capacity Helper Functions */

static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
							u8 cdb16)
{
	u8 flbas;
	u32 lba_length;
	u64 rlba;
	u8 prot_en;
	u8 p_type_lut[4] = {0, 0, 1, 2};
	__be64 tmp_rlba;
	__be32 tmp_rlba_32;
	__be32 tmp_len;

	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));
	rlba = le64_to_cpup(&id_ns->nsze) - 1;
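	/*
	 * READ CAPACITY reports the address of the last logical block, hence
	 * nsze - 1; the 10-byte form below saturates it to 0xFFFFFFFF so the
	 * host knows to retry with READ CAPACITY(16).
	 */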
	prot_en = id_ns->dps ? 0x01 : 0;

	if (!cdb16) {
		if (rlba > 0xFFFFFFFF)
			rlba = 0xFFFFFFFF;
		tmp_rlba_32 = cpu_to_be32(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba_32, sizeof(u32));
		memcpy(&response[4], &tmp_len, sizeof(u32));
	} else {
		tmp_rlba = cpu_to_be64(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba, sizeof(u64));
		memcpy(&response[8], &tmp_len, sizeof(u32));
		response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
		/* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
		/* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
		/* Bytes 16-31 - Reserved */
	}
}

/* Start Stop Unit Helper Functions */

static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
						u8 pc, u8 pcmod, u8 start)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	int lowest_pow_st;	/* max npss = lowest power consumption */
	unsigned ps_desired = 0;

	/* NVMe Controller Identify */
	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_id_ctrl),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ctrl = mem;
	lowest_pow_st = id_ctrl->npss - 1;
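	/*
	 * NVMe power states run from 0 (maximum performance) upward, with
	 * higher-numbered states drawing less power; the SCSI power
	 * conditions below are mapped onto that range accordingly.
	 */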
1485
1486 switch (pc) {
1487 case NVME_POWER_STATE_START_VALID:
1488 /* Action unspecified if POWER CONDITION MODIFIER != 0 */
1489 if (pcmod == 0 && start == 0x1)
1490 ps_desired = POWER_STATE_0;
1491 if (pcmod == 0 && start == 0x0)
1492 ps_desired = lowest_pow_st;
1493 break;
1494 case NVME_POWER_STATE_ACTIVE:
1495 /* Action unspecified if POWER CONDITION MODIFIER != 0 */
1496 if (pcmod == 0)
1497 ps_desired = POWER_STATE_0;
1498 break;
1499 case NVME_POWER_STATE_IDLE:
1500 /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
1501 /* min of desired state and (lps-1) because lps is STOP */
1502 if (pcmod == 0x0)
1503 ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1));
1504 else if (pcmod == 0x1)
1505 ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1));
1506 else if (pcmod == 0x2)
1507 ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1));
1508 break;
1509 case NVME_POWER_STATE_STANDBY:
1510 /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
1511 if (pcmod == 0x0)
1512 ps_desired = max(0, (lowest_pow_st - 2));
1513 else if (pcmod == 0x1)
1514 ps_desired = max(0, (lowest_pow_st - 1));
1515 break;
1516 case NVME_POWER_STATE_LU_CONTROL:
1517 default:
1518 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1519 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1520 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1521 break;
1522 }
1523 nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
1524 NULL);
1525 res = nvme_trans_status_code(hdr, nvme_sc);
1526 if (res)
1527 goto out_dma;
1528 if (nvme_sc)
1529 res = nvme_sc;
1530 out_dma:
1531 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
1532 dma_addr);
1533 out:
1534 return res;
1535}
1536
1537/* Write Buffer Helper Functions */
1538/* Also using this for Format Unit with hdr passed as NULL, and buffer_id, 0 */
1539
1540static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1541 u8 opcode, u32 tot_len, u32 offset,
1542 u8 buffer_id)
1543{
1544 int res = SNTI_TRANSLATION_SUCCESS;
1545 int nvme_sc;
1546 struct nvme_dev *dev = ns->dev;
1547 struct nvme_command c;
1548 struct nvme_iod *iod = NULL;
1549 unsigned length;
1550
1551 memset(&c, 0, sizeof(c));
1552 c.common.opcode = opcode;
1553 if (opcode == nvme_admin_download_fw) {
1554 if (hdr->iovec_count > 0) {
1555 /* Assuming SGL is not allowed for this command */
1556 res = nvme_trans_completion(hdr,
1557 SAM_STAT_CHECK_CONDITION,
1558 ILLEGAL_REQUEST,
1559 SCSI_ASC_INVALID_CDB,
1560 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1561 goto out;
1562 }
1563 iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
1564 (unsigned long)hdr->dxferp, tot_len);
1565 if (IS_ERR(iod)) {
1566 res = PTR_ERR(iod);
1567 goto out;
1568 }
1569 length = nvme_setup_prps(dev, &c.common, iod, tot_len,
1570 GFP_KERNEL);
1571 if (length != tot_len) {
1572 res = -ENOMEM;
1573 goto out_unmap;
1574 }
1575
Vishal Verma8741ee42013-04-04 17:52:27 -06001576 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
1577 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
Vishal Verma5d0f6132013-03-04 18:40:58 -07001578 } else if (opcode == nvme_admin_activate_fw) {
Vishal Verma8741ee42013-04-04 17:52:27 -06001579 c.common.cdw10[0] = cpu_to_le32(buffer_id);
Vishal Verma5d0f6132013-03-04 18:40:58 -07001580 /* AA=01b Replace & activate at reset */
Vishal Verma8741ee42013-04-04 17:52:27 -06001581 c.common.cdw10[0] = cpu_to_le32(le32_to_cpu(
1582 c.common.cdw10[0]) | 0x00000008);
Vishal Verma5d0f6132013-03-04 18:40:58 -07001583 }
1584
1585 nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
1586 res = nvme_trans_status_code(hdr, nvme_sc);
1587 if (res)
1588 goto out_unmap;
1589 if (nvme_sc)
1590 res = nvme_sc;
1591
1592 out_unmap:
1593 if (opcode == nvme_admin_download_fw) {
1594 nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
1595 nvme_free_iod(dev, iod);
1596 }
1597 out:
1598 return res;
1599}
1600
1601/* Mode Select Helper Functions */
1602
1603static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
1604 u16 *bd_len, u8 *llbaa)
1605{
1606 if (cdb10) {
1607 /* 10 Byte CDB */
1608 *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
1609 parm_list[MODE_SELECT_10_BD_OFFSET + 1];
1610 *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &&
1611 MODE_SELECT_10_LLBAA_MASK;
1612 } else {
1613 /* 6 Byte CDB */
1614 *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
1615 }
1616}
1617
1618static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
1619 u16 idx, u16 bd_len, u8 llbaa)
1620{
1621 u16 bd_num;
1622
1623 bd_num = bd_len / ((llbaa == 0) ?
1624 SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
1625 /* Store block descriptor info if a FORMAT UNIT comes later */
1626 /* TODO Saving 1st BD info; what to do if multiple BD received? */
1627 if (llbaa == 0) {
1628 /* Standard Block Descriptor - spc4r34 7.5.5.1 */
1629 ns->mode_select_num_blocks =
1630 (parm_list[idx + 1] << 16) +
1631 (parm_list[idx + 2] << 8) +
1632 (parm_list[idx + 3]);
1633
1634 ns->mode_select_block_len =
1635 (parm_list[idx + 5] << 16) +
1636 (parm_list[idx + 6] << 8) +
1637 (parm_list[idx + 7]);
1638 } else {
1639 /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
1640 ns->mode_select_num_blocks =
1641 (((u64)parm_list[idx + 0]) << 56) +
1642 (((u64)parm_list[idx + 1]) << 48) +
1643 (((u64)parm_list[idx + 2]) << 40) +
1644 (((u64)parm_list[idx + 3]) << 32) +
1645 (((u64)parm_list[idx + 4]) << 24) +
1646 (((u64)parm_list[idx + 5]) << 16) +
1647 (((u64)parm_list[idx + 6]) << 8) +
1648 ((u64)parm_list[idx + 7]);
1649
1650 ns->mode_select_block_len =
1651 (parm_list[idx + 12] << 24) +
1652 (parm_list[idx + 13] << 16) +
1653 (parm_list[idx + 14] << 8) +
1654 (parm_list[idx + 15]);
1655 }
1656}
1657
1658static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1659 u8 *mode_page, u8 page_code)
1660{
1661 int res = SNTI_TRANSLATION_SUCCESS;
1662 int nvme_sc;
1663 struct nvme_dev *dev = ns->dev;
1664 unsigned dword11;
1665
1666 switch (page_code) {
1667 case MODE_PAGE_CACHING:
1668 dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
1669 nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
1670 0, NULL);
1671 res = nvme_trans_status_code(hdr, nvme_sc);
1672 if (res)
1673 break;
1674 if (nvme_sc) {
1675 res = nvme_sc;
1676 break;
1677 }
1678 break;
1679 case MODE_PAGE_CONTROL:
1680 break;
1681 case MODE_PAGE_POWER_CONDITION:
1682 /* Verify the OS is not trying to set timers */
1683 if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
1684 res = nvme_trans_completion(hdr,
1685 SAM_STAT_CHECK_CONDITION,
1686 ILLEGAL_REQUEST,
1687 SCSI_ASC_INVALID_PARAMETER,
1688 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1689 if (!res)
1690 res = SNTI_INTERNAL_ERROR;
1691 break;
1692 }
1693 break;
1694 default:
1695 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1696 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1697 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1698 if (!res)
1699 res = SNTI_INTERNAL_ERROR;
1700 break;
1701 }
1702
1703 return res;
1704}
1705
1706static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1707 u8 *cmd, u16 parm_list_len, u8 pf,
1708 u8 sp, u8 cdb10)
1709{
1710 int res = SNTI_TRANSLATION_SUCCESS;
1711 u8 *parm_list;
1712 u16 bd_len;
1713 u8 llbaa = 0;
1714 u16 index, saved_index;
1715 u8 page_code;
1716 u16 mp_size;
1717
1718 /* Get parm list from data-in/out buffer */
1719 parm_list = kmalloc(parm_list_len, GFP_KERNEL);
1720 if (parm_list == NULL) {
1721 res = -ENOMEM;
1722 goto out;
1723 }
1724
1725 res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
1726 if (res != SNTI_TRANSLATION_SUCCESS)
1727 goto out_mem;
1728
1729 nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
1730 index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
1731
1732 if (bd_len != 0) {
1733 /* Block Descriptors present, parse */
1734 nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
1735 index += bd_len;
1736 }
1737 saved_index = index;
1738
1739 /* Multiple mode pages may be present; iterate through all */
1740 /* In 1st Iteration, don't do NVME Command, only check for CDB errors */
1741 do {
1742 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
1743 mp_size = parm_list[index + 1] + 2;
1744 if ((page_code != MODE_PAGE_CACHING) &&
1745 (page_code != MODE_PAGE_CONTROL) &&
1746 (page_code != MODE_PAGE_POWER_CONDITION)) {
1747 res = nvme_trans_completion(hdr,
1748 SAM_STAT_CHECK_CONDITION,
1749 ILLEGAL_REQUEST,
1750 SCSI_ASC_INVALID_CDB,
1751 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1752 goto out_mem;
1753 }
1754 index += mp_size;
1755 } while (index < parm_list_len);
1756
1757 	/* 2nd pass: issue the NVMe commands */
1758 index = saved_index;
1759 do {
1760 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
1761 mp_size = parm_list[index + 1] + 2;
1762 res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
1763 page_code);
1764 if (res != SNTI_TRANSLATION_SUCCESS)
1765 break;
1766 index += mp_size;
1767 } while (index < parm_list_len);
1768
1769 out_mem:
1770 kfree(parm_list);
1771 out:
1772 return res;
1773}
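
/*
 * Editor's note: an illustrative (editor-supplied, hypothetical) MODE
 * SELECT(10) parameter list that the parser above would accept: an 8-byte
 * mode parameter header, one 8-byte short block descriptor selecting
 * 512-byte blocks, and a caching mode page with WCE set. Byte values
 * follow SPC-4/SBC-3.
 */
#if 0
static const u8 example_modesel10_parm_list[] = {
	/* Mode parameter header(10): block descriptor length 8 in bytes 6..7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
	/* Short block descriptor: number of blocks 0, block length 512 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
	/* Caching mode page: page code 0x08, page length 0x12, WCE = 1 */
	0x08, 0x12, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,
};
#endif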
1774
1775/* Format Unit Helper Functions */
1776
1777static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
1778 struct sg_io_hdr *hdr)
1779{
1780 int res = SNTI_TRANSLATION_SUCCESS;
1781 int nvme_sc;
1782 struct nvme_dev *dev = ns->dev;
1783 dma_addr_t dma_addr;
1784 void *mem;
1785 struct nvme_id_ns *id_ns;
1786 u8 flbas;
1787
1788 /*
1789 	 * SCSI expects that a MODE SELECT has been issued prior to a
1790 	 * FORMAT UNIT, and that the block size and count from its block
1791 	 * descriptor are to be used. If no MODE SELECT has been issued,
1792 	 * FORMAT shall use the current values for both.
1793 */
1794
1795 if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
1796 mem = dma_alloc_coherent(&dev->pci_dev->dev,
1797 sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
1798 if (mem == NULL) {
1799 res = -ENOMEM;
1800 goto out;
1801 }
1802 /* nvme ns identify */
1803 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
1804 res = nvme_trans_status_code(hdr, nvme_sc);
1805 if (res)
1806 goto out_dma;
1807 if (nvme_sc) {
1808 res = nvme_sc;
1809 goto out_dma;
1810 }
1811 id_ns = mem;
1812
1813 if (ns->mode_select_num_blocks == 0)
1814 			ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
1815 		if (ns->mode_select_block_len == 0) {
1816 flbas = (id_ns->flbas) & 0x0F;
1817 ns->mode_select_block_len =
1818 (1 << (id_ns->lbaf[flbas].ds));
1819 }
1820 out_dma:
1821 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
1822 mem, dma_addr);
1823 }
1824 out:
1825 return res;
1826}
1827
1828static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
1829 u8 format_prot_info, u8 *nvme_pf_code)
1830{
1831 int res = SNTI_TRANSLATION_SUCCESS;
1832 u8 *parm_list;
1833 u8 pf_usage, pf_code;
1834
1835 parm_list = kmalloc(len, GFP_KERNEL);
1836 if (parm_list == NULL) {
1837 res = -ENOMEM;
1838 goto out;
1839 }
1840 res = nvme_trans_copy_from_user(hdr, parm_list, len);
1841 if (res != SNTI_TRANSLATION_SUCCESS)
1842 goto out_mem;
1843
1844 if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
1845 FORMAT_UNIT_IMMED_MASK) != 0) {
1846 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1847 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1848 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1849 goto out_mem;
1850 }
1851
1852 if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
1853 (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
1854 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1855 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1856 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1857 goto out_mem;
1858 }
1859 pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
1860 FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
1861 pf_code = (pf_usage << 2) | format_prot_info;
1862 switch (pf_code) {
1863 case 0:
1864 *nvme_pf_code = 0;
1865 break;
1866 case 2:
1867 *nvme_pf_code = 1;
1868 break;
1869 case 3:
1870 *nvme_pf_code = 2;
1871 break;
1872 case 7:
1873 *nvme_pf_code = 3;
1874 break;
1875 default:
1876 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1877 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1878 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1879 break;
1880 }
1881
1882 out_mem:
1883 kfree(parm_list);
1884 out:
1885 return res;
1886}
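
/*
 * Editor's note: the pf_code switch above packs the CDB's 2-bit FMTPINFO
 * field with the parameter list's PROTECTION FIELD USAGE field as
 * (usage << 2) | fmtpinfo and maps the combinations SBC-3 defines onto
 * NVMe protection information types:
 *
 *	FMTPINFO  USAGE  pf_code  NVMe PI
 *	    0       0       0     none
 *	    2       0       2     Type 1
 *	    3       0       3     Type 2
 *	    3       1       7     Type 3
 *
 * Every other combination is rejected as an invalid CDB.
 */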
1887
1888static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1889 u8 prot_info)
1890{
1891 int res = SNTI_TRANSLATION_SUCCESS;
1892 int nvme_sc;
1893 struct nvme_dev *dev = ns->dev;
1894 dma_addr_t dma_addr;
1895 void *mem;
1896 struct nvme_id_ns *id_ns;
1897 u8 i;
1898 u8 flbas, nlbaf;
1899 u8 selected_lbaf = 0xFF;
1900 u32 cdw10 = 0;
1901 struct nvme_command c;
1902
1903 	/* Loop through LBA formats in id_ns to find the required LBAF; put it in cdw10 */
1904 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
1905 &dma_addr, GFP_KERNEL);
1906 if (mem == NULL) {
1907 res = -ENOMEM;
1908 goto out;
1909 }
1910 /* nvme ns identify */
1911 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
1912 res = nvme_trans_status_code(hdr, nvme_sc);
1913 if (res)
1914 goto out_dma;
1915 if (nvme_sc) {
1916 res = nvme_sc;
1917 goto out_dma;
1918 }
1919 id_ns = mem;
1920 flbas = (id_ns->flbas) & 0x0F;
1921 nlbaf = id_ns->nlbaf;
1922
1923 for (i = 0; i < nlbaf; i++) {
1924 if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
1925 selected_lbaf = i;
1926 break;
1927 }
1928 }
1929 	if (selected_lbaf > 0x0F) {
1930 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1931 				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
1932 				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_dma;
1933 	}
1934 	if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
1935 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1936 				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
1937 				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_dma;
1938 	}
1939
1940 cdw10 |= prot_info << 5;
1941 cdw10 |= selected_lbaf & 0x0F;
1942 memset(&c, 0, sizeof(c));
1943 c.format.opcode = nvme_admin_format_nvm;
1944 	c.format.nsid = cpu_to_le32(ns->ns_id);
1945 	c.format.cdw10 = cpu_to_le32(cdw10);
1946
1947 nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
1948 res = nvme_trans_status_code(hdr, nvme_sc);
1949 if (res)
1950 goto out_dma;
1951 if (nvme_sc)
1952 res = nvme_sc;
1953
1954 out_dma:
1955 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
1956 dma_addr);
1957 out:
1958 return res;
1959}
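
/*
 * Editor's note: in the Format NVM command built above, cdw10 carries the
 * LBA format index in bits 3:0 and the protection information setting in
 * bits 7:5 (NVMe 1.0 spec), which is why selected_lbaf is masked with
 * 0x0F and prot_info is shifted left by 5.
 */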
1960
1961/* Read/Write Helper Functions */
1962
1963static inline void nvme_trans_get_io_cdb6(u8 *cmd,
1964 struct nvme_trans_io_cdb *cdb_info)
1965{
1966 cdb_info->fua = 0;
1967 cdb_info->prot_info = 0;
1968 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
1969 IO_6_CDB_LBA_MASK;
1970 cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);
1971
1972 /* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */
1973 if (cdb_info->xfer_len == 0)
1974 cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
1975}
1976
1977static inline void nvme_trans_get_io_cdb10(u8 *cmd,
1978 struct nvme_trans_io_cdb *cdb_info)
1979{
1980 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
1981 IO_CDB_FUA_MASK;
1982 	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
1983 			IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
1984 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
1985 cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
1986}
1987
1988static inline void nvme_trans_get_io_cdb12(u8 *cmd,
1989 struct nvme_trans_io_cdb *cdb_info)
1990{
1991 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
1992 IO_CDB_FUA_MASK;
1993 	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
1994 			IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
1995 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
1996 cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
1997}
1998
1999static inline void nvme_trans_get_io_cdb16(u8 *cmd,
2000 struct nvme_trans_io_cdb *cdb_info)
2001{
2002 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
2003 IO_CDB_FUA_MASK;
2004 	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
2005 			IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
2006 cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
2007 cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
2008}
2009
2010static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
2011 struct nvme_trans_io_cdb *cdb_info,
2012 u32 max_blocks)
2013{
2014 /* If using iovecs, send one nvme command per vector */
2015 if (hdr->iovec_count > 0)
2016 return hdr->iovec_count;
2017 else if (cdb_info->xfer_len > max_blocks)
2018 return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
2019 else
2020 return 1;
2021}
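
/*
 * Editor's note: a worked example of the split above, assuming 512-byte
 * LBAs and max_blocks = 1024. A non-iovec transfer of 4000 blocks needs
 * ((4000 - 1) / 1024) + 1 = 4 NVMe commands: three full 1024-block
 * commands plus one 928-block remainder.
 */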
2022
2023static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
2024 struct nvme_trans_io_cdb *cdb_info)
2025{
2026 u16 control = 0;
2027
2028 /* When Protection information support is added, implement here */
2029
2030 if (cdb_info->fua > 0)
2031 control |= NVME_RW_FUA;
2032
2033 return control;
2034}
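
/*
 * Editor's note: NVME_RW_FUA is bit 14 of the 16-bit control field, and
 * the read/write command places control in the upper half of dword 12,
 * so it lands on CDW12 bit 30, the FUA bit defined by the NVMe spec.
 */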
2035
2036static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2037 struct nvme_trans_io_cdb *cdb_info, u8 is_write)
2038{
2039 int res = SNTI_TRANSLATION_SUCCESS;
2040 int nvme_sc;
2041 struct nvme_dev *dev = ns->dev;
2042 	struct nvme_queue *nvmeq;
2043 u32 num_cmds;
2044 struct nvme_iod *iod;
2045 u64 unit_len;
2046 u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */
2047 u32 retcode;
2048 u32 i = 0;
2049 u64 nvme_offset = 0;
2050 	void __user *next_mapping_addr;
2051 	struct nvme_command c;
2052 u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
2053 u16 control;
2054 	u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
2055 
2056 num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
2057
2058 /*
2059 * This loop handles two cases.
2060 * First, when an SGL is used in the form of an iovec list:
2061 * - Use iov_base as the next mapping address for the nvme command_id
2062 * - Use iov_len as the data transfer length for the command.
2063 * Second, when we have a single buffer
2064 * - If larger than max_blocks, split into chunks, offset
2065 * each nvme command accordingly.
2066 */
2067 for (i = 0; i < num_cmds; i++) {
2068 memset(&c, 0, sizeof(c));
2069 if (hdr->iovec_count > 0) {
2070 			struct sg_iovec sgl;
2071 
2072 			retcode = copy_from_user(&sgl, hdr->dxferp +
2073 i * sizeof(struct sg_iovec),
2074 sizeof(struct sg_iovec));
2075 if (retcode)
2076 return -EFAULT;
2077 unit_len = sgl.iov_len;
2078 			unit_num_blocks = unit_len >> ns->lba_shift;
2079 			next_mapping_addr = sgl.iov_base;
2080 		} else {
2081 unit_num_blocks = min((u64)max_blocks,
2082 (cdb_info->xfer_len - nvme_offset));
2083 unit_len = unit_num_blocks << ns->lba_shift;
2084 next_mapping_addr = hdr->dxferp +
2085 ((1 << ns->lba_shift) * nvme_offset);
2086 }
2087
2088 c.rw.opcode = opcode;
2089 c.rw.nsid = cpu_to_le32(ns->ns_id);
2090 c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
2091 c.rw.length = cpu_to_le16(unit_num_blocks - 1);
2092 control = nvme_trans_io_get_control(ns, cdb_info);
2093 c.rw.control = cpu_to_le16(control);
2094
2095 iod = nvme_map_user_pages(dev,
2096 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2097 (unsigned long)next_mapping_addr, unit_len);
2098 if (IS_ERR(iod)) {
2099 res = PTR_ERR(iod);
2100 goto out;
2101 }
2102 retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
2103 GFP_KERNEL);
2104 if (retcode != unit_len) {
2105 nvme_unmap_user_pages(dev,
2106 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2107 iod);
2108 nvme_free_iod(dev, iod);
2109 res = -ENOMEM;
2110 goto out;
2111 }
2112
2113 nvme_offset += unit_num_blocks;
2114
2115 nvmeq = get_nvmeq(dev);
2116 /*
2117 * Since nvme_submit_sync_cmd sleeps, we can't keep
2118 * preemption disabled. We may be preempted at any
2119 * point, and be rescheduled to a different CPU. That
2120 * will cause cacheline bouncing, but no additional
2121 * races since q_lock already protects against other
2122 * CPUs.
2123 */
2124 put_nvmeq(nvmeq);
2125 nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
2126 NVME_IO_TIMEOUT);
2127 if (nvme_sc != NVME_SC_SUCCESS) {
2128 nvme_unmap_user_pages(dev,
2129 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2130 iod);
2131 nvme_free_iod(dev, iod);
2132 res = nvme_trans_status_code(hdr, nvme_sc);
2133 goto out;
2134 }
2135 nvme_unmap_user_pages(dev,
2136 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2137 iod);
2138 nvme_free_iod(dev, iod);
2139 }
2140 res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
2141
2142 out:
2143 return res;
2144}
2145
2146
2147/* SCSI Command Translation Functions */
2148
2149static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
2150 u8 *cmd)
2151{
2152 int res = SNTI_TRANSLATION_SUCCESS;
2153 struct nvme_trans_io_cdb cdb_info;
2154 u8 opcode = cmd[0];
2155 u64 xfer_bytes;
2156 u64 sum_iov_len = 0;
2157 	struct sg_iovec sgl;
2158 	int i;
2159 	size_t not_copied;
2160 
2161 /* Extract Fields from CDB */
2162 switch (opcode) {
2163 case WRITE_6:
2164 case READ_6:
2165 nvme_trans_get_io_cdb6(cmd, &cdb_info);
2166 break;
2167 case WRITE_10:
2168 case READ_10:
2169 nvme_trans_get_io_cdb10(cmd, &cdb_info);
2170 break;
2171 case WRITE_12:
2172 case READ_12:
2173 nvme_trans_get_io_cdb12(cmd, &cdb_info);
2174 break;
2175 case WRITE_16:
2176 case READ_16:
2177 nvme_trans_get_io_cdb16(cmd, &cdb_info);
2178 break;
2179 default:
2180 /* Will never really reach here */
2181 res = SNTI_INTERNAL_ERROR;
2182 goto out;
2183 }
2184
2185 /* Calculate total length of transfer (in bytes) */
2186 if (hdr->iovec_count > 0) {
2187 		for (i = 0; i < hdr->iovec_count; i++) {
2188 			not_copied = copy_from_user(&sgl, hdr->dxferp +
2189 i * sizeof(struct sg_iovec),
2190 sizeof(struct sg_iovec));
2191 if (not_copied)
2192 return -EFAULT;
2193 sum_iov_len += sgl.iov_len;
2194 			/* IO vector sizes should be multiples of block size */
2195 			if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
2196 				res = nvme_trans_completion(hdr,
2197 SAM_STAT_CHECK_CONDITION,
2198 ILLEGAL_REQUEST,
2199 SCSI_ASC_INVALID_PARAMETER,
2200 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2201 goto out;
2202 }
2203 }
2204 } else {
2205 sum_iov_len = hdr->dxfer_len;
2206 }
2207
2208 	/* As per the sg ioctl HOWTO, if the lengths differ, use the lower one */
2209 xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
2210
2211 	/* If the block count and actual data buffer size don't match, error out */
2212 	if (xfer_bytes != ((u64)cdb_info.xfer_len << ns->lba_shift)) {
2213 res = -EINVAL;
2214 goto out;
2215 }
2216
2217 /* Check for 0 length transfer - it is not illegal */
2218 if (cdb_info.xfer_len == 0)
2219 goto out;
2220
2221 /* Send NVMe IO Command(s) */
2222 res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
2223 if (res != SNTI_TRANSLATION_SUCCESS)
2224 goto out;
2225
2226 out:
2227 return res;
2228}
2229
2230static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2231 u8 *cmd)
2232{
2233 int res = SNTI_TRANSLATION_SUCCESS;
2234 u8 evpd;
2235 u8 page_code;
2236 int alloc_len;
2237 u8 *inq_response;
2238
2239 evpd = GET_INQ_EVPD_BIT(cmd);
2240 page_code = GET_INQ_PAGE_CODE(cmd);
2241 alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
2242
2243 inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
2244 if (inq_response == NULL) {
2245 res = -ENOMEM;
2246 goto out_mem;
2247 }
2248
2249 if (evpd == 0) {
2250 if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
2251 res = nvme_trans_standard_inquiry_page(ns, hdr,
2252 inq_response, alloc_len);
2253 } else {
2254 res = nvme_trans_completion(hdr,
2255 SAM_STAT_CHECK_CONDITION,
2256 ILLEGAL_REQUEST,
2257 SCSI_ASC_INVALID_CDB,
2258 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2259 }
2260 } else {
2261 switch (page_code) {
2262 case VPD_SUPPORTED_PAGES:
2263 res = nvme_trans_supported_vpd_pages(ns, hdr,
2264 inq_response, alloc_len);
2265 break;
2266 case VPD_SERIAL_NUMBER:
2267 res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
2268 alloc_len);
2269 break;
2270 case VPD_DEVICE_IDENTIFIERS:
2271 res = nvme_trans_device_id_page(ns, hdr, inq_response,
2272 alloc_len);
2273 break;
2274 case VPD_EXTENDED_INQUIRY:
2275 res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
2276 break;
2277 case VPD_BLOCK_DEV_CHARACTERISTICS:
2278 res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
2279 break;
2280 default:
2281 res = nvme_trans_completion(hdr,
2282 SAM_STAT_CHECK_CONDITION,
2283 ILLEGAL_REQUEST,
2284 SCSI_ASC_INVALID_CDB,
2285 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2286 break;
2287 }
2288 }
2289 kfree(inq_response);
2290 out_mem:
2291 return res;
2292}
2293
2294static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2295 u8 *cmd)
2296{
2297 int res = SNTI_TRANSLATION_SUCCESS;
2298 u16 alloc_len;
2299 u8 sp;
2300 u8 pc;
2301 u8 page_code;
2302
2303 sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
2304 if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
2305 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2306 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2307 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2308 goto out;
2309 }
2310 pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
2311 page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
2312 pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
2313 if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
2314 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2315 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2316 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2317 goto out;
2318 }
2319 alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
2320 switch (page_code) {
2321 case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
2322 res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
2323 break;
2324 case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
2325 res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
2326 break;
2327 case LOG_PAGE_TEMPERATURE_PAGE:
2328 res = nvme_trans_log_temperature(ns, hdr, alloc_len);
2329 break;
2330 default:
2331 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2332 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2333 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2334 break;
2335 }
2336
2337 out:
2338 return res;
2339}
2340
2341static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2342 u8 *cmd)
2343{
2344 int res = SNTI_TRANSLATION_SUCCESS;
2345 u8 cdb10 = 0;
2346 u16 parm_list_len;
2347 u8 page_format;
2348 u8 save_pages;
2349
2350 page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
2351 page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;
2352
2353 save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
2354 save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;
2355
2356 if (GET_OPCODE(cmd) == MODE_SELECT) {
2357 parm_list_len = GET_U8_FROM_CDB(cmd,
2358 MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
2359 } else {
2360 parm_list_len = GET_U16_FROM_CDB(cmd,
2361 MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
2362 cdb10 = 1;
2363 }
2364
2365 if (parm_list_len != 0) {
2366 /*
2367 		 * According to SPC-4 r24, a parameter list length field of 0
2368 * shall not be considered an error
2369 */
2370 res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
2371 page_format, save_pages, cdb10);
2372 }
2373
2374 return res;
2375}
2376
2377static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2378 u8 *cmd)
2379{
2380 int res = SNTI_TRANSLATION_SUCCESS;
2381 u16 alloc_len;
2382 u8 cdb10 = 0;
2383 u8 page_code;
2384 u8 pc;
2385
2386 if (GET_OPCODE(cmd) == MODE_SENSE) {
2387 alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
2388 } else {
2389 alloc_len = GET_U16_FROM_CDB(cmd,
2390 MODE_SENSE10_ALLOC_LEN_OFFSET);
2391 cdb10 = 1;
2392 }
2393
2394 pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
2395 MODE_SENSE_PAGE_CONTROL_MASK;
2396 if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
2397 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2398 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2399 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2400 goto out;
2401 }
2402
2403 page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
2404 MODE_SENSE_PAGE_CODE_MASK;
2405 switch (page_code) {
2406 case MODE_PAGE_CACHING:
2407 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2408 cdb10,
2409 &nvme_trans_fill_caching_page,
2410 MODE_PAGE_CACHING_LEN);
2411 break;
2412 case MODE_PAGE_CONTROL:
2413 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2414 cdb10,
2415 &nvme_trans_fill_control_page,
2416 MODE_PAGE_CONTROL_LEN);
2417 break;
2418 case MODE_PAGE_POWER_CONDITION:
2419 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2420 cdb10,
2421 &nvme_trans_fill_pow_cnd_page,
2422 MODE_PAGE_POW_CND_LEN);
2423 break;
2424 case MODE_PAGE_INFO_EXCEP:
2425 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2426 cdb10,
2427 &nvme_trans_fill_inf_exc_page,
2428 MODE_PAGE_INF_EXC_LEN);
2429 break;
2430 case MODE_PAGE_RETURN_ALL:
2431 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2432 cdb10,
2433 &nvme_trans_fill_all_pages,
2434 MODE_PAGE_ALL_LEN);
2435 break;
2436 default:
2437 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2438 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2439 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2440 break;
2441 }
2442
2443 out:
2444 return res;
2445}
2446
2447static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2448 u8 *cmd)
2449{
2450 int res = SNTI_TRANSLATION_SUCCESS;
2451 int nvme_sc;
2452 u32 alloc_len = READ_CAP_10_RESP_SIZE;
2453 u32 resp_size = READ_CAP_10_RESP_SIZE;
2454 u32 xfer_len;
2455 u8 cdb16;
2456 struct nvme_dev *dev = ns->dev;
2457 dma_addr_t dma_addr;
2458 void *mem;
2459 struct nvme_id_ns *id_ns;
2460 u8 *response;
2461
2462 cdb16 = IS_READ_CAP_16(cmd);
2463 if (cdb16) {
2464 alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
2465 resp_size = READ_CAP_16_RESP_SIZE;
2466 }
2467
2468 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
2469 &dma_addr, GFP_KERNEL);
2470 if (mem == NULL) {
2471 res = -ENOMEM;
2472 goto out;
2473 }
2474 /* nvme ns identify */
2475 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
2476 res = nvme_trans_status_code(hdr, nvme_sc);
2477 if (res)
2478 goto out_dma;
2479 if (nvme_sc) {
2480 res = nvme_sc;
2481 goto out_dma;
2482 }
2483 id_ns = mem;
2484
2485 response = kmalloc(resp_size, GFP_KERNEL);
2486 if (response == NULL) {
2487 res = -ENOMEM;
2488 goto out_dma;
2489 }
2490 memset(response, 0, resp_size);
2491 nvme_trans_fill_read_cap(response, id_ns, cdb16);
2492
2493 xfer_len = min(alloc_len, resp_size);
2494 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2495
2496 kfree(response);
2497 out_dma:
2498 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
2499 dma_addr);
2500 out:
2501 return res;
2502}
2503
2504static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2505 u8 *cmd)
2506{
2507 int res = SNTI_TRANSLATION_SUCCESS;
2508 int nvme_sc;
2509 u32 alloc_len, xfer_len, resp_size;
2510 u8 select_report;
2511 u8 *response;
2512 struct nvme_dev *dev = ns->dev;
2513 dma_addr_t dma_addr;
2514 void *mem;
2515 struct nvme_id_ctrl *id_ctrl;
2516 u32 ll_length, lun_id;
2517 u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
2518 	__be32 tmp_len;
2519 
2520 alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
2521 select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);
2522
2523 if ((select_report != ALL_LUNS_RETURNED) &&
2524 (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
2525 (select_report != RESTRICTED_LUNS_RETURNED)) {
2526 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2527 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2528 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2529 goto out;
2530 } else {
2531 /* NVMe Controller Identify */
2532 mem = dma_alloc_coherent(&dev->pci_dev->dev,
2533 sizeof(struct nvme_id_ctrl),
2534 &dma_addr, GFP_KERNEL);
2535 if (mem == NULL) {
2536 res = -ENOMEM;
2537 goto out;
2538 }
2539 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
2540 res = nvme_trans_status_code(hdr, nvme_sc);
2541 if (res)
2542 goto out_dma;
2543 if (nvme_sc) {
2544 res = nvme_sc;
2545 goto out_dma;
2546 }
2547 id_ctrl = mem;
2548 		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
2549 		resp_size = ll_length + LUN_DATA_HEADER_SIZE;
2550
2551 if (alloc_len < resp_size) {
2552 res = nvme_trans_completion(hdr,
2553 SAM_STAT_CHECK_CONDITION,
2554 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2555 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2556 goto out_dma;
2557 }
2558
2559 response = kmalloc(resp_size, GFP_KERNEL);
2560 if (response == NULL) {
2561 res = -ENOMEM;
2562 goto out_dma;
2563 }
2564 memset(response, 0, resp_size);
2565
2566 /* The first LUN ID will always be 0 per the SAM spec */
2567 		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
2568 			/*
2569 * Set the LUN Id and then increment to the next LUN
2570 * location in the parameter data.
2571 */
2572 			__be64 tmp_id = cpu_to_be64(lun_id);
2573 			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
2574 lun_id_offset += LUN_ENTRY_SIZE;
2575 }
2576 tmp_len = cpu_to_be32(ll_length);
2577 memcpy(response, &tmp_len, sizeof(u32));
2578 }
2579
2580 xfer_len = min(alloc_len, resp_size);
2581 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2582
2583 kfree(response);
2584 out_dma:
2585 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
2586 dma_addr);
2587 out:
2588 return res;
2589}
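
/*
 * Editor's note: the parameter data built above follows the SPC-4 REPORT
 * LUNS format: an 8-byte header whose first four bytes hold the LUN list
 * length in bytes (big-endian, excluding the header), followed by one
 * 8-byte LUN entry per namespace; hence resp_size = nn * LUN_ENTRY_SIZE
 * + LUN_DATA_HEADER_SIZE.
 */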
2590
2591static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2592 u8 *cmd)
2593{
2594 int res = SNTI_TRANSLATION_SUCCESS;
2595 u8 alloc_len, xfer_len, resp_size;
2596 u8 desc_format;
2597 u8 *response;
2598
2599 alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
2600 desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
2601 desc_format &= REQUEST_SENSE_DESC_MASK;
2602
2603 resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
2604 (FIXED_FMT_SENSE_DATA_SIZE));
2605 response = kmalloc(resp_size, GFP_KERNEL);
2606 if (response == NULL) {
2607 res = -ENOMEM;
2608 goto out;
2609 }
2610 memset(response, 0, resp_size);
2611
2612 if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
2613 /* Descriptor Format Sense Data */
2614 response[0] = DESC_FORMAT_SENSE_DATA;
2615 response[1] = NO_SENSE;
2616 /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
2617 response[2] = SCSI_ASC_NO_SENSE;
2618 response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2619 /* SDAT_OVFL = 0 | Additional Sense Length = 0 */
2620 } else {
2621 /* Fixed Format Sense Data */
2622 response[0] = FIXED_SENSE_DATA;
2623 /* Byte 1 = Obsolete */
2624 response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
2625 /* Bytes 3-6 - Information - set to zero */
2626 response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
2627 /* Bytes 8-11 - Cmd Specific Information - set to zero */
2628 response[12] = SCSI_ASC_NO_SENSE;
2629 response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2630 /* Byte 14 = Field Replaceable Unit Code = 0 */
2631 /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
2632 }
2633
2634 xfer_len = min(alloc_len, resp_size);
2635 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2636
2637 kfree(response);
2638 out:
2639 return res;
2640}
2641
2642static int nvme_trans_security_protocol(struct nvme_ns *ns,
2643 struct sg_io_hdr *hdr,
2644 u8 *cmd)
2645{
2646 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2647 ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
2648 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2649}
2650
2651static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2652 u8 *cmd)
2653{
2654 int res = SNTI_TRANSLATION_SUCCESS;
2655 int nvme_sc;
2656 	struct nvme_queue *nvmeq;
2657 u8 immed, pcmod, pc, no_flush, start;
2658
2659 immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
2660 pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
2661 pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
2662 no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
2663 start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);
2664
2665 immed &= START_STOP_UNIT_CDB_IMMED_MASK;
2666 pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
2667 pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
2668 no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
2669 start &= START_STOP_UNIT_CDB_START_MASK;
2670
2671 if (immed != 0) {
2672 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2673 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2674 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2675 } else {
2676 if (no_flush == 0) {
2677 /* Issue NVME FLUSH command prior to START STOP UNIT */
2678 			nvmeq = get_nvmeq(ns->dev);
2679 			put_nvmeq(nvmeq);
			nvme_sc = nvme_submit_flush_data(nvmeq, ns);
2680 res = nvme_trans_status_code(hdr, nvme_sc);
2681 if (res)
2682 goto out;
2683 if (nvme_sc) {
2684 res = nvme_sc;
2685 goto out;
2686 }
2687 }
2688 /* Setup the expected power state transition */
2689 res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
2690 }
2691
2692 out:
2693 return res;
2694}
2695
2696static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
2697 struct sg_io_hdr *hdr, u8 *cmd)
2698{
2699 int res = SNTI_TRANSLATION_SUCCESS;
2700 int nvme_sc;
2701 struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
2702 put_nvmeq(nvmeq);
2703 nvme_sc = nvme_submit_flush_data(nvmeq, ns);
2704 res = nvme_trans_status_code(hdr, nvme_sc);
2705 if (res)
2706 goto out;
2707 if (nvme_sc)
2708 res = nvme_sc;
2709
2710 out:
2711 return res;
2712}
2713
2714static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2715 u8 *cmd)
2716{
2717 int res = SNTI_TRANSLATION_SUCCESS;
2718 u8 parm_hdr_len = 0;
2719 u8 nvme_pf_code = 0;
2720 u8 format_prot_info, long_list, format_data;
2721
2722 format_prot_info = GET_U8_FROM_CDB(cmd,
2723 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
2724 long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
2725 format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);
2726
2727 format_prot_info = (format_prot_info &
2728 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
2729 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
2730 long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
2731 format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;
2732
2733 if (format_data != 0) {
2734 if (format_prot_info != 0) {
2735 if (long_list == 0)
2736 parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
2737 else
2738 parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
2739 }
2740 } else if (format_data == 0 && format_prot_info != 0) {
2741 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2742 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2743 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2744 goto out;
2745 }
2746
2747 /* Get parm header from data-in/out buffer */
2748 /*
2749 * According to the translation spec, the only fields in the parameter
2750 * list we are concerned with are in the header. So allocate only that.
2751 */
2752 if (parm_hdr_len > 0) {
2753 res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
2754 format_prot_info, &nvme_pf_code);
2755 if (res != SNTI_TRANSLATION_SUCCESS)
2756 goto out;
2757 }
2758
2759 /* Attempt to activate any previously downloaded firmware image */
2760 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);
2761
2762 /* Determine Block size and count and send format command */
2763 res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
2764 if (res != SNTI_TRANSLATION_SUCCESS)
2765 goto out;
2766
2767 res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
2768
2769 out:
2770 return res;
2771}
2772
2773static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
2774 struct sg_io_hdr *hdr,
2775 u8 *cmd)
2776{
2777 int res = SNTI_TRANSLATION_SUCCESS;
2778 struct nvme_dev *dev = ns->dev;
2779
2780 if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
2781 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2782 NOT_READY, SCSI_ASC_LUN_NOT_READY,
2783 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2784 else
2785 res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
2786
2787 return res;
2788}
2789
2790static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2791 u8 *cmd)
2792{
2793 int res = SNTI_TRANSLATION_SUCCESS;
2794 u32 buffer_offset, parm_list_length;
2795 u8 buffer_id, mode;
2796
2797 parm_list_length =
2798 GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
2799 if (parm_list_length % BYTES_TO_DWORDS != 0) {
2800 /* NVMe expects Firmware file to be a whole number of DWORDS */
2801 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2802 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2803 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2804 goto out;
2805 }
2806 buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
2807 if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
2808 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2809 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2810 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2811 goto out;
2812 }
2813 mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
2814 WRITE_BUFFER_CDB_MODE_MASK;
2815 buffer_offset =
2816 GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);
2817
2818 switch (mode) {
2819 case DOWNLOAD_SAVE_ACTIVATE:
2820 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
2821 parm_list_length, buffer_offset,
2822 buffer_id);
2823 if (res != SNTI_TRANSLATION_SUCCESS)
2824 goto out;
2825 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
2826 parm_list_length, buffer_offset,
2827 buffer_id);
2828 break;
2829 case DOWNLOAD_SAVE_DEFER_ACTIVATE:
2830 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
2831 parm_list_length, buffer_offset,
2832 buffer_id);
2833 break;
2834 case ACTIVATE_DEFERRED_MICROCODE:
2835 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
2836 parm_list_length, buffer_offset,
2837 buffer_id);
2838 break;
2839 default:
2840 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2841 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2842 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2843 break;
2844 }
2845
2846 out:
2847 return res;
2848}
2849
2850static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
2851{
2852 u8 cmd[BLK_MAX_CDB];
2853 int retcode;
2854 unsigned int opcode;
2855
2856 if (hdr->cmdp == NULL)
2857 return -EMSGSIZE;
2858 if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
2859 return -EFAULT;
2860
2861 opcode = cmd[0];
2862
2863 switch (opcode) {
2864 case READ_6:
2865 case READ_10:
2866 case READ_12:
2867 case READ_16:
2868 retcode = nvme_trans_io(ns, hdr, 0, cmd);
2869 break;
2870 case WRITE_6:
2871 case WRITE_10:
2872 case WRITE_12:
2873 case WRITE_16:
2874 retcode = nvme_trans_io(ns, hdr, 1, cmd);
2875 break;
2876 case INQUIRY:
2877 retcode = nvme_trans_inquiry(ns, hdr, cmd);
2878 break;
2879 case LOG_SENSE:
2880 retcode = nvme_trans_log_sense(ns, hdr, cmd);
2881 break;
2882 case MODE_SELECT:
2883 case MODE_SELECT_10:
2884 retcode = nvme_trans_mode_select(ns, hdr, cmd);
2885 break;
2886 case MODE_SENSE:
2887 case MODE_SENSE_10:
2888 retcode = nvme_trans_mode_sense(ns, hdr, cmd);
2889 break;
2890 case READ_CAPACITY:
2891 retcode = nvme_trans_read_capacity(ns, hdr, cmd);
2892 break;
2893 case SERVICE_ACTION_IN:
2894 if (IS_READ_CAP_16(cmd))
2895 retcode = nvme_trans_read_capacity(ns, hdr, cmd);
2896 else
2897 goto out;
2898 break;
2899 case REPORT_LUNS:
2900 retcode = nvme_trans_report_luns(ns, hdr, cmd);
2901 break;
2902 case REQUEST_SENSE:
2903 retcode = nvme_trans_request_sense(ns, hdr, cmd);
2904 break;
2905 case SECURITY_PROTOCOL_IN:
2906 case SECURITY_PROTOCOL_OUT:
2907 retcode = nvme_trans_security_protocol(ns, hdr, cmd);
2908 break;
2909 case START_STOP:
2910 retcode = nvme_trans_start_stop(ns, hdr, cmd);
2911 break;
2912 case SYNCHRONIZE_CACHE:
2913 retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
2914 break;
2915 case FORMAT_UNIT:
2916 retcode = nvme_trans_format_unit(ns, hdr, cmd);
2917 break;
2918 case TEST_UNIT_READY:
2919 retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
2920 break;
2921 case WRITE_BUFFER:
2922 retcode = nvme_trans_write_buffer(ns, hdr, cmd);
2923 break;
2924 default:
2925 out:
2926 retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2927 ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
2928 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2929 break;
2930 }
2931 return retcode;
2932}
2933
2934int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
2935{
2936 struct sg_io_hdr hdr;
2937 int retcode;
2938
2939 if (!capable(CAP_SYS_ADMIN))
2940 return -EACCES;
2941 if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
2942 return -EFAULT;
2943 if (hdr.interface_id != 'S')
2944 return -EINVAL;
2945 if (hdr.cmd_len > BLK_MAX_CDB)
2946 return -EINVAL;
2947
2948 retcode = nvme_scsi_translate(ns, &hdr);
2949 if (retcode < 0)
2950 return retcode;
2951 if (retcode > 0)
2952 retcode = SNTI_TRANSLATION_SUCCESS;
2953 	if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
2954 		return -EFAULT;
2955
2956 return retcode;
2957}
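
/*
 * Editor's note: a user-space sketch (not kernel code) of how an
 * application reaches nvme_sg_io() above: a READ(10) of one 512-byte
 * block at LBA 0 via the SG_IO ioctl. The device path and block size are
 * assumptions for illustration; the caller needs CAP_SYS_ADMIN.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0, 1, 0 };
	unsigned char buf[512], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/nvme0n1", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';		/* checked by nvme_sg_io() */
	hdr.cmd_len = sizeof(cdb);	/* must not exceed BLK_MAX_CDB */
	hdr.cmdp = cdb;			/* READ(10): LBA 0, 1 block */
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = sizeof(buf);
	hdr.dxferp = buf;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("SCSI status: 0x%x\n", hdr.status);
	return 0;
}
#endif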
2958
2959int nvme_sg_get_version_num(int __user *ip)
2960{
2961 return put_user(sg_version_num, ip);
2962}