/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

/*
 * Refer to the SCSI-NVMe Translation spec for details on how
 * each command is translated.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>


static int sg_version_num = 30534;	/* 2 digits for each component */

/* VPD Page Codes */
#define VPD_SUPPORTED_PAGES	0x00
#define VPD_SERIAL_NUMBER	0x80
#define VPD_DEVICE_IDENTIFIERS	0x83
#define VPD_EXTENDED_INQUIRY	0x86
#define VPD_BLOCK_LIMITS	0xB0
#define VPD_BLOCK_DEV_CHARACTERISTICS	0xB1

/* format unit parameter list offsets */
#define FORMAT_UNIT_SHORT_PARM_LIST_LEN	4
#define FORMAT_UNIT_LONG_PARM_LIST_LEN	8
#define FORMAT_UNIT_PROT_INT_OFFSET	3
#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET	0
#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK	0x07

/* Misc. defines */
#define FIXED_SENSE_DATA	0x70
#define DESC_FORMAT_SENSE_DATA	0x72
#define FIXED_SENSE_DATA_ADD_LENGTH	10
#define LUN_ENTRY_SIZE	8
#define LUN_DATA_HEADER_SIZE	8
#define ALL_LUNS_RETURNED	0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED	0x01
#define RESTRICTED_LUNS_RETURNED	0x00
#define NVME_POWER_STATE_START_VALID	0x00
#define NVME_POWER_STATE_ACTIVE	0x01
#define NVME_POWER_STATE_IDLE	0x02
#define NVME_POWER_STATE_STANDBY	0x03
#define NVME_POWER_STATE_LU_CONTROL	0x07
#define POWER_STATE_0	0
#define POWER_STATE_1	1
#define POWER_STATE_2	2
#define POWER_STATE_3	3
#define DOWNLOAD_SAVE_ACTIVATE	0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE	0x0E
#define ACTIVATE_DEFERRED_MICROCODE	0x0F
#define FORMAT_UNIT_IMMED_MASK	0x2
#define FORMAT_UNIT_IMMED_OFFSET	1
#define KELVIN_TEMP_FACTOR	273
#define FIXED_FMT_SENSE_DATA_SIZE	18
#define DESC_FMT_SENSE_DATA_SIZE	8

/* SCSI/NVMe defines and bit masks */
#define INQ_STANDARD_INQUIRY_PAGE	0x00
#define INQ_SUPPORTED_VPD_PAGES_PAGE	0x00
#define INQ_UNIT_SERIAL_NUMBER_PAGE	0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE	0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE	0x86
#define INQ_BDEV_LIMITS_PAGE	0xB0
#define INQ_BDEV_CHARACTERISTICS_PAGE	0xB1
#define INQ_SERIAL_NUMBER_LENGTH	0x14
#define INQ_NUM_SUPPORTED_VPD_PAGES	6
#define VERSION_SPC_4	0x06
#define ACA_UNSUPPORTED	0
#define STANDARD_INQUIRY_LENGTH	36
#define ADDITIONAL_STD_INQ_LENGTH	31
#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH	0x3C
#define RESERVED_FIELD	0

/* Mode Sense/Select defines */
#define MODE_PAGE_INFO_EXCEP	0x1C
#define MODE_PAGE_CACHING	0x08
#define MODE_PAGE_CONTROL	0x0A
#define MODE_PAGE_POWER_CONDITION	0x1A
#define MODE_PAGE_RETURN_ALL	0x3F
#define MODE_PAGE_BLK_DES_LEN	0x08
#define MODE_PAGE_LLBAA_BLK_DES_LEN	0x10
#define MODE_PAGE_CACHING_LEN	0x14
#define MODE_PAGE_CONTROL_LEN	0x0C
#define MODE_PAGE_POW_CND_LEN	0x28
#define MODE_PAGE_INF_EXC_LEN	0x0C
#define MODE_PAGE_ALL_LEN	0x54
#define MODE_SENSE6_MPH_SIZE	4
#define MODE_SENSE_PAGE_CONTROL_MASK	0xC0
#define MODE_SENSE_PAGE_CODE_OFFSET	2
#define MODE_SENSE_PAGE_CODE_MASK	0x3F
#define MODE_SENSE_LLBAA_MASK	0x10
#define MODE_SENSE_LLBAA_SHIFT	4
#define MODE_SENSE_DBD_MASK	8
#define MODE_SENSE_DBD_SHIFT	3
#define MODE_SENSE10_MPH_SIZE	8
#define MODE_SELECT_CDB_PAGE_FORMAT_MASK	0x10
#define MODE_SELECT_CDB_SAVE_PAGES_MASK	0x1
#define MODE_SELECT_6_BD_OFFSET	3
#define MODE_SELECT_10_BD_OFFSET	6
#define MODE_SELECT_10_LLBAA_OFFSET	4
#define MODE_SELECT_10_LLBAA_MASK	1
#define MODE_SELECT_6_MPH_SIZE	4
#define MODE_SELECT_10_MPH_SIZE	8
#define CACHING_MODE_PAGE_WCE_MASK	0x04
#define MODE_SENSE_BLK_DESC_ENABLED	0
#define MODE_SENSE_BLK_DESC_COUNT	1
#define MODE_SELECT_PAGE_CODE_MASK	0x3F
#define SHORT_DESC_BLOCK	8
#define LONG_DESC_BLOCK	16
#define MODE_PAGE_POW_CND_LEN_FIELD	0x26
#define MODE_PAGE_INF_EXC_LEN_FIELD	0x0A
#define MODE_PAGE_CACHING_LEN_FIELD	0x12
#define MODE_PAGE_CONTROL_LEN_FIELD	0x0A
#define MODE_SENSE_PC_CURRENT_VALUES	0

/* Log Sense defines */
#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE	0x00
#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH	0x07
#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE	0x2F
#define LOG_PAGE_TEMPERATURE_PAGE	0x0D
#define LOG_SENSE_CDB_SP_NOT_ENABLED	0
#define LOG_SENSE_CDB_PC_MASK	0xC0
#define LOG_SENSE_CDB_PC_SHIFT	6
#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES	1
#define LOG_SENSE_CDB_PAGE_CODE_MASK	0x3F
#define REMAINING_INFO_EXCP_PAGE_LENGTH	0x8
#define LOG_INFO_EXCP_PAGE_LENGTH	0xC
#define REMAINING_TEMP_PAGE_LENGTH	0xC
#define LOG_TEMP_PAGE_LENGTH	0x10
#define LOG_TEMP_UNKNOWN	0xFF
#define SUPPORTED_LOG_PAGES_PAGE_LENGTH	0x3

/* Read Capacity defines */
#define READ_CAP_10_RESP_SIZE	8
#define READ_CAP_16_RESP_SIZE	32

/* NVMe Namespace and Command Defines */
#define BYTES_TO_DWORDS	4
#define NVME_MAX_FIRMWARE_SLOT	7

/* Report LUNs defines */
#define REPORT_LUNS_FIRST_LUN_OFFSET	8

/* SCSI ADDITIONAL SENSE Codes */

#define SCSI_ASC_NO_SENSE	0x00
#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT	0x03
#define SCSI_ASC_LUN_NOT_READY	0x04
#define SCSI_ASC_WARNING	0x0B
#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED	0x10
#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED	0x10
#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED	0x10
#define SCSI_ASC_UNRECOVERED_READ_ERROR	0x11
#define SCSI_ASC_MISCOMPARE_DURING_VERIFY	0x1D
#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID	0x20
#define SCSI_ASC_ILLEGAL_COMMAND	0x20
#define SCSI_ASC_ILLEGAL_BLOCK	0x21
#define SCSI_ASC_INVALID_CDB	0x24
#define SCSI_ASC_INVALID_LUN	0x25
#define SCSI_ASC_INVALID_PARAMETER	0x26
#define SCSI_ASC_FORMAT_COMMAND_FAILED	0x31
#define SCSI_ASC_INTERNAL_TARGET_FAILURE	0x44

/* SCSI ADDITIONAL SENSE Code Qualifiers */

#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE	0x00
#define SCSI_ASCQ_FORMAT_COMMAND_FAILED	0x01
#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED	0x01
#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED	0x02
#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED	0x03
#define SCSI_ASCQ_FORMAT_IN_PROGRESS	0x04
#define SCSI_ASCQ_POWER_LOSS_EXPECTED	0x08
#define SCSI_ASCQ_INVALID_LUN_ID	0x09

/* copied from drivers/usb/gadget/function/storage_common.h */
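/*
 * Reads 32 bits starting one byte before @buf and masks the extra byte off,
 * yielding the three bytes at buf[0..2] as a big-endian 24-bit value.  This
 * relies on buf - 1 being addressable, which holds for the CDB buffers the
 * callers in this file pass in.
 */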
static inline u32 get_unaligned_be24(u8 *buf)
{
	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
}

/* Struct to gather data that needs to be extracted from a SCSI CDB.
   Not conforming to any particular CDB variant, but compatible with all. */

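/*
 * Illustration: for a READ(10) CDB the FUA bit comes from byte 1 bit 3, the
 * protection field from byte 1 bits 5-7, the LBA from bytes 2-5 and the
 * transfer length from bytes 7-8; the 6-, 12- and 16-byte CDB variants fill
 * the same fields from their own offsets.
 */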
struct nvme_trans_io_cdb {
	u8 fua;
	u8 prot_info;
	u64 lba;
	u32 xfer_len;
};


/* Internal Helper Functions */


/* Copy data to userspace memory */
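/*
 * If the caller supplied an iovec list (hdr->iovec_count > 0), the source
 * buffer is scattered across the user iovecs in order until it is exhausted;
 * otherwise hdr->dxferp is treated as a single flat destination buffer.
 */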

static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
				unsigned long n)
{
	int i;
	void *index = from;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			if (copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec)))
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			if (copy_to_user(sgl.iov_base, index, xfer_len))
				return -EFAULT;

			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return 0;
	}

	if (copy_to_user(hdr->dxferp, from, n))
		return -EFAULT;
	return 0;
}

/* Copy data from userspace memory */

static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
				unsigned long n)
{
	int i;
	void *index = to;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			if (copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec)))
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			if (copy_from_user(index, sgl.iov_base, xfer_len))
				return -EFAULT;
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return 0;
	}

	if (copy_from_user(to, hdr->dxferp, n))
		return -EFAULT;
	return 0;
}

/* Status/Sense Buffer Writeback */

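/*
 * For a good status the sg_io_hdr status fields are set to GOOD/DID_OK and no
 * sense data is returned.  Otherwise a minimal descriptor-format sense buffer
 * (response code 0x72) carrying the given sense key / ASC / ASCQ is built and
 * copied to the user's sense buffer pointer, with sb_len_wr capped at
 * mx_sb_len.
 */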
static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
				u8 asc, u8 ascq)
{
	u8 xfer_len;
	u8 resp[DESC_FMT_SENSE_DATA_SIZE];

	if (scsi_status_is_good(status)) {
		hdr->status = SAM_STAT_GOOD;
		hdr->masked_status = GOOD;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;
		hdr->sb_len_wr = 0;
	} else {
		hdr->status = status;
		hdr->masked_status = status >> 1;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;

		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
		resp[0] = DESC_FORMAT_SENSE_DATA;
		resp[1] = sense_key;
		resp[2] = asc;
		resp[3] = ascq;

		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
		hdr->sb_len_wr = xfer_len;
		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
			return -EFAULT;
	}

	return 0;
}

/*
 * Take a status code from a lowlevel routine, and if it was a positive NVMe
 * error code update the sense data based on it. In either case the passed
 * in value is returned again, unless an -EFAULT from copy_to_user overrides
 * it.
 */
static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
{
	u8 status, sense_key, asc, ascq;
	int res;

	/* For non-nvme (Linux) errors, simply return the error code */
	if (nvme_sc < 0)
		return nvme_sc;

	/* Mask DNR, More, and reserved fields */
	switch (nvme_sc & 0x7FF) {
	/* Generic Command Status */
	case NVME_SC_SUCCESS:
		status = SAM_STAT_GOOD;
		sense_key = NO_SENSE;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_OPCODE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_COMMAND;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_FIELD:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_DATA_XFER_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_POWER_LOSS:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_WARNING;
		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
		break;
	case NVME_SC_INTERNAL:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = HARDWARE_ERROR;
		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_REQ:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_QUEUE:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_FAIL:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_MISSING:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_NS:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;
	case NVME_SC_LBA_RANGE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_BLOCK;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_CAP_EXCEEDED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_NS_NOT_READY:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = NOT_READY;
		asc = SCSI_ASC_LUN_NOT_READY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Command Specific Status */
	case NVME_SC_INVALID_FORMAT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
		break;
	case NVME_SC_BAD_ATTRIBUTES:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Media Errors */
	case NVME_SC_WRITE_FAULT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_READ_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_GUARD_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
		break;
	case NVME_SC_APPTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
		break;
	case NVME_SC_REFTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
		break;
	case NVME_SC_COMPARE_FAILED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MISCOMPARE;
		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ACCESS_DENIED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;

	/* Unspecified/Default */
	case NVME_SC_CMDID_CONFLICT:
	case NVME_SC_CMD_SEQ_ERROR:
	case NVME_SC_CQ_INVALID:
	case NVME_SC_QID_INVALID:
	case NVME_SC_QUEUE_SIZE:
	case NVME_SC_ABORT_LIMIT:
	case NVME_SC_ABORT_MISSING:
	case NVME_SC_ASYNC_LIMIT:
	case NVME_SC_FIRMWARE_SLOT:
	case NVME_SC_FIRMWARE_IMAGE:
	case NVME_SC_INVALID_VECTOR:
	case NVME_SC_INVALID_LOG_PAGE:
	default:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	}

	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
	return res ? res : nvme_sc;
}

/* INQUIRY Helper Functions */

static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	int res;
	int nvme_sc;
	int xfer_len;
	u8 resp_data_format = 0x02;
	u8 protect;
	u8 cmdque = 0x01 << 1;
	u8 fw_offset = sizeof(dev->firmware_rev);

	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
				&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme ns identify - use DPS value for PROTECT field */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;

	id_ns = mem;
	(id_ns->dps) ? (protect = 0x01) : (protect = 0);

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[2] = VERSION_SPC_4;
	inq_response[3] = resp_data_format;	/*normaca=0 | hisup=0 */
	inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
	inq_response[5] = protect;	/* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
	strncpy(&inq_response[8], "NVMe    ", 8);
	strncpy(&inq_response[16], dev->model, 16);

	while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
		fw_offset--;
	fw_offset -= 4;
	strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
 out_dma:
	return res;
}

static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
	inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
	inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
	inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
	inq_response[9] = INQ_BDEV_LIMITS_PAGE;

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
	inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
	strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

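/*
 * Device Identification VPD page (0x83).  On NVMe 1.1+ controllers the
 * namespace EUI-64 (or, on 1.2+, the NGUID when the EUI-64 is zero) is
 * reported as an EUI-64 based designator; older controllers fall back to a
 * designator assembled from the PCI vendor id, model, namespace id and
 * serial number.
 */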
static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	int res;
	int nvme_sc;
	int xfer_len;
	__be32 tmp_id = cpu_to_be32(ns->ns_id);

	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	memset(inq_response, 0, alloc_len);
	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
	if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
		struct nvme_id_ns *id_ns = mem;
		void *eui = id_ns->eui64;
		int len = sizeof(id_ns->eui64);

		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			goto out_free;

		if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
			if (bitmap_empty(eui, len * 8)) {
				eui = id_ns->nguid;
				len = sizeof(id_ns->nguid);
			}
		}
		if (bitmap_empty(eui, len * 8))
			goto scsi_string;

		inq_response[3] = 4 + len; /* Page Length */
		/* Designation Descriptor start */
		inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
		inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
		inq_response[6] = 0x00;    /* Rsvd */
		inq_response[7] = len;     /* Designator Length */
		memcpy(&inq_response[8], eui, len);
	} else {
 scsi_string:
		if (alloc_len < 72) {
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_free;
		}
		inq_response[3] = 0x48;    /* Page Length */
		/* Designation Descriptor start */
		inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
		inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
		inq_response[6] = 0x00;    /* Rsvd */
		inq_response[7] = 0x44;    /* Designator Length */

		sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
		memcpy(&inq_response[12], dev->model, sizeof(dev->model));
		sprintf(&inq_response[52], "%04x", tmp_id);
		memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
	}
	xfer_len = alloc_len;
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
 out_dma:
	return res;
}

static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	struct nvme_id_ns *id_ns;
	int xfer_len;
	u8 microcode = 0x80;
	u8 spt;
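	/*
	 * Maps the NVMe DPC bitmask (bit 0/1/2 = end-to-end protection type
	 * 1/2/3 supported) onto the SPT field encoding of the Extended
	 * INQUIRY VPD page.
	 */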
	u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
	u8 grd_chk, app_chk, ref_chk, protect;
	u8 uask_sup = 0x20;
	u8 v_sup;
	u8 luiclr = 0x01;

	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;

	id_ns = mem;
	spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
	(id_ns->dps) ? (protect = 0x01) : (protect = 0);
	grd_chk = protect << 2;
	app_chk = protect << 1;
	ref_chk = protect;

	/* nvme controller identify */
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;

	id_ctrl = mem;
	v_sup = id_ctrl->vwc;

	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
	inq_response[5] = uask_sup;
	inq_response[6] = v_sup;
	inq_response[7] = luiclr;
	inq_response[8] = 0;
	inq_response[9] = 0;

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr);
 out_dma:
	kfree(inq_response);
 out_mem:
	return res;
}

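/*
 * Block Limits VPD page (0xB0): advertises the request queue's maximum
 * hardware transfer size as the maximum transfer length and, when discard is
 * supported, the discard limits as the maximum unmap LBA count and unmap
 * block descriptor count.
 */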
static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	__be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
	__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
	__be32 discard_desc_count = cpu_to_be32(0x100);

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = VPD_BLOCK_LIMITS;
	inq_response[3] = 0x3c; /* Page Length */
	memcpy(&inq_response[8], &max_sectors, sizeof(u32));
	memcpy(&inq_response[20], &max_discard, sizeof(u32));

	if (max_discard)
		memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));

	return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
}

static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res;
	int xfer_len;

	inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
	inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
	inq_response[6] = 0x00;    /* Form Factor */

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	kfree(inq_response);
 out_mem:
	return res;
}

/* LOG SENSE Helper Functions */

static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res;
	int xfer_len;
	u8 *log_response;

	log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
	log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;

	xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	kfree(log_response);
 out_mem:
	return res;
}

static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, int alloc_len)
{
	int res;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u8 temp_c;
	u16 temp_k;

	log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
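	/*
	 * CDW10 of Get Log Page: the log page identifier (SMART / health) in
	 * bits 7:0 and the 0's based number of dwords to transfer in bits
	 * 27:16.
	 */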
	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
	res = nvme_submit_sync_cmd(dev->admin_q, &c);
	if (res != NVME_SC_SUCCESS) {
		temp_c = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c = temp_k - KELVIN_TEMP_FACTOR;
	}

	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
	/* Informational Exceptions Log Parameter 1 Start */
	/* Parameter Code=0x0000 bytes 4,5 */
	log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
	log_response[7] = 0x04; /* PARAMETER LENGTH */
	/* Add sense Code and qualifier = 0x00 each */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[10] = temp_c;

	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
			  mem, dma_addr);
 out_dma:
	kfree(log_response);
 out_mem:
	return res;
}

static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u32 feature_resp;
	u8 temp_c_cur, temp_c_thresh;
	u16 temp_k;

	log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
	res = nvme_submit_sync_cmd(dev->admin_q, &c);
	if (res != NVME_SC_SUCCESS) {
		temp_c_cur = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
	}

	/* Get Features for Temp Threshold */
	res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
								&feature_resp);
	if (res != NVME_SC_SUCCESS)
		temp_c_thresh = LOG_TEMP_UNKNOWN;
	else
		temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;

	log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
	/* Temperature Log Parameter 1 (Temperature) Start */
	/* Parameter Code = 0x0000 */
	log_response[6] = 0x01;		/* Format and Linking = 01b */
	log_response[7] = 0x02;		/* Parameter Length */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[9] = temp_c_cur;
	/* Temperature Log Parameter 2 (Reference Temperature) Start */
	log_response[11] = 0x01;	/* Parameter Code = 0x0001 */
	log_response[12] = 0x01;	/* Format and Linking = 01b */
	log_response[13] = 0x02;	/* Parameter Length */
	/* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
	log_response[15] = temp_c_thresh;

	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(dev->dev, sizeof(struct nvme_smart_log),
			  mem, dma_addr);
 out_dma:
	kfree(log_response);
 out_mem:
	return res;
}

/* MODE SENSE Helper Functions */

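/*
 * Builds the 4-byte (MODE SENSE 6) or 8-byte (MODE SENSE 10) mode parameter
 * header; mode_data_length excludes the length field itself, and the 10-byte
 * variant additionally carries the LONGLBA flag and a 16-bit block descriptor
 * length.
 */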
| 992 | static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa, |
| 993 | u16 mode_data_length, u16 blk_desc_len) |
| 994 | { |
| 995 | /* Quick check to make sure I don't stomp on my own memory... */ |
| 996 | if ((cdb10 && len < 8) || (!cdb10 && len < 4)) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 997 | return -EINVAL; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 998 | |
| 999 | if (cdb10) { |
| 1000 | resp[0] = (mode_data_length & 0xFF00) >> 8; |
| 1001 | resp[1] = (mode_data_length & 0x00FF); |
| 1002 | /* resp[2] and [3] are zero */ |
| 1003 | resp[4] = llbaa; |
| 1004 | resp[5] = RESERVED_FIELD; |
| 1005 | resp[6] = (blk_desc_len & 0xFF00) >> 8; |
| 1006 | resp[7] = (blk_desc_len & 0x00FF); |
| 1007 | } else { |
| 1008 | resp[0] = (mode_data_length & 0x00FF); |
| 1009 | /* resp[1] and [2] are zero */ |
| 1010 | resp[3] = (blk_desc_len & 0x00FF); |
| 1011 | } |
| 1012 | |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1013 | return 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1014 | } |
| 1015 | |
| 1016 | static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 1017 | u8 *resp, int len, u8 llbaa) |
| 1018 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1019 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1020 | int nvme_sc; |
| 1021 | struct nvme_dev *dev = ns->dev; |
| 1022 | dma_addr_t dma_addr; |
| 1023 | void *mem; |
| 1024 | struct nvme_id_ns *id_ns; |
| 1025 | u8 flbas; |
| 1026 | u32 lba_length; |
| 1027 | |
| 1028 | if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1029 | return -EINVAL; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1030 | else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1031 | return -EINVAL; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1032 | |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1033 | mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns), |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1034 | &dma_addr, GFP_KERNEL); |
| 1035 | if (mem == NULL) { |
| 1036 | res = -ENOMEM; |
| 1037 | goto out; |
| 1038 | } |
| 1039 | |
| 1040 | /* nvme ns identify */ |
| 1041 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); |
| 1042 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 1043 | if (res) |
| 1044 | goto out_dma; |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1045 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1046 | id_ns = mem; |
| 1047 | flbas = (id_ns->flbas) & 0x0F; |
| 1048 | lba_length = (1 << (id_ns->lbaf[flbas].ds)); |
| 1049 | |
| 1050 | if (llbaa == 0) { |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1051 | __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap)); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1052 | /* Byte 4 is reserved */ |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1053 | __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1054 | |
| 1055 | memcpy(resp, &tmp_cap, sizeof(u32)); |
| 1056 | memcpy(&resp[4], &tmp_len, sizeof(u32)); |
| 1057 | } else { |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1058 | __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap)); |
| 1059 | __be32 tmp_len = cpu_to_be32(lba_length); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1060 | |
| 1061 | memcpy(resp, &tmp_cap, sizeof(u64)); |
| 1062 | /* Bytes 8, 9, 10, 11 are reserved */ |
| 1063 | memcpy(&resp[12], &tmp_len, sizeof(u32)); |
| 1064 | } |
| 1065 | |
| 1066 | out_dma: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1067 | dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1068 | out: |
| 1069 | return res; |
| 1070 | } |
| 1071 | |
| 1072 | static int nvme_trans_fill_control_page(struct nvme_ns *ns, |
| 1073 | struct sg_io_hdr *hdr, u8 *resp, |
| 1074 | int len) |
| 1075 | { |
| 1076 | if (len < MODE_PAGE_CONTROL_LEN) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1077 | return -EINVAL; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1078 | |
| 1079 | resp[0] = MODE_PAGE_CONTROL; |
| 1080 | resp[1] = MODE_PAGE_CONTROL_LEN_FIELD; |
| 1081 | resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1, |
| 1082 | * D_SENSE=1, GLTSD=1, RLEC=0 */ |
| 1083 | resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */ |
| 1084 | /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */ |
| 1085 | resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */ |
| 1086 | /* resp[6] and [7] are obsolete, thus zero */ |
| 1087 | resp[8] = 0xFF; /* Busy timeout period = 0xffff */ |
| 1088 | resp[9] = 0xFF; |
| 1089 | /* Bytes 10,11: Extended selftest completion time = 0x0000 */ |
| 1090 | |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1091 | return 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1092 | } |
| 1093 | |
| 1094 | static int nvme_trans_fill_caching_page(struct nvme_ns *ns, |
| 1095 | struct sg_io_hdr *hdr, |
| 1096 | u8 *resp, int len) |
| 1097 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1098 | int res = 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1099 | int nvme_sc; |
| 1100 | struct nvme_dev *dev = ns->dev; |
| 1101 | u32 feature_resp; |
| 1102 | u8 vwc; |
| 1103 | |
| 1104 | if (len < MODE_PAGE_CACHING_LEN) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1105 | return -EINVAL; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1106 | |
| 1107 | nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0, |
| 1108 | &feature_resp); |
| 1109 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 1110 | if (res) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1111 | return res; |
| 1112 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1113 | vwc = feature_resp & 0x00000001; |
| 1114 | |
| 1115 | resp[0] = MODE_PAGE_CACHING; |
| 1116 | resp[1] = MODE_PAGE_CACHING_LEN_FIELD; |
| 1117 | resp[2] = vwc << 2; |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1118 | return 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1119 | } |
| 1120 | |
| 1121 | static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns, |
| 1122 | struct sg_io_hdr *hdr, u8 *resp, |
| 1123 | int len) |
| 1124 | { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1125 | if (len < MODE_PAGE_POW_CND_LEN) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1126 | return -EINVAL; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1127 | |
| 1128 | resp[0] = MODE_PAGE_POWER_CONDITION; |
| 1129 | resp[1] = MODE_PAGE_POW_CND_LEN_FIELD; |
| 1130 | /* All other bytes are zero */ |
| 1131 | |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1132 | return 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1133 | } |
| 1134 | |
| 1135 | static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns, |
| 1136 | struct sg_io_hdr *hdr, u8 *resp, |
| 1137 | int len) |
| 1138 | { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1139 | if (len < MODE_PAGE_INF_EXC_LEN) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1140 | return -EINVAL; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1141 | |
| 1142 | resp[0] = MODE_PAGE_INFO_EXCEP; |
| 1143 | resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD; |
| 1144 | resp[2] = 0x88; |
| 1145 | /* All other bytes are zero */ |
| 1146 | |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1147 | return 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1148 | } |
| 1149 | |
| 1150 | static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 1151 | u8 *resp, int len) |
| 1152 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1153 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1154 | u16 mode_pages_offset_1 = 0; |
| 1155 | u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4; |
| 1156 | |
| 1157 | mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN; |
| 1158 | mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN; |
| 1159 | mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN; |
| 1160 | |
| 1161 | res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1], |
| 1162 | MODE_PAGE_CACHING_LEN); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1163 | if (res) |
| 1164 | return res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1165 | res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2], |
| 1166 | MODE_PAGE_CONTROL_LEN); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1167 | if (res) |
| 1168 | return res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1169 | res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3], |
| 1170 | MODE_PAGE_POW_CND_LEN); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1171 | if (res) |
| 1172 | return res; |
| 1173 | return nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4], |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1174 | MODE_PAGE_INF_EXC_LEN); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1175 | } |
| 1176 | |
| 1177 | static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa) |
| 1178 | { |
| 1179 | if (dbd == MODE_SENSE_BLK_DESC_ENABLED) { |
| 1180 | /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */ |
| 1181 | return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT; |
| 1182 | } else { |
| 1183 | return 0; |
| 1184 | } |
| 1185 | } |
| 1186 | |
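| | /* |
| | * Build a complete MODE SENSE response: mode parameter header, optional |
| | * block descriptor(s), then the mode page(s) filled in by |
| | * mode_page_fill_func. At most min(alloc_len, resp_size) bytes are |
| | * copied back to user space. |
| | */ |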
| 1187 | static int nvme_trans_mode_page_create(struct nvme_ns *ns, |
| 1188 | struct sg_io_hdr *hdr, u8 *cmd, |
| 1189 | u16 alloc_len, u8 cdb10, |
| 1190 | int (*mode_page_fill_func) |
| 1191 | (struct nvme_ns *, |
| 1192 | struct sg_io_hdr *hdr, u8 *, int), |
| 1193 | u16 mode_pages_tot_len) |
| 1194 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1195 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1196 | int xfer_len; |
| 1197 | u8 *response; |
| 1198 | u8 dbd, llbaa; |
| 1199 | u16 resp_size; |
| 1200 | int mph_size; |
| 1201 | u16 mode_pages_offset_1; |
| 1202 | u16 blk_desc_len, blk_desc_offset, mode_data_length; |
| 1203 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 1204 | dbd = (cmd[1] & MODE_SENSE_DBD_MASK) >> MODE_SENSE_DBD_SHIFT; |
| 1205 | llbaa = (cmd[1] & MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT; |
| 1206 | mph_size = cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE; |
| 1207 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1208 | blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa); |
| 1209 | |
| 1210 | resp_size = mph_size + blk_desc_len + mode_pages_tot_len; |
| 1211 | /* Refer spc4r34 Table 440 for calculation of Mode data Length field */ |
| 1212 | mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len; |
| 1213 | |
| 1214 | blk_desc_offset = mph_size; |
| 1215 | mode_pages_offset_1 = blk_desc_offset + blk_desc_len; |
| 1216 | |
Tushar Behera | 03ea83e | 2013-06-10 10:20:55 +0530 | [diff] [blame] | 1217 | response = kzalloc(resp_size, GFP_KERNEL); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1218 | if (response == NULL) { |
| 1219 | res = -ENOMEM; |
| 1220 | goto out_mem; |
| 1221 | } |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1222 | |
| 1223 | res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10, |
| 1224 | llbaa, mode_data_length, blk_desc_len); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1225 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1226 | goto out_free; |
| 1227 | if (blk_desc_len > 0) { |
| 1228 | res = nvme_trans_fill_blk_desc(ns, hdr, |
| 1229 | &response[blk_desc_offset], |
| 1230 | blk_desc_len, llbaa); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1231 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1232 | goto out_free; |
| 1233 | } |
| 1234 | res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1], |
| 1235 | mode_pages_tot_len); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1236 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1237 | goto out_free; |
| 1238 | |
| 1239 | xfer_len = min(alloc_len, resp_size); |
| 1240 | res = nvme_trans_copy_to_user(hdr, response, xfer_len); |
| 1241 | |
| 1242 | out_free: |
| 1243 | kfree(response); |
| 1244 | out_mem: |
| 1245 | return res; |
| 1246 | } |
| 1247 | |
| 1248 | /* Read Capacity Helper Functions */ |
| 1249 | |
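| | /* |
| | * Fill a READ CAPACITY response from Identify Namespace data: the 10-byte |
| | * variant carries a 4-byte returned LBA (clamped to 0xFFFFFFFF) and block |
| | * length, the 16-byte variant an 8-byte LBA, block length and the |
| | * protection type/enable bits in byte 12. |
| | */ |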
| 1250 | static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns, |
| 1251 | u8 cdb16) |
| 1252 | { |
| 1253 | u8 flbas; |
| 1254 | u32 lba_length; |
| 1255 | u64 rlba; |
| 1256 | u8 prot_en; |
| 1257 | u8 p_type_lut[4] = {0, 0, 1, 2}; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1258 | __be64 tmp_rlba; |
| 1259 | __be32 tmp_rlba_32; |
| 1260 | __be32 tmp_len; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1261 | |
| 1262 | flbas = (id_ns->flbas) & 0x0F; |
| 1263 | lba_length = (1 << (id_ns->lbaf[flbas].ds)); |
| 1264 | rlba = le64_to_cpup(&id_ns->nsze) - 1; |
| 1265 | prot_en = (id_ns->dps) ? 0x01 : 0; |
| 1266 | |
| 1267 | if (!cdb16) { |
| 1268 | if (rlba > 0xFFFFFFFF) |
| 1269 | rlba = 0xFFFFFFFF; |
| 1270 | tmp_rlba_32 = cpu_to_be32(rlba); |
| 1271 | tmp_len = cpu_to_be32(lba_length); |
| 1272 | memcpy(response, &tmp_rlba_32, sizeof(u32)); |
| 1273 | memcpy(&response[4], &tmp_len, sizeof(u32)); |
| 1274 | } else { |
| 1275 | tmp_rlba = cpu_to_be64(rlba); |
| 1276 | tmp_len = cpu_to_be32(lba_length); |
| 1277 | memcpy(response, &tmp_rlba, sizeof(u64)); |
| 1278 | memcpy(&response[8], &tmp_len, sizeof(u32)); |
| 1279 | response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en; |
| 1280 | /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */ |
| 1281 | /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */ |
| 1282 | /* Bytes 16-31 - Reserved */ |
| 1283 | } |
| 1284 | } |
| 1285 | |
| 1286 | /* Start Stop Unit Helper Functions */ |
| 1287 | |
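| | /* |
| | * Map the START STOP UNIT power condition (and modifier) onto an NVMe |
| | * power state and apply it via Set Features (Power Management). Power |
| | * state 0 is the highest-performance state; higher numbered states trade |
| | * performance for lower power consumption. |
| | */ |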
| 1288 | static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 1289 | u8 pc, u8 pcmod, u8 start) |
| 1290 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1291 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1292 | int nvme_sc; |
| 1293 | struct nvme_dev *dev = ns->dev; |
| 1294 | dma_addr_t dma_addr; |
| 1295 | void *mem; |
| 1296 | struct nvme_id_ctrl *id_ctrl; |
| 1297 | int lowest_pow_st; /* max npss = lowest power consumption */ |
| 1298 | unsigned ps_desired = 0; |
| 1299 | |
| 1300 | /* NVMe Controller Identify */ |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1301 | mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl), |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1302 | &dma_addr, GFP_KERNEL); |
| 1303 | if (mem == NULL) { |
| 1304 | res = -ENOMEM; |
| 1305 | goto out; |
| 1306 | } |
| 1307 | nvme_sc = nvme_identify(dev, 0, 1, dma_addr); |
| 1308 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 1309 | if (res) |
| 1310 | goto out_dma; |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1311 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1312 | id_ctrl = mem; |
Dan McLeran | b8e0808 | 2014-06-06 08:27:27 -0600 | [diff] [blame] | 1313 | lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1)); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1314 | |
| 1315 | switch (pc) { |
| 1316 | case NVME_POWER_STATE_START_VALID: |
| 1317 | /* Action unspecified if POWER CONDITION MODIFIER != 0 */ |
| 1318 | if (pcmod == 0 && start == 0x1) |
| 1319 | ps_desired = POWER_STATE_0; |
| 1320 | if (pcmod == 0 && start == 0x0) |
| 1321 | ps_desired = lowest_pow_st; |
| 1322 | break; |
| 1323 | case NVME_POWER_STATE_ACTIVE: |
| 1324 | /* Action unspecified if POWER CONDITION MODIFIER != 0 */ |
| 1325 | if (pcmod == 0) |
| 1326 | ps_desired = POWER_STATE_0; |
| 1327 | break; |
| 1328 | case NVME_POWER_STATE_IDLE: |
| 1329 | /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */ |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1330 | if (pcmod == 0x0) |
Dan McLeran | b8e0808 | 2014-06-06 08:27:27 -0600 | [diff] [blame] | 1331 | ps_desired = POWER_STATE_1; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1332 | else if (pcmod == 0x1) |
Dan McLeran | b8e0808 | 2014-06-06 08:27:27 -0600 | [diff] [blame] | 1333 | ps_desired = POWER_STATE_2; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1334 | else if (pcmod == 0x2) |
Dan McLeran | b8e0808 | 2014-06-06 08:27:27 -0600 | [diff] [blame] | 1335 | ps_desired = POWER_STATE_3; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1336 | break; |
| 1337 | case NVME_POWER_STATE_STANDBY: |
| 1338 | /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */ |
| 1339 | if (pcmod == 0x0) |
Dan McLeran | b8e0808 | 2014-06-06 08:27:27 -0600 | [diff] [blame] | 1340 | ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2)); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1341 | else if (pcmod == 0x1) |
Dan McLeran | b8e0808 | 2014-06-06 08:27:27 -0600 | [diff] [blame] | 1342 | ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1)); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1343 | break; |
| 1344 | case NVME_POWER_STATE_LU_CONTROL: |
| 1345 | default: |
| 1346 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 1347 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 1348 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 1349 | break; |
| 1350 | } |
| 1351 | nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0, |
| 1352 | NULL); |
| 1353 | res = nvme_trans_status_code(hdr, nvme_sc); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1354 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1355 | out_dma: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1356 | dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1357 | out: |
| 1358 | return res; |
| 1359 | } |
| 1360 | |
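| | /* |
| | * Issue an NVMe Firmware Activate command that replaces the firmware |
| | * image in the slot selected by buffer_id and activates it. |
| | */ |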
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 1361 | static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 1362 | u8 buffer_id) |
| 1363 | { |
| 1364 | struct nvme_command c; |
| 1365 | int nvme_sc; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1366 | |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 1367 | memset(&c, 0, sizeof(c)); |
| 1368 | c.common.opcode = nvme_admin_activate_fw; |
| 1369 | c.common.cdw10[0] = cpu_to_le32(buffer_id | NVME_FWACT_REPL_ACTV); |
| 1370 | |
| 1371 | nvme_sc = nvme_submit_sync_cmd(ns->queue, &c); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1372 | return nvme_trans_status_code(hdr, nvme_sc); |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 1373 | } |
| 1374 | |
| 1375 | static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1376 | u8 opcode, u32 tot_len, u32 offset, |
| 1377 | u8 buffer_id) |
| 1378 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1379 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1380 | int nvme_sc; |
| 1381 | struct nvme_dev *dev = ns->dev; |
| 1382 | struct nvme_command c; |
| 1383 | struct nvme_iod *iod = NULL; |
| 1384 | unsigned length; |
| 1385 | |
| 1386 | memset(&c, 0, sizeof(c)); |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 1387 | c.common.opcode = nvme_admin_download_fw; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1388 | |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 1389 | if (hdr->iovec_count > 0) { |
| 1390 | /* Assuming SGL is not allowed for this command */ |
| 1391 | return nvme_trans_completion(hdr, |
| 1392 | SAM_STAT_CHECK_CONDITION, |
| 1393 | ILLEGAL_REQUEST, |
| 1394 | SCSI_ASC_INVALID_CDB, |
| 1395 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1396 | } |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 1397 | iod = nvme_map_user_pages(dev, DMA_TO_DEVICE, |
| 1398 | (unsigned long)hdr->dxferp, tot_len); |
| 1399 | if (IS_ERR(iod)) |
| 1400 | return PTR_ERR(iod); |
| 1401 | length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL); |
| 1402 | if (length != tot_len) { |
| 1403 | res = -ENOMEM; |
| 1404 | goto out_unmap; |
| 1405 | } |
| 1406 | |
| 1407 | c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); |
| 1408 | c.dlfw.prp2 = cpu_to_le64(iod->first_dma); |
| 1409 | c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); |
| 1410 | c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1411 | |
Christoph Hellwig | f705f83 | 2015-05-22 11:12:38 +0200 | [diff] [blame] | 1412 | nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1413 | res = nvme_trans_status_code(hdr, nvme_sc); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1414 | |
| 1415 | out_unmap: |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 1416 | nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod); |
| 1417 | nvme_free_iod(dev, iod); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1418 | return res; |
| 1419 | } |
| 1420 | |
| 1421 | /* Mode Select Helper Functions */ |
| 1422 | |
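| | /* |
| | * Extract the block descriptor length (and, for the 10-byte CDB, the |
| | * LLBAA bit) from a MODE SELECT parameter list header. The 6-byte |
| | * variant has a one-byte block descriptor length and no LLBAA bit. |
| | */ |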
| 1423 | static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10, |
| 1424 | u16 *bd_len, u8 *llbaa) |
| 1425 | { |
| 1426 | if (cdb10) { |
| 1427 | /* 10 Byte CDB */ |
| 1428 | *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) + |
| 1429 | parm_list[MODE_SELECT_10_BD_OFFSET + 1]; |
Keith Busch | 9ac1693 | 2015-01-09 16:52:08 -0700 | [diff] [blame] | 1430 | *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] & |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1431 | MODE_SELECT_10_LLBAA_MASK; |
| 1432 | } else { |
| 1433 | /* 6 Byte CDB */ |
| 1434 | *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET]; |
| 1435 | } |
| 1436 | } |
| 1437 | |
| 1438 | static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, |
| 1439 | u16 idx, u16 bd_len, u8 llbaa) |
| 1440 | { |
| 1441 | u16 bd_num; |
| 1442 | |
| 1443 | bd_num = bd_len / ((llbaa == 0) ? |
| 1444 | SHORT_DESC_BLOCK : LONG_DESC_BLOCK); |
| 1445 | /* Store block descriptor info if a FORMAT UNIT comes later */ |
| 1446 | /* TODO Saving 1st BD info; what to do if multiple BD received? */ |
| 1447 | if (llbaa == 0) { |
| 1448 | /* Standard Block Descriptor - spc4r34 7.5.5.1 */ |
| 1449 | ns->mode_select_num_blocks = |
| 1450 | (parm_list[idx + 1] << 16) + |
| 1451 | (parm_list[idx + 2] << 8) + |
| 1452 | (parm_list[idx + 3]); |
| 1453 | |
| 1454 | ns->mode_select_block_len = |
| 1455 | (parm_list[idx + 5] << 16) + |
| 1456 | (parm_list[idx + 6] << 8) + |
| 1457 | (parm_list[idx + 7]); |
| 1458 | } else { |
| 1459 | /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */ |
| 1460 | ns->mode_select_num_blocks = |
| 1461 | (((u64)parm_list[idx + 0]) << 56) + |
| 1462 | (((u64)parm_list[idx + 1]) << 48) + |
| 1463 | (((u64)parm_list[idx + 2]) << 40) + |
| 1464 | (((u64)parm_list[idx + 3]) << 32) + |
| 1465 | (((u64)parm_list[idx + 4]) << 24) + |
| 1466 | (((u64)parm_list[idx + 5]) << 16) + |
| 1467 | (((u64)parm_list[idx + 6]) << 8) + |
| 1468 | ((u64)parm_list[idx + 7]); |
| 1469 | |
| 1470 | ns->mode_select_block_len = |
| 1471 | (parm_list[idx + 12] << 24) + |
| 1472 | (parm_list[idx + 13] << 16) + |
| 1473 | (parm_list[idx + 14] << 8) + |
| 1474 | (parm_list[idx + 15]); |
| 1475 | } |
| 1476 | } |
| 1477 | |
Vishal Verma | 710a143 | 2013-05-13 14:55:18 -0600 | [diff] [blame] | 1478 | static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1479 | u8 *mode_page, u8 page_code) |
| 1480 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1481 | int res = 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1482 | int nvme_sc; |
| 1483 | struct nvme_dev *dev = ns->dev; |
| 1484 | unsigned dword11; |
| 1485 | |
| 1486 | switch (page_code) { |
| 1487 | case MODE_PAGE_CACHING: |
| 1488 | dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0); |
| 1489 | nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11, |
| 1490 | 0, NULL); |
| 1491 | res = nvme_trans_status_code(hdr, nvme_sc); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1492 | break; |
| 1493 | case MODE_PAGE_CONTROL: |
| 1494 | break; |
| 1495 | case MODE_PAGE_POWER_CONDITION: |
| 1496 | /* Verify the OS is not trying to set timers */ |
| 1497 | if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) { |
| 1498 | res = nvme_trans_completion(hdr, |
| 1499 | SAM_STAT_CHECK_CONDITION, |
| 1500 | ILLEGAL_REQUEST, |
| 1501 | SCSI_ASC_INVALID_PARAMETER, |
| 1502 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1503 | break; |
| 1504 | } |
| 1505 | break; |
| 1506 | default: |
| 1507 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 1508 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 1509 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1510 | break; |
| 1511 | } |
| 1512 | |
| 1513 | return res; |
| 1514 | } |
| 1515 | |
| 1516 | static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 1517 | u8 *cmd, u16 parm_list_len, u8 pf, |
| 1518 | u8 sp, u8 cdb10) |
| 1519 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1520 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1521 | u8 *parm_list; |
| 1522 | u16 bd_len; |
| 1523 | u8 llbaa = 0; |
| 1524 | u16 index, saved_index; |
| 1525 | u8 page_code; |
| 1526 | u16 mp_size; |
| 1527 | |
| 1528 | /* Get parm list from data-in/out buffer */ |
| 1529 | parm_list = kmalloc(parm_list_len, GFP_KERNEL); |
| 1530 | if (parm_list == NULL) { |
| 1531 | res = -ENOMEM; |
| 1532 | goto out; |
| 1533 | } |
| 1534 | |
| 1535 | res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1536 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1537 | goto out_mem; |
| 1538 | |
| 1539 | nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa); |
| 1540 | index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE); |
| 1541 | |
| 1542 | if (bd_len != 0) { |
| 1543 | /* Block Descriptors present, parse */ |
| 1544 | nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa); |
| 1545 | index += bd_len; |
| 1546 | } |
| 1547 | saved_index = index; |
| 1548 | |
| 1549 | /* Multiple mode pages may be present; iterate through all */ |
| 1550 | /* On the first pass, don't issue NVMe commands; only check for CDB errors */ |
| 1551 | do { |
| 1552 | page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; |
| 1553 | mp_size = parm_list[index + 1] + 2; |
| 1554 | if ((page_code != MODE_PAGE_CACHING) && |
| 1555 | (page_code != MODE_PAGE_CONTROL) && |
| 1556 | (page_code != MODE_PAGE_POWER_CONDITION)) { |
| 1557 | res = nvme_trans_completion(hdr, |
| 1558 | SAM_STAT_CHECK_CONDITION, |
| 1559 | ILLEGAL_REQUEST, |
| 1560 | SCSI_ASC_INVALID_CDB, |
| 1561 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 1562 | goto out_mem; |
| 1563 | } |
| 1564 | index += mp_size; |
| 1565 | } while (index < parm_list_len); |
| 1566 | |
| 1567 | /* On the second pass, issue the NVMe commands */ |
| 1568 | index = saved_index; |
| 1569 | do { |
| 1570 | page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; |
| 1571 | mp_size = parm_list[index + 1] + 2; |
| 1572 | res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index], |
| 1573 | page_code); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1574 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1575 | break; |
| 1576 | index += mp_size; |
| 1577 | } while (index < parm_list_len); |
| 1578 | |
| 1579 | out_mem: |
| 1580 | kfree(parm_list); |
| 1581 | out: |
| 1582 | return res; |
| 1583 | } |
| 1584 | |
| 1585 | /* Format Unit Helper Functions */ |
| 1586 | |
| 1587 | static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns, |
| 1588 | struct sg_io_hdr *hdr) |
| 1589 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1590 | int res = 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1591 | int nvme_sc; |
| 1592 | struct nvme_dev *dev = ns->dev; |
| 1593 | dma_addr_t dma_addr; |
| 1594 | void *mem; |
| 1595 | struct nvme_id_ns *id_ns; |
| 1596 | u8 flbas; |
| 1597 | |
| 1598 | /* |
| 1599 | * SCSI expects that a MODE SELECT has been issued prior to a |
| 1600 | * FORMAT UNIT, and that the block size and number of blocks are |
| 1601 | * taken from the block descriptor in it. If no MODE SELECT has |
| 1602 | * been issued, FORMAT shall use the current values for both. |
| 1603 | */ |
| 1604 | |
| 1605 | if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1606 | mem = dma_alloc_coherent(dev->dev, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1607 | sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL); |
| 1608 | if (mem == NULL) { |
| 1609 | res = -ENOMEM; |
| 1610 | goto out; |
| 1611 | } |
| 1612 | /* nvme ns identify */ |
| 1613 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); |
| 1614 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 1615 | if (res) |
| 1616 | goto out_dma; |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1617 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1618 | id_ns = mem; |
| 1619 | |
| 1620 | if (ns->mode_select_num_blocks == 0) |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1621 | ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1622 | if (ns->mode_select_block_len == 0) { |
| 1623 | flbas = (id_ns->flbas) & 0x0F; |
| 1624 | ns->mode_select_block_len = |
| 1625 | (1 << (id_ns->lbaf[flbas].ds)); |
| 1626 | } |
| 1627 | out_dma: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1628 | dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1629 | mem, dma_addr); |
| 1630 | } |
| 1631 | out: |
| 1632 | return res; |
| 1633 | } |
| 1634 | |
| 1635 | static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len, |
| 1636 | u8 format_prot_info, u8 *nvme_pf_code) |
| 1637 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1638 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1639 | u8 *parm_list; |
| 1640 | u8 pf_usage, pf_code; |
| 1641 | |
| 1642 | parm_list = kmalloc(len, GFP_KERNEL); |
| 1643 | if (parm_list == NULL) { |
| 1644 | res = -ENOMEM; |
| 1645 | goto out; |
| 1646 | } |
| 1647 | res = nvme_trans_copy_from_user(hdr, parm_list, len); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1648 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1649 | goto out_mem; |
| 1650 | |
| 1651 | if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] & |
| 1652 | FORMAT_UNIT_IMMED_MASK) != 0) { |
| 1653 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 1654 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 1655 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 1656 | goto out_mem; |
| 1657 | } |
| 1658 | |
| 1659 | if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN && |
| 1660 | (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) { |
| 1661 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 1662 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 1663 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 1664 | goto out_mem; |
| 1665 | } |
| 1666 | pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] & |
| 1667 | FORMAT_UNIT_PROT_FIELD_USAGE_MASK; |
| 1668 | pf_code = (pf_usage << 2) | format_prot_info; |
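| | /* |
| | * The combination of PROTECTION FIELD USAGE and FMTPINFO selects the NVMe |
| | * protection information setting: 0 = none, 2 = Type 1, 3 = Type 2, |
| | * 7 = Type 3; all other combinations are rejected. |
| | */ |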
| 1669 | switch (pf_code) { |
| 1670 | case 0: |
| 1671 | *nvme_pf_code = 0; |
| 1672 | break; |
| 1673 | case 2: |
| 1674 | *nvme_pf_code = 1; |
| 1675 | break; |
| 1676 | case 3: |
| 1677 | *nvme_pf_code = 2; |
| 1678 | break; |
| 1679 | case 7: |
| 1680 | *nvme_pf_code = 3; |
| 1681 | break; |
| 1682 | default: |
| 1683 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 1684 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 1685 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 1686 | break; |
| 1687 | } |
| 1688 | |
| 1689 | out_mem: |
| 1690 | kfree(parm_list); |
| 1691 | out: |
| 1692 | return res; |
| 1693 | } |
| 1694 | |
| 1695 | static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 1696 | u8 prot_info) |
| 1697 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1698 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1699 | int nvme_sc; |
| 1700 | struct nvme_dev *dev = ns->dev; |
| 1701 | dma_addr_t dma_addr; |
| 1702 | void *mem; |
| 1703 | struct nvme_id_ns *id_ns; |
| 1704 | u8 i; |
| 1705 | u8 flbas, nlbaf; |
| 1706 | u8 selected_lbaf = 0xFF; |
| 1707 | u32 cdw10 = 0; |
| 1708 | struct nvme_command c; |
| 1709 | |
| 1710 | /* Loop through the LBA formats in id_ns to find the required lbaf, put it in cdw10 */ |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1711 | mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns), |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1712 | &dma_addr, GFP_KERNEL); |
| 1713 | if (mem == NULL) { |
| 1714 | res = -ENOMEM; |
| 1715 | goto out; |
| 1716 | } |
| 1717 | /* nvme ns identify */ |
| 1718 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); |
| 1719 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 1720 | if (res) |
| 1721 | goto out_dma; |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1722 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1723 | id_ns = mem; |
| 1724 | flbas = (id_ns->flbas) & 0x0F; |
| 1725 | nlbaf = id_ns->nlbaf; |
| 1726 | |
| 1727 | for (i = 0; i < nlbaf; i++) { |
| 1728 | if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) { |
| 1729 | selected_lbaf = i; |
| 1730 | break; |
| 1731 | } |
| 1732 | } |
| 1733 | if (selected_lbaf > 0x0F) { |
| 1734 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 1735 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, |
| 1736 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 1737 | } |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1738 | if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1739 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 1740 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, |
| 1741 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 1742 | } |
| 1743 | |
| 1744 | cdw10 |= prot_info << 5; |
| 1745 | cdw10 |= selected_lbaf & 0x0F; |
| 1746 | memset(&c, 0, sizeof(c)); |
| 1747 | c.format.opcode = nvme_admin_format_nvm; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1748 | c.format.nsid = cpu_to_le32(ns->ns_id); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1749 | c.format.cdw10 = cpu_to_le32(cdw10); |
| 1750 | |
Christoph Hellwig | f705f83 | 2015-05-22 11:12:38 +0200 | [diff] [blame] | 1751 | nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1752 | res = nvme_trans_status_code(hdr, nvme_sc); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1753 | |
| 1754 | out_dma: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 1755 | dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1756 | out: |
| 1757 | return res; |
| 1758 | } |
| 1759 | |
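| | /* |
| | * Work out how many NVMe commands one SCSI I/O requires: one per iovec |
| | * when an iovec list is supplied, otherwise enough commands to cover the |
| | * transfer in max_blocks-sized chunks. |
| | */ |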
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1760 | static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr, |
| 1761 | struct nvme_trans_io_cdb *cdb_info, |
| 1762 | u32 max_blocks) |
| 1763 | { |
| 1764 | /* If using iovecs, send one nvme command per vector */ |
| 1765 | if (hdr->iovec_count > 0) |
| 1766 | return hdr->iovec_count; |
| 1767 | else if (cdb_info->xfer_len > max_blocks) |
| 1768 | return ((cdb_info->xfer_len - 1) / max_blocks) + 1; |
| 1769 | else |
| 1770 | return 1; |
| 1771 | } |
| 1772 | |
| 1773 | static u16 nvme_trans_io_get_control(struct nvme_ns *ns, |
| 1774 | struct nvme_trans_io_cdb *cdb_info) |
| 1775 | { |
| 1776 | u16 control = 0; |
| 1777 | |
| 1778 | /* When Protection information support is added, implement here */ |
| 1779 | |
| 1780 | if (cdb_info->fua > 0) |
| 1781 | control |= NVME_RW_FUA; |
| 1782 | |
| 1783 | return control; |
| 1784 | } |
| 1785 | |
| 1786 | static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 1787 | struct nvme_trans_io_cdb *cdb_info, u8 is_write) |
| 1788 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1789 | int nvme_sc = NVME_SC_SUCCESS; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1790 | struct nvme_dev *dev = ns->dev; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1791 | u32 num_cmds; |
| 1792 | struct nvme_iod *iod; |
| 1793 | u64 unit_len; |
| 1794 | u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */ |
| 1795 | u32 retcode; |
| 1796 | u32 i = 0; |
| 1797 | u64 nvme_offset = 0; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1798 | void __user *next_mapping_addr; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1799 | struct nvme_command c; |
| 1800 | u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read); |
| 1801 | u16 control; |
Keith Busch | ddcb776 | 2014-03-24 10:03:56 -0400 | [diff] [blame] | 1802 | u32 max_blocks = queue_max_hw_sectors(ns->queue); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1803 | |
| 1804 | num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks); |
| 1805 | |
| 1806 | /* |
| 1807 | * This loop handles two cases. |
| 1808 | * First, when an SGL is used in the form of an iovec list: |
| 1809 | * - Use iov_base as the next mapping address for the nvme command |
| 1810 | * - Use iov_len as the data transfer length for the command. |
| 1811 | * Second, when we have a single buffer |
| 1812 | * - If larger than max_blocks, split into chunks, offset |
| 1813 | * each nvme command accordingly. |
| 1814 | */ |
| 1815 | for (i = 0; i < num_cmds; i++) { |
| 1816 | memset(&c, 0, sizeof(c)); |
| 1817 | if (hdr->iovec_count > 0) { |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1818 | struct sg_iovec sgl; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1819 | |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1820 | retcode = copy_from_user(&sgl, hdr->dxferp + |
| 1821 | i * sizeof(struct sg_iovec), |
| 1822 | sizeof(struct sg_iovec)); |
| 1823 | if (retcode) |
| 1824 | return -EFAULT; |
| 1825 | unit_len = sgl.iov_len; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1826 | unit_num_blocks = unit_len >> ns->lba_shift; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1827 | next_mapping_addr = sgl.iov_base; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1828 | } else { |
| 1829 | unit_num_blocks = min((u64)max_blocks, |
| 1830 | (cdb_info->xfer_len - nvme_offset)); |
| 1831 | unit_len = unit_num_blocks << ns->lba_shift; |
| 1832 | next_mapping_addr = hdr->dxferp + |
| 1833 | ((1 << ns->lba_shift) * nvme_offset); |
| 1834 | } |
| 1835 | |
| 1836 | c.rw.opcode = opcode; |
| 1837 | c.rw.nsid = cpu_to_le32(ns->ns_id); |
| 1838 | c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset); |
| 1839 | c.rw.length = cpu_to_le16(unit_num_blocks - 1); |
| 1840 | control = nvme_trans_io_get_control(ns, cdb_info); |
| 1841 | c.rw.control = cpu_to_le16(control); |
| 1842 | |
| 1843 | iod = nvme_map_user_pages(dev, |
| 1844 | (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, |
| 1845 | (unsigned long)next_mapping_addr, unit_len); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1846 | if (IS_ERR(iod)) |
| 1847 | return PTR_ERR(iod); |
| 1848 | |
Keith Busch | edd10d3 | 2014-04-03 16:45:23 -0600 | [diff] [blame] | 1849 | retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1850 | if (retcode != unit_len) { |
| 1851 | nvme_unmap_user_pages(dev, |
| 1852 | (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, |
| 1853 | iod); |
| 1854 | nvme_free_iod(dev, iod); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1855 | return -ENOMEM; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1856 | } |
Keith Busch | edd10d3 | 2014-04-03 16:45:23 -0600 | [diff] [blame] | 1857 | c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); |
| 1858 | c.rw.prp2 = cpu_to_le64(iod->first_dma); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1859 | |
| 1860 | nvme_offset += unit_num_blocks; |
| 1861 | |
Christoph Hellwig | f705f83 | 2015-05-22 11:12:38 +0200 | [diff] [blame] | 1862 | nvme_sc = nvme_submit_sync_cmd(ns->queue, &c); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1863 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1864 | nvme_unmap_user_pages(dev, |
| 1865 | (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, |
| 1866 | iod); |
| 1867 | nvme_free_iod(dev, iod); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1868 | |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1869 | |
| 1870 | if (nvme_sc != NVME_SC_SUCCESS) |
| 1871 | break; |
| 1872 | } |
| 1873 | |
| 1874 | return nvme_trans_status_code(hdr, nvme_sc); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1875 | } |
| 1876 | |
| 1877 | |
| 1878 | /* SCSI Command Translation Functions */ |
| 1879 | |
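| | /* |
| | * Translate a SCSI READ/WRITE (6/10/12/16) CDB: extract the LBA and |
| | * transfer length, validate them against the data buffer, then issue the |
| | * corresponding NVMe read/write command(s). |
| | */ |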
| 1880 | static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write, |
| 1881 | u8 *cmd) |
| 1882 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1883 | int res = 0; |
Christoph Hellwig | cbbb7a2 | 2015-05-22 11:12:43 +0200 | [diff] [blame^] | 1884 | struct nvme_trans_io_cdb cdb_info = { 0, }; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1885 | u8 opcode = cmd[0]; |
| 1886 | u64 xfer_bytes; |
| 1887 | u64 sum_iov_len = 0; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1888 | struct sg_iovec sgl; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1889 | int i; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1890 | size_t not_copied; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1891 | |
Christoph Hellwig | cbbb7a2 | 2015-05-22 11:12:43 +0200 | [diff] [blame^] | 1892 | /* |
| 1893 | * The FUA and WPROTECT fields are not supported in 6-byte CDBs, |
| 1894 | * but always in the same place for all others. |
| 1895 | */ |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1896 | switch (opcode) { |
| 1897 | case WRITE_6: |
| 1898 | case READ_6: |
Christoph Hellwig | cbbb7a2 | 2015-05-22 11:12:43 +0200 | [diff] [blame^] | 1899 | break; |
| 1900 | default: |
| 1901 | cdb_info.fua = cmd[1] & 0x8; |
| 1902 | cdb_info.prot_info = (cmd[1] & 0xe0) >> 5; |
| 1903 | } |
| 1904 | |
| 1905 | switch (opcode) { |
| 1906 | case WRITE_6: |
| 1907 | case READ_6: |
| 1908 | cdb_info.lba = get_unaligned_be24(&cmd[1]); |
| 1909 | cdb_info.xfer_len = cmd[4]; |
| 1910 | if (cdb_info.xfer_len == 0) |
| 1911 | cdb_info.xfer_len = 256; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1912 | break; |
| 1913 | case WRITE_10: |
| 1914 | case READ_10: |
Christoph Hellwig | cbbb7a2 | 2015-05-22 11:12:43 +0200 | [diff] [blame^] | 1915 | cdb_info.lba = get_unaligned_be32(&cmd[2]); |
| 1916 | cdb_info.xfer_len = get_unaligned_be16(&cmd[7]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1917 | break; |
| 1918 | case WRITE_12: |
| 1919 | case READ_12: |
Christoph Hellwig | cbbb7a2 | 2015-05-22 11:12:43 +0200 | [diff] [blame^] | 1920 | cdb_info.lba = get_unaligned_be32(&cmd[2]); |
| 1921 | cdb_info.xfer_len = get_unaligned_be32(&cmd[6]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1922 | break; |
| 1923 | case WRITE_16: |
| 1924 | case READ_16: |
Christoph Hellwig | cbbb7a2 | 2015-05-22 11:12:43 +0200 | [diff] [blame^] | 1925 | cdb_info.lba = get_unaligned_be64(&cmd[2]); |
| 1926 | cdb_info.xfer_len = get_unaligned_be32(&cmd[10]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1927 | break; |
| 1928 | default: |
| 1929 | /* Will never really reach here */ |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1930 | res = -EIO; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1931 | goto out; |
| 1932 | } |
| 1933 | |
| 1934 | /* Calculate total length of transfer (in bytes) */ |
| 1935 | if (hdr->iovec_count > 0) { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1936 | for (i = 0; i < hdr->iovec_count; i++) { |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1937 | not_copied = copy_from_user(&sgl, hdr->dxferp + |
| 1938 | i * sizeof(struct sg_iovec), |
| 1939 | sizeof(struct sg_iovec)); |
| 1940 | if (not_copied) |
| 1941 | return -EFAULT; |
| 1942 | sum_iov_len += sgl.iov_len; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1943 | /* IO vector sizes should be multiples of block size */ |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 1944 | if (sgl.iov_len % (1 << ns->lba_shift) != 0) { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1945 | res = nvme_trans_completion(hdr, |
| 1946 | SAM_STAT_CHECK_CONDITION, |
| 1947 | ILLEGAL_REQUEST, |
| 1948 | SCSI_ASC_INVALID_PARAMETER, |
| 1949 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 1950 | goto out; |
| 1951 | } |
| 1952 | } |
| 1953 | } else { |
| 1954 | sum_iov_len = hdr->dxfer_len; |
| 1955 | } |
| 1956 | |
| 1957 | /* As per the sg ioctl howto, if the lengths differ, use the lower one */ |
| 1958 | xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len); |
| 1959 | |
| 1960 | /* If the block count and actual data buffer size don't match, error out */ |
| 1961 | if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) { |
| 1962 | res = -EINVAL; |
| 1963 | goto out; |
| 1964 | } |
| 1965 | |
| 1966 | /* Check for 0 length transfer - it is not illegal */ |
| 1967 | if (cdb_info.xfer_len == 0) |
| 1968 | goto out; |
| 1969 | |
| 1970 | /* Send NVMe IO Command(s) */ |
| 1971 | res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1972 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1973 | goto out; |
| 1974 | |
| 1975 | out: |
| 1976 | return res; |
| 1977 | } |
| 1978 | |
| 1979 | static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 1980 | u8 *cmd) |
| 1981 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 1982 | int res = 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1983 | u8 evpd; |
| 1984 | u8 page_code; |
| 1985 | int alloc_len; |
| 1986 | u8 *inq_response; |
| 1987 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 1988 | evpd = cmd[1] & 0x01; |
| 1989 | page_code = cmd[2]; |
| 1990 | alloc_len = get_unaligned_be16(&cmd[3]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1991 | |
Keith Busch | 4f1982b | 2015-02-19 13:42:14 -0700 | [diff] [blame] | 1992 | inq_response = kmalloc(alloc_len, GFP_KERNEL); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 1993 | if (inq_response == NULL) { |
| 1994 | res = -ENOMEM; |
| 1995 | goto out_mem; |
| 1996 | } |
| 1997 | |
| 1998 | if (evpd == 0) { |
| 1999 | if (page_code == INQ_STANDARD_INQUIRY_PAGE) { |
| 2000 | res = nvme_trans_standard_inquiry_page(ns, hdr, |
| 2001 | inq_response, alloc_len); |
| 2002 | } else { |
| 2003 | res = nvme_trans_completion(hdr, |
| 2004 | SAM_STAT_CHECK_CONDITION, |
| 2005 | ILLEGAL_REQUEST, |
| 2006 | SCSI_ASC_INVALID_CDB, |
| 2007 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2008 | } |
| 2009 | } else { |
| 2010 | switch (page_code) { |
| 2011 | case VPD_SUPPORTED_PAGES: |
| 2012 | res = nvme_trans_supported_vpd_pages(ns, hdr, |
| 2013 | inq_response, alloc_len); |
| 2014 | break; |
| 2015 | case VPD_SERIAL_NUMBER: |
| 2016 | res = nvme_trans_unit_serial_page(ns, hdr, inq_response, |
| 2017 | alloc_len); |
| 2018 | break; |
| 2019 | case VPD_DEVICE_IDENTIFIERS: |
| 2020 | res = nvme_trans_device_id_page(ns, hdr, inq_response, |
| 2021 | alloc_len); |
| 2022 | break; |
| 2023 | case VPD_EXTENDED_INQUIRY: |
| 2024 | res = nvme_trans_ext_inq_page(ns, hdr, alloc_len); |
| 2025 | break; |
Keith Busch | 7f749d9 | 2015-04-07 15:34:18 -0600 | [diff] [blame] | 2026 | case VPD_BLOCK_LIMITS: |
| 2027 | res = nvme_trans_bdev_limits_page(ns, hdr, inq_response, |
| 2028 | alloc_len); |
| 2029 | break; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2030 | case VPD_BLOCK_DEV_CHARACTERISTICS: |
| 2031 | res = nvme_trans_bdev_char_page(ns, hdr, alloc_len); |
| 2032 | break; |
| 2033 | default: |
| 2034 | res = nvme_trans_completion(hdr, |
| 2035 | SAM_STAT_CHECK_CONDITION, |
| 2036 | ILLEGAL_REQUEST, |
| 2037 | SCSI_ASC_INVALID_CDB, |
| 2038 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2039 | break; |
| 2040 | } |
| 2041 | } |
| 2042 | kfree(inq_response); |
| 2043 | out_mem: |
| 2044 | return res; |
| 2045 | } |
| 2046 | |
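| | /* |
| | * LOG SENSE: only current cumulative values of the supported-pages, |
| | * informational-exceptions and temperature pages are supported; other |
| | * requests are rejected with a CHECK CONDITION. |
| | */ |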
| 2047 | static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2048 | u8 *cmd) |
| 2049 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2050 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2051 | u16 alloc_len; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2052 | u8 pc; |
| 2053 | u8 page_code; |
| 2054 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2055 | if (cmd[1] != LOG_SENSE_CDB_SP_NOT_ENABLED) { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2056 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2057 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2058 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2059 | goto out; |
| 2060 | } |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2061 | |
| 2062 | page_code = cmd[2] & LOG_SENSE_CDB_PAGE_CODE_MASK; |
| 2063 | pc = (cmd[2] & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2064 | if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) { |
| 2065 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2066 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2067 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2068 | goto out; |
| 2069 | } |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2070 | alloc_len = get_unaligned_be16(&cmd[7]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2071 | switch (page_code) { |
| 2072 | case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE: |
| 2073 | res = nvme_trans_log_supp_pages(ns, hdr, alloc_len); |
| 2074 | break; |
| 2075 | case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE: |
| 2076 | res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len); |
| 2077 | break; |
| 2078 | case LOG_PAGE_TEMPERATURE_PAGE: |
| 2079 | res = nvme_trans_log_temperature(ns, hdr, alloc_len); |
| 2080 | break; |
| 2081 | default: |
| 2082 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2083 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2084 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2085 | break; |
| 2086 | } |
| 2087 | |
| 2088 | out: |
| 2089 | return res; |
| 2090 | } |
| 2091 | |
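| | /* |
| | * MODE SELECT(6/10): a parameter list length of zero is not an error; |
| | * otherwise parse the parameter list and apply the supported mode pages. |
| | */ |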
| 2092 | static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2093 | u8 *cmd) |
| 2094 | { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2095 | u8 cdb10 = 0; |
| 2096 | u16 parm_list_len; |
| 2097 | u8 page_format; |
| 2098 | u8 save_pages; |
| 2099 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2100 | page_format = cmd[1] & MODE_SELECT_CDB_PAGE_FORMAT_MASK; |
| 2101 | save_pages = cmd[1] & MODE_SELECT_CDB_SAVE_PAGES_MASK; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2102 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2103 | if (cmd[0] == MODE_SELECT) { |
| 2104 | parm_list_len = cmd[4]; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2105 | } else { |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2106 | parm_list_len = cmd[7]; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2107 | cdb10 = 1; |
| 2108 | } |
| 2109 | |
| 2110 | if (parm_list_len != 0) { |
| 2111 | /* |
| 2112 | * According to SPC-4 r24, a parameter list length field of 0 |
| 2113 | * shall not be considered an error |
| 2114 | */ |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2115 | return nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2116 | page_format, save_pages, cdb10); |
| 2117 | } |
| 2118 | |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2119 | return 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2120 | } |
| 2121 | |
| 2122 | static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2123 | u8 *cmd) |
| 2124 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2125 | int res = 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2126 | u16 alloc_len; |
| 2127 | u8 cdb10 = 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2128 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2129 | if (cmd[0] == MODE_SENSE) { |
| 2130 | alloc_len = cmd[4]; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2131 | } else { |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2132 | alloc_len = get_unaligned_be16(&cmd[7]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2133 | cdb10 = 1; |
| 2134 | } |
| 2135 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2136 | if ((cmd[2] & MODE_SENSE_PAGE_CONTROL_MASK) != |
| 2137 | MODE_SENSE_PC_CURRENT_VALUES) { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2138 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2139 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2140 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2141 | goto out; |
| 2142 | } |
| 2143 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2144 | switch (cmd[2] & MODE_SENSE_PAGE_CODE_MASK) { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2145 | case MODE_PAGE_CACHING: |
| 2146 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, |
| 2147 | cdb10, |
| 2148 | &nvme_trans_fill_caching_page, |
| 2149 | MODE_PAGE_CACHING_LEN); |
| 2150 | break; |
| 2151 | case MODE_PAGE_CONTROL: |
| 2152 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, |
| 2153 | cdb10, |
| 2154 | &nvme_trans_fill_control_page, |
| 2155 | MODE_PAGE_CONTROL_LEN); |
| 2156 | break; |
| 2157 | case MODE_PAGE_POWER_CONDITION: |
| 2158 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, |
| 2159 | cdb10, |
| 2160 | &nvme_trans_fill_pow_cnd_page, |
| 2161 | MODE_PAGE_POW_CND_LEN); |
| 2162 | break; |
| 2163 | case MODE_PAGE_INFO_EXCEP: |
| 2164 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, |
| 2165 | cdb10, |
| 2166 | &nvme_trans_fill_inf_exc_page, |
| 2167 | MODE_PAGE_INF_EXC_LEN); |
| 2168 | break; |
| 2169 | case MODE_PAGE_RETURN_ALL: |
| 2170 | res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, |
| 2171 | cdb10, |
| 2172 | &nvme_trans_fill_all_pages, |
| 2173 | MODE_PAGE_ALL_LEN); |
| 2174 | break; |
| 2175 | default: |
| 2176 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2177 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2178 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2179 | break; |
| 2180 | } |
| 2181 | |
| 2182 | out: |
| 2183 | return res; |
| 2184 | } |
| 2185 | |
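| | /* |
| | * READ CAPACITY(10/16): issue Identify Namespace and translate the |
| | * namespace size and LBA format into the SCSI response. |
| | */ |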
| 2186 | static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2187 | u8 *cmd, u8 cdb16) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2188 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2189 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2190 | int nvme_sc; |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2191 | u32 alloc_len; |
| 2192 | u32 resp_size; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2193 | u32 xfer_len; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2194 | struct nvme_dev *dev = ns->dev; |
| 2195 | dma_addr_t dma_addr; |
| 2196 | void *mem; |
| 2197 | struct nvme_id_ns *id_ns; |
| 2198 | u8 *response; |
| 2199 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2200 | if (cdb16) { |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2201 | alloc_len = get_unaligned_be32(&cmd[10]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2202 | resp_size = READ_CAP_16_RESP_SIZE; |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2203 | } else { |
| 2204 | alloc_len = READ_CAP_10_RESP_SIZE; |
| 2205 | resp_size = READ_CAP_10_RESP_SIZE; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2206 | } |
| 2207 | |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2208 | mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ns), |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2209 | &dma_addr, GFP_KERNEL); |
| 2210 | if (mem == NULL) { |
| 2211 | res = -ENOMEM; |
| 2212 | goto out; |
| 2213 | } |
| 2214 | /* nvme ns identify */ |
| 2215 | nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); |
| 2216 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 2217 | if (res) |
| 2218 | goto out_dma; |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2219 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2220 | id_ns = mem; |
| 2221 | |
Tushar Behera | 03ea83e | 2013-06-10 10:20:55 +0530 | [diff] [blame] | 2222 | response = kzalloc(resp_size, GFP_KERNEL); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2223 | if (response == NULL) { |
| 2224 | res = -ENOMEM; |
| 2225 | goto out_dma; |
| 2226 | } |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2227 | nvme_trans_fill_read_cap(response, id_ns, cdb16); |
| 2228 | |
| 2229 | xfer_len = min(alloc_len, resp_size); |
| 2230 | res = nvme_trans_copy_to_user(hdr, response, xfer_len); |
| 2231 | |
| 2232 | kfree(response); |
| 2233 | out_dma: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2234 | dma_free_coherent(dev->dev, sizeof(struct nvme_id_ns), mem, dma_addr); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2235 | out: |
| 2236 | return res; |
| 2237 | } |
| 2238 | |
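| | /* |
| | * REPORT LUNS: report one LUN per namespace, numbered 0 through nn - 1 as |
| | * reported by Identify Controller. |
| | */ |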
| 2239 | static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2240 | u8 *cmd) |
| 2241 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2242 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2243 | int nvme_sc; |
| 2244 | u32 alloc_len, xfer_len, resp_size; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2245 | u8 *response; |
| 2246 | struct nvme_dev *dev = ns->dev; |
| 2247 | dma_addr_t dma_addr; |
| 2248 | void *mem; |
| 2249 | struct nvme_id_ctrl *id_ctrl; |
| 2250 | u32 ll_length, lun_id; |
| 2251 | u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 2252 | __be32 tmp_len; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2253 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2254 | switch (cmd[2]) { |
| 2255 | default: |
| 2256 | return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2257 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2258 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2259 | case ALL_LUNS_RETURNED: |
| 2260 | case ALL_WELL_KNOWN_LUNS_RETURNED: |
| 2261 | case RESTRICTED_LUNS_RETURNED: |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2262 | /* NVMe Controller Identify */ |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2263 | mem = dma_alloc_coherent(dev->dev, sizeof(struct nvme_id_ctrl), |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2264 | &dma_addr, GFP_KERNEL); |
| 2265 | if (mem == NULL) { |
| 2266 | res = -ENOMEM; |
| 2267 | goto out; |
| 2268 | } |
| 2269 | nvme_sc = nvme_identify(dev, 0, 1, dma_addr); |
| 2270 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 2271 | if (res) |
| 2272 | goto out_dma; |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2273 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2274 | id_ctrl = mem; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 2275 | ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2276 | resp_size = ll_length + LUN_DATA_HEADER_SIZE; |
| 2277 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2278 | alloc_len = get_unaligned_be32(&cmd[6]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2279 | if (alloc_len < resp_size) { |
| 2280 | res = nvme_trans_completion(hdr, |
| 2281 | SAM_STAT_CHECK_CONDITION, |
| 2282 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2283 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2284 | goto out_dma; |
| 2285 | } |
| 2286 | |
Tushar Behera | 03ea83e | 2013-06-10 10:20:55 +0530 | [diff] [blame] | 2287 | response = kzalloc(resp_size, GFP_KERNEL); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2288 | if (response == NULL) { |
| 2289 | res = -ENOMEM; |
| 2290 | goto out_dma; |
| 2291 | } |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2292 | |
| 2293 | /* The first LUN ID will always be 0 per the SAM spec */ |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 2294 | for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2295 | /* |
 | 2296 | * Set the LUN ID and then increment to the next LUN
| 2297 | * location in the parameter data. |
| 2298 | */ |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 2299 | __be64 tmp_id = cpu_to_be64(lun_id); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2300 | memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64)); |
| 2301 | lun_id_offset += LUN_ENTRY_SIZE; |
| 2302 | } |
| 2303 | tmp_len = cpu_to_be32(ll_length); |
| 2304 | memcpy(response, &tmp_len, sizeof(u32)); |
| 2305 | } |
| 2306 | |
| 2307 | xfer_len = min(alloc_len, resp_size); |
| 2308 | res = nvme_trans_copy_to_user(hdr, response, xfer_len); |
| 2309 | |
| 2310 | kfree(response); |
| 2311 | out_dma: |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2312 | dma_free_coherent(dev->dev, sizeof(struct nvme_id_ctrl), mem, dma_addr); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2313 | out: |
| 2314 | return res; |
| 2315 | } |
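
The LUN list assembled above follows the SPC REPORT LUNS parameter-data layout: a 4-byte big-endian LUN LIST LENGTH at offset 0, reserved bytes through offset 7, then one 8-byte LUN entry per namespace starting at REPORT_LUNS_FIRST_LUN_OFFSET. A minimal user-space sketch of walking such a buffer, assuming it was returned by a REPORT LUNS issued through SG_IO; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() for the big-endian length field */

/*
 * Walk a REPORT LUNS parameter-data buffer of the shape built above:
 * bytes 0-3 hold the LUN LIST LENGTH (big-endian, bytes of LUN entries),
 * bytes 4-7 are reserved, and 8-byte LUN entries start at offset 8.
 */
static void dump_lun_list(const uint8_t *resp, size_t resp_len)
{
	uint32_t list_len;
	size_t off;

	if (resp_len < 8)
		return;

	memcpy(&list_len, resp, sizeof(list_len));
	list_len = ntohl(list_len);

	for (off = 8; off + 8 <= resp_len && off - 8 < list_len; off += 8) {
		uint64_t lun = 0;
		int i;

		for (i = 0; i < 8; i++)		/* entries are big-endian */
			lun = (lun << 8) | resp[off + i];
		printf("LUN entry: 0x%016llx\n", (unsigned long long)lun);
	}
}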
| 2316 | |
| 2317 | static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2318 | u8 *cmd) |
| 2319 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2320 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2321 | u8 alloc_len, xfer_len, resp_size; |
| 2322 | u8 desc_format; |
| 2323 | u8 *response; |
| 2324 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2325 | desc_format = cmd[1] & 0x01; |
| 2326 | alloc_len = cmd[4]; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2327 | |
| 2328 | resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) : |
| 2329 | (FIXED_FMT_SENSE_DATA_SIZE)); |
Tushar Behera | 03ea83e | 2013-06-10 10:20:55 +0530 | [diff] [blame] | 2330 | response = kzalloc(resp_size, GFP_KERNEL); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2331 | if (response == NULL) { |
| 2332 | res = -ENOMEM; |
| 2333 | goto out; |
| 2334 | } |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2335 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2336 | if (desc_format) { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2337 | /* Descriptor Format Sense Data */ |
| 2338 | response[0] = DESC_FORMAT_SENSE_DATA; |
| 2339 | response[1] = NO_SENSE; |
| 2340 | /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */ |
| 2341 | response[2] = SCSI_ASC_NO_SENSE; |
| 2342 | response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; |
| 2343 | /* SDAT_OVFL = 0 | Additional Sense Length = 0 */ |
| 2344 | } else { |
| 2345 | /* Fixed Format Sense Data */ |
| 2346 | response[0] = FIXED_SENSE_DATA; |
| 2347 | /* Byte 1 = Obsolete */ |
| 2348 | response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */ |
| 2349 | /* Bytes 3-6 - Information - set to zero */ |
| 2350 | response[7] = FIXED_SENSE_DATA_ADD_LENGTH; |
| 2351 | /* Bytes 8-11 - Cmd Specific Information - set to zero */ |
| 2352 | response[12] = SCSI_ASC_NO_SENSE; |
| 2353 | response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; |
| 2354 | /* Byte 14 = Field Replaceable Unit Code = 0 */ |
| 2355 | /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */ |
| 2356 | } |
| 2357 | |
| 2358 | xfer_len = min(alloc_len, resp_size); |
| 2359 | res = nvme_trans_copy_to_user(hdr, response, xfer_len); |
| 2360 | |
| 2361 | kfree(response); |
| 2362 | out: |
| 2363 | return res; |
| 2364 | } |
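
Both sense layouts built here follow SPC: fixed format (response code 0x70) keeps the sense key in byte 2 and the ASC/ASCQ pair in bytes 12 and 13, while descriptor format (0x72) keeps them in bytes 1 through 3. A small sketch of decoding either variant on the user-space side, assuming the buffer came back through sg_io_hdr.sbp; the function name is illustrative:

#include <stdint.h>

/* Decode sense key / ASC / ASCQ from fixed (0x70) or descriptor (0x72) sense data. */
static int decode_sense(const uint8_t *sense, int len,
			uint8_t *key, uint8_t *asc, uint8_t *ascq)
{
	if (len < 4)
		return -1;

	switch (sense[0] & 0x7f) {
	case 0x72:			/* descriptor format */
	case 0x73:
		*key  = sense[1] & 0x0f;
		*asc  = sense[2];
		*ascq = sense[3];
		return 0;
	case 0x70:			/* fixed format */
	case 0x71:
		if (len < 14)
			return -1;
		*key  = sense[2] & 0x0f;
		*asc  = sense[12];
		*ascq = sense[13];
		return 0;
	default:
		return -1;
	}
}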
| 2365 | |
| 2366 | static int nvme_trans_security_protocol(struct nvme_ns *ns, |
| 2367 | struct sg_io_hdr *hdr, |
| 2368 | u8 *cmd) |
| 2369 | { |
| 2370 | return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2371 | ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, |
| 2372 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2373 | } |
| 2374 | |
| 2375 | static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2376 | u8 *cmd) |
| 2377 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2378 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2379 | int nvme_sc; |
Keith Busch | 14385de | 2013-04-25 14:39:27 -0600 | [diff] [blame] | 2380 | struct nvme_command c; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2381 | u8 immed, pcmod, pc, no_flush, start; |
| 2382 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2383 | immed = cmd[1] & 0x01; |
| 2384 | pcmod = cmd[3] & 0x0f; |
| 2385 | pc = (cmd[4] & 0xf0) >> 4; |
| 2386 | no_flush = cmd[4] & 0x04; |
| 2387 | start = cmd[4] & 0x01; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2388 | |
| 2389 | if (immed != 0) { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2390 | return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2391 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2392 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2393 | } else { |
| 2394 | if (no_flush == 0) { |
| 2395 | /* Issue NVME FLUSH command prior to START STOP UNIT */ |
Keith Busch | 14385de | 2013-04-25 14:39:27 -0600 | [diff] [blame] | 2396 | memset(&c, 0, sizeof(c)); |
| 2397 | c.common.opcode = nvme_cmd_flush; |
| 2398 | c.common.nsid = cpu_to_le32(ns->ns_id); |
| 2399 | |
Christoph Hellwig | f705f83 | 2015-05-22 11:12:38 +0200 | [diff] [blame] | 2400 | nvme_sc = nvme_submit_sync_cmd(ns->queue, &c); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2401 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 2402 | if (res) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2403 | return res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2404 | } |
 | 2405 | /* Set up the expected power state transition */
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2406 | return nvme_trans_power_state(ns, hdr, pc, pcmod, start); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2407 | } |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2408 | } |
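
The fields parsed at the top of nvme_trans_start_stop map directly onto the SBC START STOP UNIT CDB: IMMED is bit 0 of byte 1, POWER CONDITION MODIFIER is the low nibble of byte 3, and byte 4 carries POWER CONDITION (bits 7-4), NO_FLUSH (bit 2) and START (bit 0). A sketch of composing such a CDB in user space, assuming the standard opcode 0x1b; the helper name is illustrative:

#include <stdint.h>
#include <string.h>

/* Build a 6-byte START STOP UNIT CDB with the field layout parsed above. */
static void build_start_stop_cdb(uint8_t cdb[6], int immed, uint8_t pcmod,
				 uint8_t pc, int no_flush, int start)
{
	memset(cdb, 0, 6);
	cdb[0] = 0x1b;				/* START STOP UNIT */
	cdb[1] = immed ? 0x01 : 0x00;		/* IMMED */
	cdb[3] = pcmod & 0x0f;			/* POWER CONDITION MODIFIER */
	cdb[4] = (uint8_t)((pc & 0x0f) << 4) |	/* POWER CONDITION */
		 (no_flush ? 0x04 : 0x00) |	/* NO_FLUSH */
		 (start ? 0x01 : 0x00);		/* START */
}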
| 2409 | |
| 2410 | static int nvme_trans_synchronize_cache(struct nvme_ns *ns, |
| 2411 | struct sg_io_hdr *hdr, u8 *cmd) |
| 2412 | { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2413 | int nvme_sc; |
Keith Busch | 14385de | 2013-04-25 14:39:27 -0600 | [diff] [blame] | 2414 | struct nvme_command c; |
Keith Busch | 14385de | 2013-04-25 14:39:27 -0600 | [diff] [blame] | 2415 | |
| 2416 | memset(&c, 0, sizeof(c)); |
| 2417 | c.common.opcode = nvme_cmd_flush; |
| 2418 | c.common.nsid = cpu_to_le32(ns->ns_id); |
| 2419 | |
Christoph Hellwig | f705f83 | 2015-05-22 11:12:38 +0200 | [diff] [blame] | 2420 | nvme_sc = nvme_submit_sync_cmd(ns->queue, &c); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2421 | return nvme_trans_status_code(hdr, nvme_sc); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2422 | } |
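
SYNCHRONIZE CACHE translates to a bare NVMe Flush: because Flush has no LBA range, the handler above never looks at the CDB's LOGICAL BLOCK ADDRESS or NUMBER OF LOGICAL BLOCKS fields and simply flushes the whole namespace. For reference, a sketch of the SYNCHRONIZE CACHE (10) CDB those ignored fields live in, assuming the standard opcode 0x35; the helper name is illustrative:

#include <stdint.h>
#include <string.h>

/* Build a SYNCHRONIZE CACHE (10) CDB; the translation above ignores lba/nblocks. */
static void build_sync_cache10_cdb(uint8_t cdb[10], uint32_t lba, uint16_t nblocks)
{
	memset(cdb, 0, 10);
	cdb[0] = 0x35;			/* SYNCHRONIZE CACHE (10) */
	cdb[2] = (lba >> 24) & 0xff;	/* LOGICAL BLOCK ADDRESS, big-endian */
	cdb[3] = (lba >> 16) & 0xff;
	cdb[4] = (lba >> 8) & 0xff;
	cdb[5] = lba & 0xff;
	cdb[7] = (nblocks >> 8) & 0xff;	/* NUMBER OF LOGICAL BLOCKS, big-endian */
	cdb[8] = nblocks & 0xff;
}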
| 2423 | |
| 2424 | static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2425 | u8 *cmd) |
| 2426 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2427 | int res; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2428 | u8 parm_hdr_len = 0; |
| 2429 | u8 nvme_pf_code = 0; |
| 2430 | u8 format_prot_info, long_list, format_data; |
| 2431 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2432 | format_prot_info = (cmd[1] & 0xc0) >> 6; |
| 2433 | long_list = cmd[1] & 0x20; |
| 2434 | format_data = cmd[1] & 0x10; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2435 | |
| 2436 | if (format_data != 0) { |
| 2437 | if (format_prot_info != 0) { |
| 2438 | if (long_list == 0) |
| 2439 | parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN; |
| 2440 | else |
| 2441 | parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN; |
| 2442 | } |
| 2443 | } else if (format_data == 0 && format_prot_info != 0) { |
| 2444 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2445 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2446 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2447 | goto out; |
| 2448 | } |
| 2449 | |
| 2450 | /* Get parm header from data-in/out buffer */ |
| 2451 | /* |
| 2452 | * According to the translation spec, the only fields in the parameter |
| 2453 | * list we are concerned with are in the header. So allocate only that. |
| 2454 | */ |
| 2455 | if (parm_hdr_len > 0) { |
| 2456 | res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len, |
| 2457 | format_prot_info, &nvme_pf_code); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2458 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2459 | goto out; |
| 2460 | } |
| 2461 | |
| 2462 | /* Attempt to activate any previously downloaded firmware image */ |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 2463 | res = nvme_trans_send_activate_fw_cmd(ns, hdr, 0); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2464 | |
 | 2465 | /* Determine block size and count and send the format command */
| 2466 | res = nvme_trans_fmt_set_blk_size_count(ns, hdr); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2467 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2468 | goto out; |
| 2469 | |
| 2470 | res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code); |
| 2471 | |
| 2472 | out: |
| 2473 | return res; |
| 2474 | } |
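
FORMAT UNIT keeps everything this translation cares about in CDB byte 1: FMTPINFO in bits 7-6 (format_prot_info above), LONGLIST in bit 5 and FMTDATA in bit 4; when FMTDATA is set, a 4- or 8-byte parameter-list header follows in the data-out buffer and supplies the protection-field usage that nvme_trans_fmt_get_parm_header turns into an NVMe protection-information code. A small sketch of assembling that byte, assuming the standard opcode 0x04; the helper name is illustrative:

#include <stdint.h>
#include <string.h>

/* Build a 6-byte FORMAT UNIT CDB with the byte-1 flags parsed above. */
static void build_format_unit_cdb(uint8_t cdb[6], uint8_t fmtpinfo,
				  int longlist, int fmtdata)
{
	memset(cdb, 0, 6);
	cdb[0] = 0x04;					/* FORMAT UNIT */
	cdb[1] = (uint8_t)((fmtpinfo & 0x03) << 6) |	/* FMTPINFO */
		 (longlist ? 0x20 : 0x00) |		/* LONGLIST */
		 (fmtdata  ? 0x10 : 0x00);		/* FMTDATA */
}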
| 2475 | |
| 2476 | static int nvme_trans_test_unit_ready(struct nvme_ns *ns, |
| 2477 | struct sg_io_hdr *hdr, |
| 2478 | u8 *cmd) |
| 2479 | { |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2480 | struct nvme_dev *dev = ns->dev; |
| 2481 | |
| 2482 | if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2483 | return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2484 | NOT_READY, SCSI_ASC_LUN_NOT_READY, |
| 2485 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2486 | else |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2487 | return nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2488 | } |
| 2489 | |
| 2490 | static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2491 | u8 *cmd) |
| 2492 | { |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2493 | int res = 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2494 | u32 buffer_offset, parm_list_length; |
| 2495 | u8 buffer_id, mode; |
| 2496 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2497 | parm_list_length = get_unaligned_be24(&cmd[6]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2498 | if (parm_list_length % BYTES_TO_DWORDS != 0) { |
 | 2499 | /* NVMe expects the firmware image to be a whole number of dwords */
| 2500 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2501 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2502 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2503 | goto out; |
| 2504 | } |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2505 | buffer_id = cmd[2]; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2506 | if (buffer_id > NVME_MAX_FIRMWARE_SLOT) { |
| 2507 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2508 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2509 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2510 | goto out; |
| 2511 | } |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2512 | mode = cmd[1] & 0x1f; |
| 2513 | buffer_offset = get_unaligned_be24(&cmd[3]); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2514 | |
| 2515 | switch (mode) { |
| 2516 | case DOWNLOAD_SAVE_ACTIVATE: |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 2517 | res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2518 | parm_list_length, buffer_offset, |
| 2519 | buffer_id); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2520 | if (res) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2521 | goto out; |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 2522 | res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2523 | break; |
| 2524 | case DOWNLOAD_SAVE_DEFER_ACTIVATE: |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 2525 | res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw, |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2526 | parm_list_length, buffer_offset, |
| 2527 | buffer_id); |
| 2528 | break; |
| 2529 | case ACTIVATE_DEFERRED_MICROCODE: |
Christoph Hellwig | b90c48d | 2015-05-22 11:12:40 +0200 | [diff] [blame] | 2530 | res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2531 | break; |
| 2532 | default: |
| 2533 | res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2534 | ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, |
| 2535 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2536 | break; |
| 2537 | } |
| 2538 | |
| 2539 | out: |
| 2540 | return res; |
| 2541 | } |
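
nvme_trans_write_buffer reads the WRITE BUFFER mode from the low five bits of byte 1, the buffer ID from byte 2, and the 24-bit big-endian buffer offset and parameter-list length from bytes 3-5 and 6-8, then maps the mode onto NVMe firmware download and/or activate commands. A user-space sketch of composing such a CDB, assuming the standard opcode 0x3b and leaving the mode value to the caller (the mode constants used above are defined elsewhere in this file); the helper name is illustrative:

#include <stdint.h>
#include <string.h>

/*
 * Build a 10-byte WRITE BUFFER CDB with the layout parsed above: mode in
 * byte 1 (bits 4:0), buffer ID in byte 2, 24-bit big-endian buffer offset
 * in bytes 3-5 and 24-bit big-endian parameter list length in bytes 6-8.
 */
static void build_write_buffer_cdb(uint8_t cdb[10], uint8_t mode,
				   uint8_t buffer_id, uint32_t offset,
				   uint32_t length)
{
	memset(cdb, 0, 10);
	cdb[0] = 0x3b;			/* WRITE BUFFER */
	cdb[1] = mode & 0x1f;
	cdb[2] = buffer_id;
	cdb[3] = (offset >> 16) & 0xff;
	cdb[4] = (offset >> 8) & 0xff;
	cdb[5] = offset & 0xff;
	cdb[6] = (length >> 16) & 0xff;
	cdb[7] = (length >> 8) & 0xff;
	cdb[8] = length & 0xff;
}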
| 2542 | |
Keith Busch | ec50373 | 2013-04-24 15:44:24 -0600 | [diff] [blame] | 2543 | struct scsi_unmap_blk_desc { |
| 2544 | __be64 slba; |
| 2545 | __be32 nlb; |
| 2546 | u32 resv; |
| 2547 | }; |
| 2548 | |
| 2549 | struct scsi_unmap_parm_list { |
| 2550 | __be16 unmap_data_len; |
| 2551 | __be16 unmap_blk_desc_data_len; |
| 2552 | u32 resv; |
| 2553 | struct scsi_unmap_blk_desc desc[0]; |
| 2554 | }; |
| 2555 | |
| 2556 | static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
| 2557 | u8 *cmd) |
| 2558 | { |
| 2559 | struct nvme_dev *dev = ns->dev; |
| 2560 | struct scsi_unmap_parm_list *plist; |
| 2561 | struct nvme_dsm_range *range; |
Keith Busch | ec50373 | 2013-04-24 15:44:24 -0600 | [diff] [blame] | 2562 | struct nvme_command c; |
| 2563 | int i, nvme_sc, res = -ENOMEM; |
| 2564 | u16 ndesc, list_len; |
| 2565 | dma_addr_t dma_addr; |
| 2566 | |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2567 | list_len = get_unaligned_be16(&cmd[7]); |
Keith Busch | ec50373 | 2013-04-24 15:44:24 -0600 | [diff] [blame] | 2568 | if (!list_len) |
| 2569 | return -EINVAL; |
| 2570 | |
| 2571 | plist = kmalloc(list_len, GFP_KERNEL); |
| 2572 | if (!plist) |
| 2573 | return -ENOMEM; |
| 2574 | |
| 2575 | res = nvme_trans_copy_from_user(hdr, plist, list_len); |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2576 | if (res) |
Keith Busch | ec50373 | 2013-04-24 15:44:24 -0600 | [diff] [blame] | 2577 | goto out; |
| 2578 | |
| 2579 | ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4; |
| 2580 | if (!ndesc || ndesc > 256) { |
| 2581 | res = -EINVAL; |
| 2582 | goto out; |
| 2583 | } |
 | 2584 | res = -ENOMEM;	/* fall-back status if dma_alloc_coherent() fails below */
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2585 | range = dma_alloc_coherent(dev->dev, ndesc * sizeof(*range), |
Keith Busch | ec50373 | 2013-04-24 15:44:24 -0600 | [diff] [blame] | 2586 | &dma_addr, GFP_KERNEL); |
| 2587 | if (!range) |
| 2588 | goto out; |
| 2589 | |
| 2590 | for (i = 0; i < ndesc; i++) { |
| 2591 | range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb)); |
| 2592 | range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba)); |
| 2593 | range[i].cattr = 0; |
| 2594 | } |
| 2595 | |
| 2596 | memset(&c, 0, sizeof(c)); |
| 2597 | c.dsm.opcode = nvme_cmd_dsm; |
| 2598 | c.dsm.nsid = cpu_to_le32(ns->ns_id); |
| 2599 | c.dsm.prp1 = cpu_to_le64(dma_addr); |
| 2600 | c.dsm.nr = cpu_to_le32(ndesc - 1); |
| 2601 | c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); |
| 2602 | |
Christoph Hellwig | f705f83 | 2015-05-22 11:12:38 +0200 | [diff] [blame] | 2603 | nvme_sc = nvme_submit_sync_cmd(ns->queue, &c); |
Keith Busch | ec50373 | 2013-04-24 15:44:24 -0600 | [diff] [blame] | 2604 | res = nvme_trans_status_code(hdr, nvme_sc); |
| 2605 | |
Christoph Hellwig | e75ec75 | 2015-05-22 11:12:39 +0200 | [diff] [blame] | 2606 | dma_free_coherent(dev->dev, ndesc * sizeof(*range), range, dma_addr); |
Keith Busch | ec50373 | 2013-04-24 15:44:24 -0600 | [diff] [blame] | 2607 | out: |
| 2608 | kfree(plist); |
| 2609 | return res; |
| 2610 | } |
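
The data-out buffer parsed here matches scsi_unmap_parm_list above: big-endian UNMAP DATA LENGTH and UNMAP BLOCK DESCRIPTOR DATA LENGTH fields, four reserved bytes, then 16-byte descriptors each carrying a 64-bit starting LBA and a 32-bit block count; every descriptor becomes one nvme_dsm_range in a Dataset Management command with the deallocate attribute set. A user-space sketch that builds a single-descriptor parameter list, with illustrative names:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htons()/htonl() for the big-endian fields */

/*
 * Fill a single-descriptor UNMAP parameter list:
 * bytes 0-1  UNMAP DATA LENGTH (bytes following this field)
 * bytes 2-3  UNMAP BLOCK DESCRIPTOR DATA LENGTH (16 bytes per descriptor)
 * bytes 4-7  reserved
 * bytes 8-   descriptors: 8-byte big-endian LBA, 4-byte count, 4 reserved
 */
static size_t build_unmap_parm_list(uint8_t *buf, uint64_t lba, uint32_t nlb)
{
	const uint16_t desc_len = 16;
	uint16_t be16;
	uint32_t be32;
	int i;

	memset(buf, 0, 8 + desc_len);

	be16 = htons(desc_len + 6);		/* UNMAP DATA LENGTH */
	memcpy(buf, &be16, 2);
	be16 = htons(desc_len);			/* block descriptor data length */
	memcpy(buf + 2, &be16, 2);

	for (i = 0; i < 8; i++)			/* big-endian 64-bit starting LBA */
		buf[8 + i] = (uint8_t)(lba >> (56 - 8 * i));
	be32 = htonl(nlb);			/* number of logical blocks */
	memcpy(buf + 16, &be32, 4);

	return 8 + desc_len;	/* transfer length, also CDB bytes 7-8 of UNMAP */
}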
| 2611 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2612 | static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) |
| 2613 | { |
| 2614 | u8 cmd[BLK_MAX_CDB]; |
| 2615 | int retcode; |
| 2616 | unsigned int opcode; |
| 2617 | |
| 2618 | if (hdr->cmdp == NULL) |
| 2619 | return -EMSGSIZE; |
| 2620 | if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len)) |
| 2621 | return -EFAULT; |
| 2622 | |
Keith Busch | 695a4fe | 2014-08-27 13:55:39 -0600 | [diff] [blame] | 2623 | /* |
 | 2624 | * Prime the hdr with good status for SCSI commands that don't require
 | 2625 | * an NVMe command for translation.
| 2626 | */ |
| 2627 | retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS); |
| 2628 | if (retcode) |
| 2629 | return retcode; |
| 2630 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2631 | opcode = cmd[0]; |
| 2632 | |
| 2633 | switch (opcode) { |
| 2634 | case READ_6: |
| 2635 | case READ_10: |
| 2636 | case READ_12: |
| 2637 | case READ_16: |
| 2638 | retcode = nvme_trans_io(ns, hdr, 0, cmd); |
| 2639 | break; |
| 2640 | case WRITE_6: |
| 2641 | case WRITE_10: |
| 2642 | case WRITE_12: |
| 2643 | case WRITE_16: |
| 2644 | retcode = nvme_trans_io(ns, hdr, 1, cmd); |
| 2645 | break; |
| 2646 | case INQUIRY: |
| 2647 | retcode = nvme_trans_inquiry(ns, hdr, cmd); |
| 2648 | break; |
| 2649 | case LOG_SENSE: |
| 2650 | retcode = nvme_trans_log_sense(ns, hdr, cmd); |
| 2651 | break; |
| 2652 | case MODE_SELECT: |
| 2653 | case MODE_SELECT_10: |
| 2654 | retcode = nvme_trans_mode_select(ns, hdr, cmd); |
| 2655 | break; |
| 2656 | case MODE_SENSE: |
| 2657 | case MODE_SENSE_10: |
| 2658 | retcode = nvme_trans_mode_sense(ns, hdr, cmd); |
| 2659 | break; |
| 2660 | case READ_CAPACITY: |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2661 | retcode = nvme_trans_read_capacity(ns, hdr, cmd, 0); |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2662 | break; |
Hannes Reinecke | eb846d9 | 2014-11-17 14:25:19 +0100 | [diff] [blame] | 2663 | case SERVICE_ACTION_IN_16: |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2664 | switch (cmd[1]) { |
| 2665 | case SAI_READ_CAPACITY_16: |
| 2666 | retcode = nvme_trans_read_capacity(ns, hdr, cmd, 1); |
| 2667 | break; |
| 2668 | default: |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2669 | goto out; |
Christoph Hellwig | 3726897 | 2015-05-22 11:12:42 +0200 | [diff] [blame] | 2670 | } |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2671 | break; |
| 2672 | case REPORT_LUNS: |
| 2673 | retcode = nvme_trans_report_luns(ns, hdr, cmd); |
| 2674 | break; |
| 2675 | case REQUEST_SENSE: |
| 2676 | retcode = nvme_trans_request_sense(ns, hdr, cmd); |
| 2677 | break; |
| 2678 | case SECURITY_PROTOCOL_IN: |
| 2679 | case SECURITY_PROTOCOL_OUT: |
| 2680 | retcode = nvme_trans_security_protocol(ns, hdr, cmd); |
| 2681 | break; |
| 2682 | case START_STOP: |
| 2683 | retcode = nvme_trans_start_stop(ns, hdr, cmd); |
| 2684 | break; |
| 2685 | case SYNCHRONIZE_CACHE: |
| 2686 | retcode = nvme_trans_synchronize_cache(ns, hdr, cmd); |
| 2687 | break; |
| 2688 | case FORMAT_UNIT: |
| 2689 | retcode = nvme_trans_format_unit(ns, hdr, cmd); |
| 2690 | break; |
| 2691 | case TEST_UNIT_READY: |
| 2692 | retcode = nvme_trans_test_unit_ready(ns, hdr, cmd); |
| 2693 | break; |
| 2694 | case WRITE_BUFFER: |
| 2695 | retcode = nvme_trans_write_buffer(ns, hdr, cmd); |
| 2696 | break; |
Keith Busch | ec50373 | 2013-04-24 15:44:24 -0600 | [diff] [blame] | 2697 | case UNMAP: |
| 2698 | retcode = nvme_trans_unmap(ns, hdr, cmd); |
| 2699 | break; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2700 | default: |
| 2701 | out: |
| 2702 | retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, |
| 2703 | ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, |
| 2704 | SCSI_ASCQ_CAUSE_NOT_REPORTABLE); |
| 2705 | break; |
| 2706 | } |
| 2707 | return retcode; |
| 2708 | } |
| 2709 | |
| 2710 | int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr) |
| 2711 | { |
| 2712 | struct sg_io_hdr hdr; |
| 2713 | int retcode; |
| 2714 | |
| 2715 | if (!capable(CAP_SYS_ADMIN)) |
| 2716 | return -EACCES; |
| 2717 | if (copy_from_user(&hdr, u_hdr, sizeof(hdr))) |
| 2718 | return -EFAULT; |
| 2719 | if (hdr.interface_id != 'S') |
| 2720 | return -EINVAL; |
| 2721 | if (hdr.cmd_len > BLK_MAX_CDB) |
| 2722 | return -EINVAL; |
| 2723 | |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2724 | /* |
 | 2725 | * A positive return code means an NVMe status, which has been
| 2726 | * translated to sense data. |
| 2727 | */ |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2728 | retcode = nvme_scsi_translate(ns, &hdr); |
| 2729 | if (retcode < 0) |
| 2730 | return retcode; |
Vishal Verma | 8741ee4 | 2013-04-04 17:52:27 -0600 | [diff] [blame] | 2731 | if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0) |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2732 | return -EFAULT; |
Christoph Hellwig | e61b0a8 | 2015-05-22 11:12:41 +0200 | [diff] [blame] | 2733 | return 0; |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2734 | } |
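
nvme_sg_io is what backs the SG_IO ioctl on an NVMe namespace block device, so the usual sg_io_hdr conventions apply: the caller needs CAP_SYS_ADMIN, interface_id must be 'S', the CDB goes in cmdp/cmd_len (at most BLK_MAX_CDB bytes), and sense data comes back through sbp. A minimal user-space sketch issuing TEST UNIT READY through this path; the device node path is an assumption and error handling is abbreviated:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	uint8_t cdb[6] = { 0x00 };		/* TEST UNIT READY */
	uint8_t sense[32];
	struct sg_io_hdr io;
	int fd, ret;

	fd = open("/dev/nvme0n1", O_RDONLY);	/* assumed device node */
	if (fd < 0)
		return 1;

	memset(&io, 0, sizeof(io));
	io.interface_id    = 'S';		/* required by nvme_sg_io() */
	io.cmd_len         = sizeof(cdb);
	io.cmdp            = cdb;
	io.dxfer_direction = SG_DXFER_NONE;	/* no data transfer */
	io.sbp             = sense;
	io.mx_sb_len       = sizeof(sense);
	io.timeout         = 5000;		/* milliseconds */

	ret = ioctl(fd, SG_IO, &io);
	printf("ioctl=%d scsi status=0x%x\n", ret, io.status);

	close(fd);
	return 0;
}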
| 2735 | |
Vishal Verma | 5d0f613 | 2013-03-04 18:40:58 -0700 | [diff] [blame] | 2736 | int nvme_sg_get_version_num(int __user *ip) |
| 2737 | { |
| 2738 | return put_user(sg_version_num, ip); |
| 2739 | } |