/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <linux/types.h>

/* NQN names in command fields are specified with one fixed size */
#define NVMF_NQN_FIELD_LEN	256

/* However, the maximum length of a qualified name is shorter */
#define NVMF_NQN_SIZE		223

#define NVMF_TRSVCID_SIZE	32
#define NVMF_TRADDR_SIZE	256
#define NVMF_TSAS_SIZE		256

#define NVME_DISC_SUBSYS_NAME	"nqn.2014-08.org.nvmexpress.discovery"

#define NVME_RDMA_IP_PORT	4420

enum nvme_subsys_type {
	NVME_NQN_DISC	= 1,		/* Discovery type target subsystem */
	NVME_NQN_NVME	= 2,		/* NVME type target subsystem */
};

/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
	NVMF_ADDR_FAMILY_PCI	= 0,	/* PCIe */
	NVMF_ADDR_FAMILY_IP4	= 1,	/* IP4 */
	NVMF_ADDR_FAMILY_IP6	= 2,	/* IP6 */
	NVMF_ADDR_FAMILY_IB	= 3,	/* InfiniBand */
	NVMF_ADDR_FAMILY_FC	= 4,	/* Fibre Channel */
};

/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
	NVMF_TRTYPE_RDMA	= 1,	/* RDMA */
	NVMF_TRTYPE_FC		= 2,	/* Fibre Channel */
	NVMF_TRTYPE_LOOP	= 254,	/* Reserved for host usage */
	NVMF_TRTYPE_MAX,
};

/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
	NVMF_TREQ_NOT_SPECIFIED	= 0,	/* Not specified */
	NVMF_TREQ_REQUIRED	= 1,	/* Required */
	NVMF_TREQ_NOT_REQUIRED	= 2,	/* Not Required */
};

/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
 * RDMA_QPTYPE field
 */
enum {
	NVMF_RDMA_QPTYPE_CONNECTED	= 0, /* Reliable Connected */
	NVMF_RDMA_QPTYPE_DATAGRAM	= 1, /* Reliable Datagram */
};

/* RDMA Provider Type codes for Discovery Log Page entry TSAS
 * RDMA_PRTYPE field
 */
enum {
	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 0, /* No Provider Specified */
	NVMF_RDMA_PRTYPE_IB		= 1, /* InfiniBand */
	NVMF_RDMA_PRTYPE_ROCE		= 2, /* InfiniBand RoCE */
	NVMF_RDMA_PRTYPE_ROCEV2		= 3, /* InfiniBand RoCEV2 */
	NVMF_RDMA_PRTYPE_IWARP		= 4, /* IWARP */
};

/* RDMA Connection Management Service Type codes for Discovery Log Page
 * entry TSAS RDMA_CMS field
 */
enum {
	NVMF_RDMA_CMS_RDMA_CM	= 0, /* Sockets based endpoint addressing */
};

#define NVMF_AQ_DEPTH		32

enum {
	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
	NVME_REG_VS	= 0x0008,	/* Version */
	NVME_REG_INTMS	= 0x000c,	/* Interrupt Mask Set */
	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Clear */
	NVME_REG_CC	= 0x0014,	/* Controller Configuration */
	NVME_REG_CSTS	= 0x001c,	/* Controller Status */
	NVME_REG_NSSR	= 0x0020,	/* NVM Subsystem Reset */
	NVME_REG_AQA	= 0x0024,	/* Admin Queue Attributes */
	NVME_REG_ASQ	= 0x0028,	/* Admin SQ Base Address */
	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
	NVME_REG_CMBLOC	= 0x0038,	/* Controller Memory Buffer Location */
	NVME_REG_CMBSZ	= 0x003c,	/* Controller Memory Buffer Size */
};

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)

#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz)	(((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz)	(((cmbsz) >> 8) & 0xf)

#define NVME_CMB_WDS(cmbsz)	((cmbsz) & 0x10)
#define NVME_CMB_RDS(cmbsz)	((cmbsz) & 0x8)
#define NVME_CMB_LISTS(cmbsz)	((cmbsz) & 0x4)
#define NVME_CMB_CQS(cmbsz)	((cmbsz) & 0x2)
#define NVME_CMB_SQS(cmbsz)	((cmbsz) & 0x1)
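
/*
 * Illustrative sketch (not part of the upstream header): decoding a couple
 * of CAP fields a driver typically looks at.  Reading the 64-bit CAP
 * register itself (e.g. with lo_hi_readq()) is assumed to happen elsewhere.
 */
static inline unsigned int nvme_example_cap_queue_depth(u64 cap)
{
	/* MQES is a 0's based value, so the usable queue depth is MQES + 1. */
	return NVME_CAP_MQES(cap) + 1;
}

static inline unsigned long nvme_example_cap_min_page_size(u64 cap)
{
	/* MPSMIN encodes the minimum memory page size as 2^(12 + MPSMIN). */
	return 1UL << (12 + NVME_CAP_MPSMIN(cap));
}
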
/*
 * Submission and Completion Queue Entry Sizes for the NVM command set.
 * (In bytes and specified as a power of two (2^n)).
 */
#define NVME_NVM_IOSQES		6
#define NVME_NVM_IOCQES		4

enum {
	NVME_CC_ENABLE		= 1 << 0,
	NVME_CC_CSS_NVM		= 0 << 4,
	NVME_CC_MPS_SHIFT	= 7,
	NVME_CC_ARB_RR		= 0 << 11,
	NVME_CC_ARB_WRRU	= 1 << 11,
	NVME_CC_ARB_VS		= 7 << 11,
	NVME_CC_SHN_NONE	= 0 << 14,
	NVME_CC_SHN_NORMAL	= 1 << 14,
	NVME_CC_SHN_ABRUPT	= 2 << 14,
	NVME_CC_SHN_MASK	= 3 << 14,
	NVME_CC_IOSQES		= NVME_NVM_IOSQES << 16,
	NVME_CC_IOCQES		= NVME_NVM_IOCQES << 20,
	NVME_CSTS_RDY		= 1 << 0,
	NVME_CSTS_CFS		= 1 << 1,
	NVME_CSTS_NSSRO		= 1 << 4,
	NVME_CSTS_SHST_NORMAL	= 0 << 2,
	NVME_CSTS_SHST_OCCUR	= 1 << 2,
	NVME_CSTS_SHST_CMPLT	= 2 << 2,
	NVME_CSTS_SHST_MASK	= 3 << 2,
};
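
/*
 * Illustrative sketch (not part of the upstream header): composing the
 * Controller Configuration value a PCIe driver would typically write
 * before setting NVME_CC_ENABLE.  "page_shift" (the host page shift) is
 * an assumption of this example.
 */
static inline u32 nvme_example_build_cc(unsigned int page_shift)
{
	u32 ctrl_config = NVME_CC_CSS_NVM;

	ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	return ctrl_config | NVME_CC_ENABLE;
}
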
struct nvme_id_power_state {
	__le16			max_power;	/* centiwatts */
	__u8			rsvd2;
	__u8			flags;
	__le32			entry_lat;	/* microseconds */
	__le32			exit_lat;	/* microseconds */
	__u8			read_tput;
	__u8			read_lat;
	__u8			write_tput;
	__u8			write_lat;
	__le16			idle_power;
	__u8			idle_scale;
	__u8			rsvd19;
	__le16			active_power;
	__u8			active_work_scale;
	__u8			rsvd23[9];
};

enum {
	NVME_PS_FLAGS_MAX_POWER_SCALE	= 1 << 0,
	NVME_PS_FLAGS_NON_OP_STATE	= 1 << 1,
};

struct nvme_id_ctrl {
	__le16			vid;
	__le16			ssvid;
	char			sn[20];
	char			mn[40];
	char			fr[8];
	__u8			rab;
	__u8			ieee[3];
	__u8			cmic;
	__u8			mdts;
	__le16			cntlid;
	__le32			ver;
	__le32			rtd3r;
	__le32			rtd3e;
	__le32			oaes;
	__le32			ctratt;
	__u8			rsvd100[156];
	__le16			oacs;
	__u8			acl;
	__u8			aerl;
	__u8			frmw;
	__u8			lpa;
	__u8			elpe;
	__u8			npss;
	__u8			avscc;
	__u8			apsta;
	__le16			wctemp;
	__le16			cctemp;
	__le16			mtfa;
	__le32			hmpre;
	__le32			hmmin;
	__u8			tnvmcap[16];
	__u8			unvmcap[16];
	__le32			rpmbs;
	__u8			rsvd316[4];
	__le16			kas;
	__u8			rsvd322[190];
	__u8			sqes;
	__u8			cqes;
	__le16			maxcmd;
	__le32			nn;
	__le16			oncs;
	__le16			fuses;
	__u8			fna;
	__u8			vwc;
	__le16			awun;
	__le16			awupf;
	__u8			nvscc;
	__u8			rsvd531;
	__le16			acwu;
	__u8			rsvd534[2];
	__le32			sgls;
	__u8			rsvd540[228];
	char			subnqn[256];
	__u8			rsvd1024[768];
	__le32			ioccsz;
	__le32			iorcsz;
	__le16			icdoff;
	__u8			ctrattr;
	__u8			msdbd;
	__u8			rsvd1804[244];
	struct nvme_id_power_state	psd[32];
	__u8			vs[1024];
};

enum {
	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
	NVME_CTRL_ONCS_DSM			= 1 << 2,
	NVME_CTRL_ONCS_WRITE_ZEROES		= 1 << 3,
	NVME_CTRL_VWC_PRESENT			= 1 << 0,
	NVME_CTRL_OACS_SEC_SUPP			= 1 << 0,
};

struct nvme_lbaf {
	__le16			ms;
	__u8			ds;
	__u8			rp;
};

struct nvme_id_ns {
	__le64			nsze;
	__le64			ncap;
	__le64			nuse;
	__u8			nsfeat;
	__u8			nlbaf;
	__u8			flbas;
	__u8			mc;
	__u8			dpc;
	__u8			dps;
	__u8			nmic;
	__u8			rescap;
	__u8			fpi;
	__u8			rsvd33;
	__le16			nawun;
	__le16			nawupf;
	__le16			nacwu;
	__le16			nabsn;
	__le16			nabo;
	__le16			nabspf;
	__u16			rsvd46;
	__u8			nvmcap[16];
	__u8			rsvd64[40];
	__u8			nguid[16];
	__u8			eui64[8];
	struct nvme_lbaf	lbaf[16];
	__u8			rsvd192[192];
	__u8			vs[3712];
};

enum {
	NVME_ID_CNS_NS			= 0x00,
	NVME_ID_CNS_CTRL		= 0x01,
	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
	NVME_ID_CNS_NS_PRESENT_LIST	= 0x10,
	NVME_ID_CNS_NS_PRESENT		= 0x11,
	NVME_ID_CNS_CTRL_NS_LIST	= 0x12,
	NVME_ID_CNS_CTRL_LIST		= 0x13,
};

enum {
	NVME_NS_FEAT_THIN	= 1 << 0,
	NVME_NS_FLBAS_LBA_MASK	= 0xf,
	NVME_NS_FLBAS_META_EXT	= 0x10,
	NVME_LBAF_RP_BEST	= 0,
	NVME_LBAF_RP_BETTER	= 1,
	NVME_LBAF_RP_GOOD	= 2,
	NVME_LBAF_RP_DEGRADED	= 3,
	NVME_NS_DPC_PI_LAST	= 1 << 4,
	NVME_NS_DPC_PI_FIRST	= 1 << 3,
	NVME_NS_DPC_PI_TYPE3	= 1 << 2,
	NVME_NS_DPC_PI_TYPE2	= 1 << 1,
	NVME_NS_DPC_PI_TYPE1	= 1 << 0,
	NVME_NS_DPS_PI_FIRST	= 1 << 3,
	NVME_NS_DPS_PI_MASK	= 0x7,
	NVME_NS_DPS_PI_TYPE1	= 1,
	NVME_NS_DPS_PI_TYPE2	= 2,
	NVME_NS_DPS_PI_TYPE3	= 3,
};

struct nvme_smart_log {
	__u8			critical_warning;
	__u8			temperature[2];
	__u8			avail_spare;
	__u8			spare_thresh;
	__u8			percent_used;
	__u8			rsvd6[26];
	__u8			data_units_read[16];
	__u8			data_units_written[16];
	__u8			host_reads[16];
	__u8			host_writes[16];
	__u8			ctrl_busy_time[16];
	__u8			power_cycles[16];
	__u8			power_on_hours[16];
	__u8			unsafe_shutdowns[16];
	__u8			media_errors[16];
	__u8			num_err_log_entries[16];
	__le32			warning_temp_time;
	__le32			critical_comp_time;
	__le16			temp_sensor[8];
	__u8			rsvd216[296];
};
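
/*
 * Illustrative sketch (not part of the upstream header): the composite
 * temperature in the SMART / Health log is a 16-bit little-endian value
 * in kelvins, carried as two bytes.
 */
static inline int nvme_example_smart_temp_celsius(struct nvme_smart_log *log)
{
	u16 kelvin = log->temperature[0] | (log->temperature[1] << 8);

	return (int)kelvin - 273;
}
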
enum {
	NVME_SMART_CRIT_SPARE		= 1 << 0,
	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
	NVME_SMART_CRIT_MEDIA		= 1 << 3,
	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
};

enum {
	NVME_AER_NOTICE_NS_CHANGED	= 0x0002,
};

struct nvme_lba_range_type {
	__u8			type;
	__u8			attributes;
	__u8			rsvd2[14];
	__u64			slba;
	__u64			nlb;
	__u8			guid[16];
	__u8			rsvd48[16];
};

enum {
	NVME_LBART_TYPE_FS	= 0x01,
	NVME_LBART_TYPE_RAID	= 0x02,
	NVME_LBART_TYPE_CACHE	= 0x03,
	NVME_LBART_TYPE_SWAP	= 0x04,

	NVME_LBART_ATTRIB_TEMP	= 1 << 0,
	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
};

struct nvme_reservation_status {
	__le32	gen;
	__u8	rtype;
	__u8	regctl[2];
	__u8	resv5[2];
	__u8	ptpls;
	__u8	resv10[13];
	struct {
		__le16	cntlid;
		__u8	rcsts;
		__u8	resv3[5];
		__le64	hostid;
		__le64	rkey;
	} regctl_ds[];
};

enum nvme_async_event_type {
	NVME_AER_TYPE_ERROR	= 0,
	NVME_AER_TYPE_SMART	= 1,
	NVME_AER_TYPE_NOTICE	= 2,
};

/* I/O commands */

enum nvme_opcode {
	nvme_cmd_flush		= 0x00,
	nvme_cmd_write		= 0x01,
	nvme_cmd_read		= 0x02,
	nvme_cmd_write_uncor	= 0x04,
	nvme_cmd_compare	= 0x05,
	nvme_cmd_write_zeroes	= 0x08,
	nvme_cmd_dsm		= 0x09,
	nvme_cmd_resv_register	= 0x0d,
	nvme_cmd_resv_report	= 0x0e,
	nvme_cmd_resv_acquire	= 0x11,
	nvme_cmd_resv_release	= 0x15,
};

/*
 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
 * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
 * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
 *                            request subtype
 */
enum {
	NVME_SGL_FMT_ADDRESS		= 0x00,
	NVME_SGL_FMT_OFFSET		= 0x01,
	NVME_SGL_FMT_INVALIDATE		= 0x0f,
};

/*
 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * For struct nvme_sgl_desc:
 *   @NVME_SGL_FMT_DATA_DESC:		data block descriptor
 *   @NVME_SGL_FMT_SEG_DESC:		sgl segment descriptor
 *   @NVME_SGL_FMT_LAST_SEG_DESC:	last sgl segment descriptor
 *
 * For struct nvme_keyed_sgl_desc:
 *   @NVME_KEY_SGL_FMT_DATA_DESC:	keyed data block descriptor
 */
enum {
	NVME_SGL_FMT_DATA_DESC		= 0x00,
	NVME_SGL_FMT_SEG_DESC		= 0x02,
	NVME_SGL_FMT_LAST_SEG_DESC	= 0x03,
	NVME_KEY_SGL_FMT_DATA_DESC	= 0x04,
};

struct nvme_sgl_desc {
	__le64	addr;
	__le32	length;
	__u8	rsvd[3];
	__u8	type;
};

struct nvme_keyed_sgl_desc {
	__le64	addr;
	__u8	length[3];
	__u8	key[4];
	__u8	type;
};

union nvme_data_ptr {
	struct {
		__le64	prp1;
		__le64	prp2;
	};
	struct nvme_sgl_desc	sgl;
	struct nvme_keyed_sgl_desc ksgl;
};
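
/*
 * Illustrative sketch (not part of the upstream header): a fabrics host
 * describing an in-capsule data buffer with a single SGL data block
 * descriptor.  The type byte carries the descriptor type in its upper
 * nibble and the subtype in its lower nibble.  cpu_to_le*() helpers are
 * assumed to be available to the including file.
 */
static inline void nvme_example_fill_inline_sgl(union nvme_data_ptr *dptr,
		__u64 offset, __u32 length)
{
	dptr->sgl.addr = cpu_to_le64(offset);
	dptr->sgl.length = cpu_to_le32(length);
	dptr->sgl.type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}
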
/*
 * Lowest two bits of our flags field (FUSE field in the spec):
 *
 * @NVME_CMD_FUSE_FIRST:   Fused Operation, first command
 * @NVME_CMD_FUSE_SECOND:  Fused Operation, second command
 *
 * Highest two bits in our flags field (PSDT field in the spec):
 *
 * @NVME_CMD_PSDT_SGL_METABUF:	Use SGLS for this transfer,
 *	If used, MPTR contains addr of single physical buffer (byte aligned).
 * @NVME_CMD_PSDT_SGL_METASEG:	Use SGLS for this transfer,
 *	If used, MPTR contains an address of an SGL segment containing
 *	exactly 1 SGL descriptor (qword aligned).
 */
enum {
	NVME_CMD_FUSE_FIRST	= (1 << 0),
	NVME_CMD_FUSE_SECOND	= (1 << 1),

	NVME_CMD_SGL_METABUF	= (1 << 6),
	NVME_CMD_SGL_METASEG	= (1 << 7),
	NVME_CMD_SGL_ALL	= NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
};

struct nvme_common_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le32			cdw2[2];
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le32			cdw10[6];
};

struct nvme_rw_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le64			slba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le32			reftag;
	__le16			apptag;
	__le16			appmask;
};
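
/*
 * Illustrative sketch (not part of the upstream header): filling in a read
 * of "nblocks" logical blocks starting at "slba".  The nsid and the DMA
 * address of the data buffer are assumptions of this example, and the
 * "length" field is a 0's based count.  cpu_to_le*() helpers are assumed
 * to be available to the including file.
 */
static inline void nvme_example_setup_read(struct nvme_rw_command *rw,
		__u32 nsid, __u64 slba, __u16 nblocks, __u64 data_dma)
{
	rw->opcode = nvme_cmd_read;
	rw->nsid = cpu_to_le32(nsid);
	rw->dptr.prp1 = cpu_to_le64(data_dma);
	rw->slba = cpu_to_le64(slba);
	rw->length = cpu_to_le16(nblocks - 1);	/* 0's based */
}
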
enum {
	NVME_RW_LR			= 1 << 15,
	NVME_RW_FUA			= 1 << 14,
	NVME_RW_DSM_FREQ_UNSPEC		= 0,
	NVME_RW_DSM_FREQ_TYPICAL	= 1,
	NVME_RW_DSM_FREQ_RARE		= 2,
	NVME_RW_DSM_FREQ_READS		= 3,
	NVME_RW_DSM_FREQ_WRITES		= 4,
	NVME_RW_DSM_FREQ_RW		= 5,
	NVME_RW_DSM_FREQ_ONCE		= 6,
	NVME_RW_DSM_FREQ_PREFETCH	= 7,
	NVME_RW_DSM_FREQ_TEMP		= 8,
	NVME_RW_DSM_LATENCY_NONE	= 0 << 4,
	NVME_RW_DSM_LATENCY_IDLE	= 1 << 4,
	NVME_RW_DSM_LATENCY_NORM	= 2 << 4,
	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
	NVME_RW_DSM_COMPRESSED		= 1 << 7,
	NVME_RW_PRINFO_PRCHK_REF	= 1 << 10,
	NVME_RW_PRINFO_PRCHK_APP	= 1 << 11,
	NVME_RW_PRINFO_PRCHK_GUARD	= 1 << 12,
	NVME_RW_PRINFO_PRACT		= 1 << 13,
};

struct nvme_dsm_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			nr;
	__le32			attributes;
	__u32			rsvd12[4];
};

enum {
	NVME_DSMGMT_IDR		= 1 << 0,
	NVME_DSMGMT_IDW		= 1 << 1,
	NVME_DSMGMT_AD		= 1 << 2,
};

#define NVME_DSM_MAX_RANGES	256

struct nvme_dsm_range {
	__le32			cattr;
	__le32			nlb;
	__le64			slba;
};
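
/*
 * Illustrative sketch (not part of the upstream header): a Dataset
 * Management deallocate (discard) of one range.  "range_dma" is the DMA
 * address of the nvme_dsm_range buffer and is an assumption of this
 * example; note that "nr" is a 0's based range count.
 */
static inline void nvme_example_setup_discard(struct nvme_dsm_cmd *cmd,
		struct nvme_dsm_range *range, __u32 nsid,
		__u64 slba, __u32 nlb, __u64 range_dma)
{
	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nlb);
	range->slba = cpu_to_le64(slba);

	cmd->opcode = nvme_cmd_dsm;
	cmd->nsid = cpu_to_le32(nsid);
	cmd->dptr.prp1 = cpu_to_le64(range_dma);
	cmd->nr = cpu_to_le32(0);			/* one range */
	cmd->attributes = cpu_to_le32(NVME_DSMGMT_AD);
}
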
struct nvme_write_zeroes_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le64			slba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le32			reftag;
	__le16			apptag;
	__le16			appmask;
};

/* Admin commands */

enum nvme_admin_opcode {
	nvme_admin_delete_sq		= 0x00,
	nvme_admin_create_sq		= 0x01,
	nvme_admin_get_log_page		= 0x02,
	nvme_admin_delete_cq		= 0x04,
	nvme_admin_create_cq		= 0x05,
	nvme_admin_identify		= 0x06,
	nvme_admin_abort_cmd		= 0x08,
	nvme_admin_set_features		= 0x09,
	nvme_admin_get_features		= 0x0a,
	nvme_admin_async_event		= 0x0c,
	nvme_admin_ns_mgmt		= 0x0d,
	nvme_admin_activate_fw		= 0x10,
	nvme_admin_download_fw		= 0x11,
	nvme_admin_ns_attach		= 0x15,
	nvme_admin_keep_alive		= 0x18,
	nvme_admin_format_nvm		= 0x80,
	nvme_admin_security_send	= 0x81,
	nvme_admin_security_recv	= 0x82,
};

enum {
	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
	NVME_CQ_IRQ_ENABLED	= (1 << 1),
	NVME_SQ_PRIO_URGENT	= (0 << 1),
	NVME_SQ_PRIO_HIGH	= (1 << 1),
	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
	NVME_SQ_PRIO_LOW	= (3 << 1),
	NVME_FEAT_ARBITRATION	= 0x01,
	NVME_FEAT_POWER_MGMT	= 0x02,
	NVME_FEAT_LBA_RANGE	= 0x03,
	NVME_FEAT_TEMP_THRESH	= 0x04,
	NVME_FEAT_ERR_RECOVERY	= 0x05,
	NVME_FEAT_VOLATILE_WC	= 0x06,
	NVME_FEAT_NUM_QUEUES	= 0x07,
	NVME_FEAT_IRQ_COALESCE	= 0x08,
	NVME_FEAT_IRQ_CONFIG	= 0x09,
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_AUTO_PST	= 0x0c,
	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
	NVME_FEAT_KATO		= 0x0f,
	NVME_FEAT_SW_PROGRESS	= 0x80,
	NVME_FEAT_HOST_ID	= 0x81,
	NVME_FEAT_RESV_MASK	= 0x82,
	NVME_FEAT_RESV_PERSIST	= 0x83,
	NVME_LOG_ERROR		= 0x01,
	NVME_LOG_SMART		= 0x02,
	NVME_LOG_FW_SLOT	= 0x03,
	NVME_LOG_DISC		= 0x70,
	NVME_LOG_RESERVATION	= 0x80,
	NVME_FWACT_REPL		= (0 << 3),
	NVME_FWACT_REPL_ACTV	= (1 << 3),
	NVME_FWACT_ACTV		= (2 << 3),
};

struct nvme_identify {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			cns;
	__u8			rsvd3;
	__le16			ctrlid;
	__u32			rsvd11[5];
};
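
/*
 * Illustrative sketch (not part of the upstream header): an Identify
 * Controller admin command.  The DMA address of the 4KB destination
 * buffer is an assumption of this example.
 */
static inline void nvme_example_identify_ctrl(struct nvme_identify *id,
		__u64 buf_dma)
{
	id->opcode = nvme_admin_identify;
	id->dptr.prp1 = cpu_to_le64(buf_dma);
	id->cns = NVME_ID_CNS_CTRL;
}
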
struct nvme_features {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			fid;
	__le32			dword11;
	__u32			rsvd12[4];
};

struct nvme_create_cq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__u64			rsvd8;
	__le16			cqid;
	__le16			qsize;
	__le16			cq_flags;
	__le16			irq_vector;
	__u32			rsvd12[4];
};

struct nvme_create_sq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__u64			rsvd8;
	__le16			sqid;
	__le16			qsize;
	__le16			sq_flags;
	__le16			cqid;
	__u32			rsvd12[4];
};

struct nvme_delete_queue {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			qid;
	__u16			rsvd10;
	__u32			rsvd11[5];
};

struct nvme_abort_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			sqid;
	__u16			cid;
	__u32			rsvd11[5];
};

struct nvme_download_firmware {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	union nvme_data_ptr	dptr;
	__le32			numd;
	__le32			offset;
	__u32			rsvd12[4];
};

struct nvme_format_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[4];
	__le32			cdw10;
	__u32			rsvd11[5];
};

struct nvme_get_log_page_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			lid;
	__u8			rsvd10;
	__le16			numdl;
	__le16			numdu;
	__u16			rsvd11;
	__le32			lpol;
	__le32			lpou;
	__u32			rsvd14[2];
};

/*
 * Fabrics subcommands.
 */
enum nvmf_fabrics_opcode {
	nvme_fabrics_command		= 0x7f,
};

enum nvmf_capsule_command {
	nvme_fabrics_type_property_set	= 0x00,
	nvme_fabrics_type_connect	= 0x01,
	nvme_fabrics_type_property_get	= 0x04,
};

struct nvmf_common_command {
	__u8	opcode;
	__u8	resv1;
	__u16	command_id;
	__u8	fctype;
	__u8	resv2[35];
	__u8	ts[24];
};

/*
 * The legal cntlid range an NVMe Target will provide.
 * Note that cntlid of value 0 is considered illegal in the fabrics world.
 * Devices based on earlier specs did not have the subsystem concept;
 * therefore, those devices had their cntlid value set to 0 as a result.
 */
#define NVME_CNTLID_MIN		1
#define NVME_CNTLID_MAX		0xffef
#define NVME_CNTLID_DYNAMIC	0xffff

#define MAX_DISC_LOGS	255

/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
	__u8		trtype;
	__u8		adrfam;
	__u8		subtype;
	__u8		treq;
	__le16		portid;
	__le16		cntlid;
	__le16		asqsz;
	__u8		resv8[22];
	char		trsvcid[NVMF_TRSVCID_SIZE];
	__u8		resv64[192];
	char		subnqn[NVMF_NQN_FIELD_LEN];
	char		traddr[NVMF_TRADDR_SIZE];
	union tsas {
		char		common[NVMF_TSAS_SIZE];
		struct rdma {
			__u8	qptype;
			__u8	prtype;
			__u8	cms;
			__u8	resv3[5];
			__u16	pkey;
			__u8	resv10[246];
		} rdma;
	} tsas;
};

/* Discovery log page header */
struct nvmf_disc_rsp_page_hdr {
	__le64		genctr;
	__le64		numrec;
	__le16		recfmt;
	__u8		resv14[1006];
	struct nvmf_disc_rsp_page_entry entries[0];
};
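
/*
 * Illustrative sketch (not part of the upstream header): walking the
 * entries of a discovery log page that has already been read into a
 * contiguous buffer.  "handle_entry" is a hypothetical callback supplied
 * by the caller; le64_to_cpu() is assumed to be available.
 */
static inline void nvme_example_for_each_disc_entry(
		struct nvmf_disc_rsp_page_hdr *hdr,
		void (*handle_entry)(struct nvmf_disc_rsp_page_entry *entry))
{
	u64 i;

	for (i = 0; i < le64_to_cpu(hdr->numrec); i++)
		handle_entry(&hdr->entries[i]);
}
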
struct nvmf_connect_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[19];
	union nvme_data_ptr dptr;
	__le16		recfmt;
	__le16		qid;
	__le16		sqsize;
	__u8		cattr;
	__u8		resv3;
	__le32		kato;
	__u8		resv4[12];
};

struct nvmf_connect_data {
	__u8		hostid[16];
	__le16		cntlid;
	char		resv4[238];
	char		subsysnqn[NVMF_NQN_FIELD_LEN];
	char		hostnqn[NVMF_NQN_FIELD_LEN];
	char		resv5[256];
};

struct nvmf_property_set_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;
	__u8		resv3[3];
	__le32		offset;
	__le64		value;
	__u8		resv4[8];
};

struct nvmf_property_get_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;
	__u8		resv3[3];
	__le32		offset;
	__u8		resv4[16];
};

struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_write_zeroes_cmd write_zeroes;
		struct nvme_abort_cmd abort;
		struct nvme_get_log_page_command get_log_page;
		struct nvmf_common_command fabrics;
		struct nvmf_connect_command connect;
		struct nvmf_property_set_command prop_set;
		struct nvmf_property_get_command prop_get;
	};
};

static inline bool nvme_is_write(struct nvme_command *cmd)
{
	/*
	 * What a mess...
	 *
	 * Why can't we simply have a Fabrics In and Fabrics out command?
	 */
	if (unlikely(cmd->common.opcode == nvme_fabrics_command))
		return cmd->fabrics.opcode & 1;
	return cmd->common.opcode & 1;
}

enum {
	/*
	 * Generic Command Status:
	 */
	NVME_SC_SUCCESS			= 0x0,
	NVME_SC_INVALID_OPCODE		= 0x1,
	NVME_SC_INVALID_FIELD		= 0x2,
	NVME_SC_CMDID_CONFLICT		= 0x3,
	NVME_SC_DATA_XFER_ERROR		= 0x4,
	NVME_SC_POWER_LOSS		= 0x5,
	NVME_SC_INTERNAL		= 0x6,
	NVME_SC_ABORT_REQ		= 0x7,
	NVME_SC_ABORT_QUEUE		= 0x8,
	NVME_SC_FUSED_FAIL		= 0x9,
	NVME_SC_FUSED_MISSING		= 0xa,
	NVME_SC_INVALID_NS		= 0xb,
	NVME_SC_CMD_SEQ_ERROR		= 0xc,
	NVME_SC_SGL_INVALID_LAST	= 0xd,
	NVME_SC_SGL_INVALID_COUNT	= 0xe,
	NVME_SC_SGL_INVALID_DATA	= 0xf,
	NVME_SC_SGL_INVALID_METADATA	= 0x10,
	NVME_SC_SGL_INVALID_TYPE	= 0x11,

	NVME_SC_SGL_INVALID_OFFSET	= 0x16,
	NVME_SC_SGL_INVALID_SUBTYPE	= 0x17,

	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,
	NVME_SC_NS_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT	= 0x83,

	/*
	 * Command Specific Status:
	 */
	NVME_SC_CQ_INVALID		= 0x100,
	NVME_SC_QID_INVALID		= 0x101,
	NVME_SC_QUEUE_SIZE		= 0x102,
	NVME_SC_ABORT_LIMIT		= 0x103,
	NVME_SC_ABORT_MISSING		= 0x104,
	NVME_SC_ASYNC_LIMIT		= 0x105,
	NVME_SC_FIRMWARE_SLOT		= 0x106,
	NVME_SC_FIRMWARE_IMAGE		= 0x107,
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
	NVME_SC_INVALID_QUEUE		= 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
	NVME_SC_FW_NEEDS_RESET		= 0x111,
	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
	NVME_SC_FW_ACIVATE_PROHIBITED	= 0x113,
	NVME_SC_OVERLAPPING_RANGE	= 0x114,
	NVME_SC_NS_INSUFFICENT_CAP	= 0x115,
	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
	NVME_SC_NS_IS_PRIVATE		= 0x119,
	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
	NVME_SC_CTRL_LIST_INVALID	= 0x11c,

	/*
	 * I/O Command Set Specific - NVM commands:
	 */
	NVME_SC_BAD_ATTRIBUTES		= 0x180,
	NVME_SC_INVALID_PI		= 0x181,
	NVME_SC_READ_ONLY		= 0x182,
	NVME_SC_ONCS_NOT_SUPPORTED	= 0x183,

	/*
	 * I/O Command Set Specific - Fabrics commands:
	 */
	NVME_SC_CONNECT_FORMAT		= 0x180,
	NVME_SC_CONNECT_CTRL_BUSY	= 0x181,
	NVME_SC_CONNECT_INVALID_PARAM	= 0x182,
	NVME_SC_CONNECT_RESTART_DISC	= 0x183,
	NVME_SC_CONNECT_INVALID_HOST	= 0x184,

	NVME_SC_DISCOVERY_RESTART	= 0x190,
	NVME_SC_AUTH_REQUIRED		= 0x191,

	/*
	 * Media and Data Integrity Errors:
	 */
	NVME_SC_WRITE_FAULT		= 0x280,
	NVME_SC_READ_ERROR		= 0x281,
	NVME_SC_GUARD_CHECK		= 0x282,
	NVME_SC_APPTAG_CHECK		= 0x283,
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
	NVME_SC_UNWRITTEN_BLOCK		= 0x287,

	NVME_SC_DNR			= 0x4000,


	/*
	 * FC Transport-specific error status values for NVME commands
	 *
	 * Transport-specific status code values must be in the range 0xB0..0xBF
	 */

	/* Generic FC failure - catchall */
	NVME_SC_FC_TRANSPORT_ERROR	= 0x00B0,

	/* I/O failure due to FC ABTS'd */
	NVME_SC_FC_TRANSPORT_ABORTED	= 0x00B1,
};

struct nvme_completion {
	/*
	 * Used by Admin and Fabrics commands to return data:
	 */
	union nvme_result {
		__le16	u16;
		__le32	u32;
		__le64	u64;
	} result;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};
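
/*
 * Illustrative sketch (not part of the upstream header): bit 0 of the
 * completion status word is the phase tag, so a driver shifts it away
 * before comparing against the NVME_SC_* values above.  le16_to_cpu() is
 * assumed to be available to the including file.
 */
static inline bool nvme_example_req_failed(const struct nvme_completion *cqe)
{
	return (le16_to_cpu(cqe->status) >> 1) != NVME_SC_SUCCESS;
}

static inline bool nvme_example_no_retry(const struct nvme_completion *cqe)
{
	/* The Do Not Retry bit tells the host that retrying cannot help. */
	return (le16_to_cpu(cqe->status) >> 1) & NVME_SC_DNR;
}
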
#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))
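
/*
 * Illustrative example (not part of the upstream header): NVME_VS packs a
 * version as major.minor.tertiary, so NVME_VS(1, 2, 1) == 0x10201.  A
 * driver can compare the value read from the VS register against it
 * directly, e.g. "if (vs >= NVME_VS(1, 2, 0))".
 */
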
#endif /* _LINUX_NVME_H */