blob: d8b37bab2887e75ed0c7f3cc58a57f2594109179 [file] [log] [blame]
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001/*
2 * Definitions for the NVM Express interface
Matthew Wilcox8757ad62014-04-11 10:37:39 -04003 * Copyright (c) 2011-2014, Intel Corporation.
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05004 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050013 */
14
15#ifndef _LINUX_NVME_H
16#define _LINUX_NVME_H
17
Christoph Hellwig2812dfe2015-10-09 18:19:20 +020018#include <linux/types.h>
Christoph Hellwigeb793e22016-06-13 16:45:25 +020019#include <linux/uuid.h>
20
21/* NQN names in commands fields specified one size */
22#define NVMF_NQN_FIELD_LEN 256
23
24/* However the max length of a qualified name is another size */
25#define NVMF_NQN_SIZE 223
26
27#define NVMF_TRSVCID_SIZE 32
28#define NVMF_TRADDR_SIZE 256
29#define NVMF_TSAS_SIZE 256
30
31#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
32
33#define NVME_RDMA_IP_PORT 4420
34
35enum nvme_subsys_type {
36 NVME_NQN_DISC = 1, /* Discovery type target subsystem */
37 NVME_NQN_NVME = 2, /* NVME type target subsystem */
38};
39
40/* Address Family codes for Discovery Log Page entry ADRFAM field */
41enum {
42 NVMF_ADDR_FAMILY_PCI = 0, /* PCIe */
43 NVMF_ADDR_FAMILY_IP4 = 1, /* IP4 */
44 NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */
45 NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */
46 NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */
47};
48
49/* Transport Type codes for Discovery Log Page entry TRTYPE field */
50enum {
51 NVMF_TRTYPE_RDMA = 1, /* RDMA */
52 NVMF_TRTYPE_FC = 2, /* Fibre Channel */
53 NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
54 NVMF_TRTYPE_MAX,
55};
56
57/* Transport Requirements codes for Discovery Log Page entry TREQ field */
58enum {
59 NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
60 NVMF_TREQ_REQUIRED = 1, /* Required */
61 NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
62};
63
64/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
65 * RDMA_QPTYPE field
66 */
67enum {
68 NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */
69 NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */
70};
71
72/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
73 * RDMA_QPTYPE field
74 */
75enum {
76 NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */
77 NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */
78 NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */
79 NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */
80 NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */
81};
82
83/* RDMA Connection Management Service Type codes for Discovery Log Page
84 * entry TSAS RDMA_CMS field
85 */
86enum {
87 NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */
88};
89
90#define NVMF_AQ_DEPTH 32
Christoph Hellwig2812dfe2015-10-09 18:19:20 +020091
Christoph Hellwig7a67cbe2015-11-20 08:58:10 +010092enum {
93 NVME_REG_CAP = 0x0000, /* Controller Capabilities */
94 NVME_REG_VS = 0x0008, /* Version */
95 NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
Wang Sheng-Huia5b714a2016-04-27 20:10:16 +080096 NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
Christoph Hellwig7a67cbe2015-11-20 08:58:10 +010097 NVME_REG_CC = 0x0014, /* Controller Configuration */
98 NVME_REG_CSTS = 0x001c, /* Controller Status */
99 NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
100 NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
101 NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
Wang Sheng-Huia5b714a2016-04-27 20:10:16 +0800102 NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
Christoph Hellwig7a67cbe2015-11-20 08:58:10 +0100103 NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
104 NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500105};
106
Keith Buscha0cadb82012-07-27 13:57:23 -0400107#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
Matthew Wilcox22605f92011-04-19 15:04:20 -0400108#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
Matthew Wilcoxf1938f62011-10-20 17:00:41 -0400109#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
Keith Buschdfbac8c2015-08-10 15:20:40 -0600110#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
Keith Busch8fc23e02012-07-26 11:29:57 -0600111#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
Keith Busch1d090622014-06-23 11:34:01 -0600112#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
Matthew Wilcox22605f92011-04-19 15:04:20 -0400113
Jon Derrick8ffaadf2015-07-20 10:14:09 -0600114#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
115#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
116#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
117#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
118
119#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
120#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
121#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
122#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
123#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
124
Christoph Hellwig69cd27e2016-06-06 23:20:45 +0200125/*
126 * Submission and Completion Queue Entry Sizes for the NVM command set.
127 * (In bytes and specified as a power of two (2^n)).
128 */
129#define NVME_NVM_IOSQES 6
130#define NVME_NVM_IOCQES 4
131
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500132enum {
133 NVME_CC_ENABLE = 1 << 0,
134 NVME_CC_CSS_NVM = 0 << 4,
135 NVME_CC_MPS_SHIFT = 7,
136 NVME_CC_ARB_RR = 0 << 11,
137 NVME_CC_ARB_WRRU = 1 << 11,
Matthew Wilcox7f53f9d2011-03-22 15:55:45 -0400138 NVME_CC_ARB_VS = 7 << 11,
139 NVME_CC_SHN_NONE = 0 << 14,
140 NVME_CC_SHN_NORMAL = 1 << 14,
141 NVME_CC_SHN_ABRUPT = 2 << 14,
Keith Busch1894d8f2013-07-15 15:02:22 -0600142 NVME_CC_SHN_MASK = 3 << 14,
Christoph Hellwig69cd27e2016-06-06 23:20:45 +0200143 NVME_CC_IOSQES = NVME_NVM_IOSQES << 16,
144 NVME_CC_IOCQES = NVME_NVM_IOCQES << 20,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500145 NVME_CSTS_RDY = 1 << 0,
146 NVME_CSTS_CFS = 1 << 1,
Keith Buschdfbac8c2015-08-10 15:20:40 -0600147 NVME_CSTS_NSSRO = 1 << 4,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500148 NVME_CSTS_SHST_NORMAL = 0 << 2,
149 NVME_CSTS_SHST_OCCUR = 1 << 2,
150 NVME_CSTS_SHST_CMPLT = 2 << 2,
Keith Busch1894d8f2013-07-15 15:02:22 -0600151 NVME_CSTS_SHST_MASK = 3 << 2,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500152};
153
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200154struct nvme_id_power_state {
155 __le16 max_power; /* centiwatts */
156 __u8 rsvd2;
157 __u8 flags;
158 __le32 entry_lat; /* microseconds */
159 __le32 exit_lat; /* microseconds */
160 __u8 read_tput;
161 __u8 read_lat;
162 __u8 write_tput;
163 __u8 write_lat;
164 __le16 idle_power;
165 __u8 idle_scale;
166 __u8 rsvd19;
167 __le16 active_power;
168 __u8 active_work_scale;
169 __u8 rsvd23[9];
170};
171
172enum {
173 NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0,
174 NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
175};
176
177struct nvme_id_ctrl {
178 __le16 vid;
179 __le16 ssvid;
180 char sn[20];
181 char mn[40];
182 char fr[8];
183 __u8 rab;
184 __u8 ieee[3];
185 __u8 mic;
186 __u8 mdts;
Christoph Hellwig08c69642015-10-02 15:27:16 +0200187 __le16 cntlid;
188 __le32 ver;
Christoph Hellwig14e974a2016-06-06 23:20:43 +0200189 __le32 rtd3r;
190 __le32 rtd3e;
191 __le32 oaes;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200192 __le32 ctratt;
193 __u8 rsvd100[156];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200194 __le16 oacs;
195 __u8 acl;
196 __u8 aerl;
197 __u8 frmw;
198 __u8 lpa;
199 __u8 elpe;
200 __u8 npss;
201 __u8 avscc;
202 __u8 apsta;
203 __le16 wctemp;
204 __le16 cctemp;
Sagi Grimberg7b89eae2016-06-13 16:45:27 +0200205 __u8 rsvd270[50];
206 __le16 kas;
207 __u8 rsvd322[190];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200208 __u8 sqes;
209 __u8 cqes;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200210 __le16 maxcmd;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200211 __le32 nn;
212 __le16 oncs;
213 __le16 fuses;
214 __u8 fna;
215 __u8 vwc;
216 __le16 awun;
217 __le16 awupf;
218 __u8 nvscc;
219 __u8 rsvd531;
220 __le16 acwu;
221 __u8 rsvd534[2];
222 __le32 sgls;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200223 __u8 rsvd540[228];
224 char subnqn[256];
225 __u8 rsvd1024[768];
226 __le32 ioccsz;
227 __le32 iorcsz;
228 __le16 icdoff;
229 __u8 ctrattr;
230 __u8 msdbd;
231 __u8 rsvd1804[244];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200232 struct nvme_id_power_state psd[32];
233 __u8 vs[1024];
234};
235
236enum {
237 NVME_CTRL_ONCS_COMPARE = 1 << 0,
238 NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
239 NVME_CTRL_ONCS_DSM = 1 << 2,
240 NVME_CTRL_VWC_PRESENT = 1 << 0,
241};
242
243struct nvme_lbaf {
244 __le16 ms;
245 __u8 ds;
246 __u8 rp;
247};
248
249struct nvme_id_ns {
250 __le64 nsze;
251 __le64 ncap;
252 __le64 nuse;
253 __u8 nsfeat;
254 __u8 nlbaf;
255 __u8 flbas;
256 __u8 mc;
257 __u8 dpc;
258 __u8 dps;
259 __u8 nmic;
260 __u8 rescap;
261 __u8 fpi;
262 __u8 rsvd33;
263 __le16 nawun;
264 __le16 nawupf;
265 __le16 nacwu;
266 __le16 nabsn;
267 __le16 nabo;
268 __le16 nabspf;
269 __u16 rsvd46;
270 __le64 nvmcap[2];
271 __u8 rsvd64[40];
272 __u8 nguid[16];
273 __u8 eui64[8];
274 struct nvme_lbaf lbaf[16];
275 __u8 rsvd192[192];
276 __u8 vs[3712];
277};
278
279enum {
280 NVME_NS_FEAT_THIN = 1 << 0,
281 NVME_NS_FLBAS_LBA_MASK = 0xf,
282 NVME_NS_FLBAS_META_EXT = 0x10,
283 NVME_LBAF_RP_BEST = 0,
284 NVME_LBAF_RP_BETTER = 1,
285 NVME_LBAF_RP_GOOD = 2,
286 NVME_LBAF_RP_DEGRADED = 3,
287 NVME_NS_DPC_PI_LAST = 1 << 4,
288 NVME_NS_DPC_PI_FIRST = 1 << 3,
289 NVME_NS_DPC_PI_TYPE3 = 1 << 2,
290 NVME_NS_DPC_PI_TYPE2 = 1 << 1,
291 NVME_NS_DPC_PI_TYPE1 = 1 << 0,
292 NVME_NS_DPS_PI_FIRST = 1 << 3,
293 NVME_NS_DPS_PI_MASK = 0x7,
294 NVME_NS_DPS_PI_TYPE1 = 1,
295 NVME_NS_DPS_PI_TYPE2 = 2,
296 NVME_NS_DPS_PI_TYPE3 = 3,
297};
298
299struct nvme_smart_log {
300 __u8 critical_warning;
301 __u8 temperature[2];
302 __u8 avail_spare;
303 __u8 spare_thresh;
304 __u8 percent_used;
305 __u8 rsvd6[26];
306 __u8 data_units_read[16];
307 __u8 data_units_written[16];
308 __u8 host_reads[16];
309 __u8 host_writes[16];
310 __u8 ctrl_busy_time[16];
311 __u8 power_cycles[16];
312 __u8 power_on_hours[16];
313 __u8 unsafe_shutdowns[16];
314 __u8 media_errors[16];
315 __u8 num_err_log_entries[16];
316 __le32 warning_temp_time;
317 __le32 critical_comp_time;
318 __le16 temp_sensor[8];
319 __u8 rsvd216[296];
320};
321
322enum {
323 NVME_SMART_CRIT_SPARE = 1 << 0,
324 NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
325 NVME_SMART_CRIT_RELIABILITY = 1 << 2,
326 NVME_SMART_CRIT_MEDIA = 1 << 3,
327 NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
328};
329
330enum {
331 NVME_AER_NOTICE_NS_CHANGED = 0x0002,
332};
333
334struct nvme_lba_range_type {
335 __u8 type;
336 __u8 attributes;
337 __u8 rsvd2[14];
338 __u64 slba;
339 __u64 nlb;
340 __u8 guid[16];
341 __u8 rsvd48[16];
342};
343
344enum {
345 NVME_LBART_TYPE_FS = 0x01,
346 NVME_LBART_TYPE_RAID = 0x02,
347 NVME_LBART_TYPE_CACHE = 0x03,
348 NVME_LBART_TYPE_SWAP = 0x04,
349
350 NVME_LBART_ATTRIB_TEMP = 1 << 0,
351 NVME_LBART_ATTRIB_HIDE = 1 << 1,
352};
353
354struct nvme_reservation_status {
355 __le32 gen;
356 __u8 rtype;
357 __u8 regctl[2];
358 __u8 resv5[2];
359 __u8 ptpls;
360 __u8 resv10[13];
361 struct {
362 __le16 cntlid;
363 __u8 rcsts;
364 __u8 resv3[5];
365 __le64 hostid;
366 __le64 rkey;
367 } regctl_ds[];
368};
369
Christoph Hellwig79f370e2016-06-06 23:20:46 +0200370enum nvme_async_event_type {
371 NVME_AER_TYPE_ERROR = 0,
372 NVME_AER_TYPE_SMART = 1,
373 NVME_AER_TYPE_NOTICE = 2,
374};
375
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200376/* I/O commands */
377
378enum nvme_opcode {
379 nvme_cmd_flush = 0x00,
380 nvme_cmd_write = 0x01,
381 nvme_cmd_read = 0x02,
382 nvme_cmd_write_uncor = 0x04,
383 nvme_cmd_compare = 0x05,
384 nvme_cmd_write_zeroes = 0x08,
385 nvme_cmd_dsm = 0x09,
386 nvme_cmd_resv_register = 0x0d,
387 nvme_cmd_resv_report = 0x0e,
388 nvme_cmd_resv_acquire = 0x11,
389 nvme_cmd_resv_release = 0x15,
390};
391
James Smart3972be22016-06-06 23:20:47 +0200392/*
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200393 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
394 *
395 * @NVME_SGL_FMT_ADDRESS: absolute address of the data block
396 * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block
397 * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation
398 * request subtype
399 */
400enum {
401 NVME_SGL_FMT_ADDRESS = 0x00,
402 NVME_SGL_FMT_OFFSET = 0x01,
403 NVME_SGL_FMT_INVALIDATE = 0x0f,
404};
405
406/*
407 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
408 *
409 * For struct nvme_sgl_desc:
410 * @NVME_SGL_FMT_DATA_DESC: data block descriptor
411 * @NVME_SGL_FMT_SEG_DESC: sgl segment descriptor
412 * @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
413 *
414 * For struct nvme_keyed_sgl_desc:
415 * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
416 */
417enum {
418 NVME_SGL_FMT_DATA_DESC = 0x00,
419 NVME_SGL_FMT_SEG_DESC = 0x02,
420 NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
421 NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
422};
423
424struct nvme_sgl_desc {
425 __le64 addr;
426 __le32 length;
427 __u8 rsvd[3];
428 __u8 type;
429};
430
431struct nvme_keyed_sgl_desc {
432 __le64 addr;
433 __u8 length[3];
434 __u8 key[4];
435 __u8 type;
436};
437
438union nvme_data_ptr {
439 struct {
440 __le64 prp1;
441 __le64 prp2;
442 };
443 struct nvme_sgl_desc sgl;
444 struct nvme_keyed_sgl_desc ksgl;
445};
446
447/*
James Smart3972be22016-06-06 23:20:47 +0200448 * Lowest two bits of our flags field (FUSE field in the spec):
449 *
450 * @NVME_CMD_FUSE_FIRST: Fused Operation, first command
451 * @NVME_CMD_FUSE_SECOND: Fused Operation, second command
452 *
453 * Highest two bits in our flags field (PSDT field in the spec):
454 *
455 * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer,
456 * If used, MPTR contains addr of single physical buffer (byte aligned).
457 * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer,
458 * If used, MPTR contains an address of an SGL segment containing
459 * exactly 1 SGL descriptor (qword aligned).
460 */
461enum {
462 NVME_CMD_FUSE_FIRST = (1 << 0),
463 NVME_CMD_FUSE_SECOND = (1 << 1),
464
465 NVME_CMD_SGL_METABUF = (1 << 6),
466 NVME_CMD_SGL_METASEG = (1 << 7),
467 NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
468};
469
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200470struct nvme_common_command {
471 __u8 opcode;
472 __u8 flags;
473 __u16 command_id;
474 __le32 nsid;
475 __le32 cdw2[2];
476 __le64 metadata;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200477 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200478 __le32 cdw10[6];
479};
480
481struct nvme_rw_command {
482 __u8 opcode;
483 __u8 flags;
484 __u16 command_id;
485 __le32 nsid;
486 __u64 rsvd2;
487 __le64 metadata;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200488 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200489 __le64 slba;
490 __le16 length;
491 __le16 control;
492 __le32 dsmgmt;
493 __le32 reftag;
494 __le16 apptag;
495 __le16 appmask;
496};
497
498enum {
499 NVME_RW_LR = 1 << 15,
500 NVME_RW_FUA = 1 << 14,
501 NVME_RW_DSM_FREQ_UNSPEC = 0,
502 NVME_RW_DSM_FREQ_TYPICAL = 1,
503 NVME_RW_DSM_FREQ_RARE = 2,
504 NVME_RW_DSM_FREQ_READS = 3,
505 NVME_RW_DSM_FREQ_WRITES = 4,
506 NVME_RW_DSM_FREQ_RW = 5,
507 NVME_RW_DSM_FREQ_ONCE = 6,
508 NVME_RW_DSM_FREQ_PREFETCH = 7,
509 NVME_RW_DSM_FREQ_TEMP = 8,
510 NVME_RW_DSM_LATENCY_NONE = 0 << 4,
511 NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
512 NVME_RW_DSM_LATENCY_NORM = 2 << 4,
513 NVME_RW_DSM_LATENCY_LOW = 3 << 4,
514 NVME_RW_DSM_SEQ_REQ = 1 << 6,
515 NVME_RW_DSM_COMPRESSED = 1 << 7,
516 NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
517 NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
518 NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
519 NVME_RW_PRINFO_PRACT = 1 << 13,
520};
521
522struct nvme_dsm_cmd {
523 __u8 opcode;
524 __u8 flags;
525 __u16 command_id;
526 __le32 nsid;
527 __u64 rsvd2[2];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200528 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200529 __le32 nr;
530 __le32 attributes;
531 __u32 rsvd12[4];
532};
533
534enum {
535 NVME_DSMGMT_IDR = 1 << 0,
536 NVME_DSMGMT_IDW = 1 << 1,
537 NVME_DSMGMT_AD = 1 << 2,
538};
539
540struct nvme_dsm_range {
541 __le32 cattr;
542 __le32 nlb;
543 __le64 slba;
544};
545
546/* Admin commands */
547
548enum nvme_admin_opcode {
549 nvme_admin_delete_sq = 0x00,
550 nvme_admin_create_sq = 0x01,
551 nvme_admin_get_log_page = 0x02,
552 nvme_admin_delete_cq = 0x04,
553 nvme_admin_create_cq = 0x05,
554 nvme_admin_identify = 0x06,
555 nvme_admin_abort_cmd = 0x08,
556 nvme_admin_set_features = 0x09,
557 nvme_admin_get_features = 0x0a,
558 nvme_admin_async_event = 0x0c,
559 nvme_admin_activate_fw = 0x10,
560 nvme_admin_download_fw = 0x11,
Sagi Grimberg7b89eae2016-06-13 16:45:27 +0200561 nvme_admin_keep_alive = 0x18,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200562 nvme_admin_format_nvm = 0x80,
563 nvme_admin_security_send = 0x81,
564 nvme_admin_security_recv = 0x82,
565};
566
567enum {
568 NVME_QUEUE_PHYS_CONTIG = (1 << 0),
569 NVME_CQ_IRQ_ENABLED = (1 << 1),
570 NVME_SQ_PRIO_URGENT = (0 << 1),
571 NVME_SQ_PRIO_HIGH = (1 << 1),
572 NVME_SQ_PRIO_MEDIUM = (2 << 1),
573 NVME_SQ_PRIO_LOW = (3 << 1),
574 NVME_FEAT_ARBITRATION = 0x01,
575 NVME_FEAT_POWER_MGMT = 0x02,
576 NVME_FEAT_LBA_RANGE = 0x03,
577 NVME_FEAT_TEMP_THRESH = 0x04,
578 NVME_FEAT_ERR_RECOVERY = 0x05,
579 NVME_FEAT_VOLATILE_WC = 0x06,
580 NVME_FEAT_NUM_QUEUES = 0x07,
581 NVME_FEAT_IRQ_COALESCE = 0x08,
582 NVME_FEAT_IRQ_CONFIG = 0x09,
583 NVME_FEAT_WRITE_ATOMIC = 0x0a,
584 NVME_FEAT_ASYNC_EVENT = 0x0b,
585 NVME_FEAT_AUTO_PST = 0x0c,
Sagi Grimberg7b89eae2016-06-13 16:45:27 +0200586 NVME_FEAT_KATO = 0x0f,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200587 NVME_FEAT_SW_PROGRESS = 0x80,
588 NVME_FEAT_HOST_ID = 0x81,
589 NVME_FEAT_RESV_MASK = 0x82,
590 NVME_FEAT_RESV_PERSIST = 0x83,
591 NVME_LOG_ERROR = 0x01,
592 NVME_LOG_SMART = 0x02,
593 NVME_LOG_FW_SLOT = 0x03,
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200594 NVME_LOG_DISC = 0x70,
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200595 NVME_LOG_RESERVATION = 0x80,
596 NVME_FWACT_REPL = (0 << 3),
597 NVME_FWACT_REPL_ACTV = (1 << 3),
598 NVME_FWACT_ACTV = (2 << 3),
599};
600
601struct nvme_identify {
602 __u8 opcode;
603 __u8 flags;
604 __u16 command_id;
605 __le32 nsid;
606 __u64 rsvd2[2];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200607 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200608 __le32 cns;
609 __u32 rsvd11[5];
610};
611
612struct nvme_features {
613 __u8 opcode;
614 __u8 flags;
615 __u16 command_id;
616 __le32 nsid;
617 __u64 rsvd2[2];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200618 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200619 __le32 fid;
620 __le32 dword11;
621 __u32 rsvd12[4];
622};
623
624struct nvme_create_cq {
625 __u8 opcode;
626 __u8 flags;
627 __u16 command_id;
628 __u32 rsvd1[5];
629 __le64 prp1;
630 __u64 rsvd8;
631 __le16 cqid;
632 __le16 qsize;
633 __le16 cq_flags;
634 __le16 irq_vector;
635 __u32 rsvd12[4];
636};
637
638struct nvme_create_sq {
639 __u8 opcode;
640 __u8 flags;
641 __u16 command_id;
642 __u32 rsvd1[5];
643 __le64 prp1;
644 __u64 rsvd8;
645 __le16 sqid;
646 __le16 qsize;
647 __le16 sq_flags;
648 __le16 cqid;
649 __u32 rsvd12[4];
650};
651
652struct nvme_delete_queue {
653 __u8 opcode;
654 __u8 flags;
655 __u16 command_id;
656 __u32 rsvd1[9];
657 __le16 qid;
658 __u16 rsvd10;
659 __u32 rsvd11[5];
660};
661
662struct nvme_abort_cmd {
663 __u8 opcode;
664 __u8 flags;
665 __u16 command_id;
666 __u32 rsvd1[9];
667 __le16 sqid;
668 __u16 cid;
669 __u32 rsvd11[5];
670};
671
672struct nvme_download_firmware {
673 __u8 opcode;
674 __u8 flags;
675 __u16 command_id;
676 __u32 rsvd1[5];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200677 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200678 __le32 numd;
679 __le32 offset;
680 __u32 rsvd12[4];
681};
682
683struct nvme_format_cmd {
684 __u8 opcode;
685 __u8 flags;
686 __u16 command_id;
687 __le32 nsid;
688 __u64 rsvd2[4];
689 __le32 cdw10;
690 __u32 rsvd11[5];
691};
692
Armen Baloyan725b3582016-06-06 23:20:44 +0200693struct nvme_get_log_page_command {
694 __u8 opcode;
695 __u8 flags;
696 __u16 command_id;
697 __le32 nsid;
698 __u64 rsvd2[2];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200699 union nvme_data_ptr dptr;
Armen Baloyan725b3582016-06-06 23:20:44 +0200700 __u8 lid;
701 __u8 rsvd10;
702 __le16 numdl;
703 __le16 numdu;
704 __u16 rsvd11;
705 __le32 lpol;
706 __le32 lpou;
707 __u32 rsvd14[2];
708};
709
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200710/*
711 * Fabrics subcommands.
712 */
713enum nvmf_fabrics_opcode {
714 nvme_fabrics_command = 0x7f,
715};
716
717enum nvmf_capsule_command {
718 nvme_fabrics_type_property_set = 0x00,
719 nvme_fabrics_type_connect = 0x01,
720 nvme_fabrics_type_property_get = 0x04,
721};
722
723struct nvmf_common_command {
724 __u8 opcode;
725 __u8 resv1;
726 __u16 command_id;
727 __u8 fctype;
728 __u8 resv2[35];
729 __u8 ts[24];
730};
731
732/*
733 * The legal cntlid range a NVMe Target will provide.
734 * Note that cntlid of value 0 is considered illegal in the fabrics world.
735 * Devices based on earlier specs did not have the subsystem concept;
736 * therefore, those devices had their cntlid value set to 0 as a result.
737 */
738#define NVME_CNTLID_MIN 1
739#define NVME_CNTLID_MAX 0xffef
740#define NVME_CNTLID_DYNAMIC 0xffff
741
742#define MAX_DISC_LOGS 255
743
744/* Discovery log page entry */
745struct nvmf_disc_rsp_page_entry {
746 __u8 trtype;
747 __u8 adrfam;
748 __u8 nqntype;
749 __u8 treq;
750 __le16 portid;
751 __le16 cntlid;
752 __le16 asqsz;
753 __u8 resv8[22];
754 char trsvcid[NVMF_TRSVCID_SIZE];
755 __u8 resv64[192];
756 char subnqn[NVMF_NQN_FIELD_LEN];
757 char traddr[NVMF_TRADDR_SIZE];
758 union tsas {
759 char common[NVMF_TSAS_SIZE];
760 struct rdma {
761 __u8 qptype;
762 __u8 prtype;
763 __u8 cms;
764 __u8 resv3[5];
765 __u16 pkey;
766 __u8 resv10[246];
767 } rdma;
768 } tsas;
769};
770
771/* Discovery log page header */
772struct nvmf_disc_rsp_page_hdr {
773 __le64 genctr;
774 __le64 numrec;
775 __le16 recfmt;
776 __u8 resv14[1006];
777 struct nvmf_disc_rsp_page_entry entries[0];
778};
779
780struct nvmf_connect_command {
781 __u8 opcode;
782 __u8 resv1;
783 __u16 command_id;
784 __u8 fctype;
785 __u8 resv2[19];
786 union nvme_data_ptr dptr;
787 __le16 recfmt;
788 __le16 qid;
789 __le16 sqsize;
790 __u8 cattr;
791 __u8 resv3;
792 __le32 kato;
793 __u8 resv4[12];
794};
795
796struct nvmf_connect_data {
797 uuid_le hostid;
798 __le16 cntlid;
799 char resv4[238];
800 char subsysnqn[NVMF_NQN_FIELD_LEN];
801 char hostnqn[NVMF_NQN_FIELD_LEN];
802 char resv5[256];
803};
804
805struct nvmf_property_set_command {
806 __u8 opcode;
807 __u8 resv1;
808 __u16 command_id;
809 __u8 fctype;
810 __u8 resv2[35];
811 __u8 attrib;
812 __u8 resv3[3];
813 __le32 offset;
814 __le64 value;
815 __u8 resv4[8];
816};
817
818struct nvmf_property_get_command {
819 __u8 opcode;
820 __u8 resv1;
821 __u16 command_id;
822 __u8 fctype;
823 __u8 resv2[35];
824 __u8 attrib;
825 __u8 resv3[3];
826 __le32 offset;
827 __u8 resv4[16];
828};
829
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200830struct nvme_command {
831 union {
832 struct nvme_common_command common;
833 struct nvme_rw_command rw;
834 struct nvme_identify identify;
835 struct nvme_features features;
836 struct nvme_create_cq create_cq;
837 struct nvme_create_sq create_sq;
838 struct nvme_delete_queue delete_queue;
839 struct nvme_download_firmware dlfw;
840 struct nvme_format_cmd format;
841 struct nvme_dsm_cmd dsm;
842 struct nvme_abort_cmd abort;
Armen Baloyan725b3582016-06-06 23:20:44 +0200843 struct nvme_get_log_page_command get_log_page;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200844 struct nvmf_common_command fabrics;
845 struct nvmf_connect_command connect;
846 struct nvmf_property_set_command prop_set;
847 struct nvmf_property_get_command prop_get;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200848 };
849};
850
Christoph Hellwig7a5abb42016-06-06 23:20:49 +0200851static inline bool nvme_is_write(struct nvme_command *cmd)
852{
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200853 /*
854 * What a mess...
855 *
856 * Why can't we simply have a Fabrics In and Fabrics out command?
857 */
858 if (unlikely(cmd->common.opcode == nvme_fabrics_command))
859 return cmd->fabrics.opcode & 1;
Christoph Hellwig7a5abb42016-06-06 23:20:49 +0200860 return cmd->common.opcode & 1;
861}
862
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200863enum {
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200864 /*
865 * Generic Command Status:
866 */
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200867 NVME_SC_SUCCESS = 0x0,
868 NVME_SC_INVALID_OPCODE = 0x1,
869 NVME_SC_INVALID_FIELD = 0x2,
870 NVME_SC_CMDID_CONFLICT = 0x3,
871 NVME_SC_DATA_XFER_ERROR = 0x4,
872 NVME_SC_POWER_LOSS = 0x5,
873 NVME_SC_INTERNAL = 0x6,
874 NVME_SC_ABORT_REQ = 0x7,
875 NVME_SC_ABORT_QUEUE = 0x8,
876 NVME_SC_FUSED_FAIL = 0x9,
877 NVME_SC_FUSED_MISSING = 0xa,
878 NVME_SC_INVALID_NS = 0xb,
879 NVME_SC_CMD_SEQ_ERROR = 0xc,
880 NVME_SC_SGL_INVALID_LAST = 0xd,
881 NVME_SC_SGL_INVALID_COUNT = 0xe,
882 NVME_SC_SGL_INVALID_DATA = 0xf,
883 NVME_SC_SGL_INVALID_METADATA = 0x10,
884 NVME_SC_SGL_INVALID_TYPE = 0x11,
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200885
886 NVME_SC_SGL_INVALID_OFFSET = 0x16,
887 NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
888
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200889 NVME_SC_LBA_RANGE = 0x80,
890 NVME_SC_CAP_EXCEEDED = 0x81,
891 NVME_SC_NS_NOT_READY = 0x82,
892 NVME_SC_RESERVATION_CONFLICT = 0x83,
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200893
894 /*
895 * Command Specific Status:
896 */
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200897 NVME_SC_CQ_INVALID = 0x100,
898 NVME_SC_QID_INVALID = 0x101,
899 NVME_SC_QUEUE_SIZE = 0x102,
900 NVME_SC_ABORT_LIMIT = 0x103,
901 NVME_SC_ABORT_MISSING = 0x104,
902 NVME_SC_ASYNC_LIMIT = 0x105,
903 NVME_SC_FIRMWARE_SLOT = 0x106,
904 NVME_SC_FIRMWARE_IMAGE = 0x107,
905 NVME_SC_INVALID_VECTOR = 0x108,
906 NVME_SC_INVALID_LOG_PAGE = 0x109,
907 NVME_SC_INVALID_FORMAT = 0x10a,
908 NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
909 NVME_SC_INVALID_QUEUE = 0x10c,
910 NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
911 NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
912 NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
913 NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200914
915 /*
916 * I/O Command Set Specific - NVM commands:
917 */
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200918 NVME_SC_BAD_ATTRIBUTES = 0x180,
919 NVME_SC_INVALID_PI = 0x181,
920 NVME_SC_READ_ONLY = 0x182,
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200921
922 /*
923 * I/O Command Set Specific - Fabrics commands:
924 */
925 NVME_SC_CONNECT_FORMAT = 0x180,
926 NVME_SC_CONNECT_CTRL_BUSY = 0x181,
927 NVME_SC_CONNECT_INVALID_PARAM = 0x182,
928 NVME_SC_CONNECT_RESTART_DISC = 0x183,
929 NVME_SC_CONNECT_INVALID_HOST = 0x184,
930
931 NVME_SC_DISCOVERY_RESTART = 0x190,
932 NVME_SC_AUTH_REQUIRED = 0x191,
933
934 /*
935 * Media and Data Integrity Errors:
936 */
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200937 NVME_SC_WRITE_FAULT = 0x280,
938 NVME_SC_READ_ERROR = 0x281,
939 NVME_SC_GUARD_CHECK = 0x282,
940 NVME_SC_APPTAG_CHECK = 0x283,
941 NVME_SC_REFTAG_CHECK = 0x284,
942 NVME_SC_COMPARE_FAILED = 0x285,
943 NVME_SC_ACCESS_DENIED = 0x286,
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200944
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200945 NVME_SC_DNR = 0x4000,
946};
947
948struct nvme_completion {
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200949 /*
950 * Used by Admin and Fabrics commands to return data:
951 */
952 union {
953 __le16 result16;
954 __le32 result;
955 __le64 result64;
956 };
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200957 __le16 sq_head; /* how much of this queue may be reclaimed */
958 __le16 sq_id; /* submission queue that generated this entry */
959 __u16 command_id; /* of the command which completed */
960 __le16 status; /* did the command fail, and if so, why? */
961};
962
963#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
964
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500965#endif /* _LINUX_NVME_H */