/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <linux/types.h>
#include <linux/uuid.h>

/* NQN names in command fields are specified with a fixed size of 256 bytes */
#define NVMF_NQN_FIELD_LEN	256

/* However, the maximum length of an NVMe Qualified Name itself is 223 bytes */
#define NVMF_NQN_SIZE		223

#define NVMF_TRSVCID_SIZE	32
#define NVMF_TRADDR_SIZE	256
#define NVMF_TSAS_SIZE		256

#define NVME_DISC_SUBSYS_NAME	"nqn.2014-08.org.nvmexpress.discovery"

#define NVME_RDMA_IP_PORT	4420

enum nvme_subsys_type {
	NVME_NQN_DISC	= 1,		/* Discovery type target subsystem */
	NVME_NQN_NVME	= 2,		/* NVME type target subsystem */
};

/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
	NVMF_ADDR_FAMILY_PCI	= 0,	/* PCIe */
	NVMF_ADDR_FAMILY_IP4	= 1,	/* IP4 */
	NVMF_ADDR_FAMILY_IP6	= 2,	/* IP6 */
	NVMF_ADDR_FAMILY_IB	= 3,	/* InfiniBand */
	NVMF_ADDR_FAMILY_FC	= 4,	/* Fibre Channel */
};

/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
	NVMF_TRTYPE_RDMA	= 1,	/* RDMA */
	NVMF_TRTYPE_FC		= 2,	/* Fibre Channel */
	NVMF_TRTYPE_LOOP	= 254,	/* Reserved for host usage */
	NVMF_TRTYPE_MAX,
};

/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
	NVMF_TREQ_NOT_SPECIFIED	= 0,	/* Not specified */
	NVMF_TREQ_REQUIRED	= 1,	/* Required */
	NVMF_TREQ_NOT_REQUIRED	= 2,	/* Not Required */
};

/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
 * RDMA_QPTYPE field
 */
enum {
	NVMF_RDMA_QPTYPE_CONNECTED	= 1, /* Reliable Connected */
	NVMF_RDMA_QPTYPE_DATAGRAM	= 2, /* Reliable Datagram */
};

/* RDMA Provider Type codes for Discovery Log Page entry TSAS
 * RDMA_PRTYPE field
 */
enum {
	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 1, /* No Provider Specified */
	NVMF_RDMA_PRTYPE_IB		= 2, /* InfiniBand */
	NVMF_RDMA_PRTYPE_ROCE		= 3, /* InfiniBand RoCE */
	NVMF_RDMA_PRTYPE_ROCEV2		= 4, /* InfiniBand RoCEV2 */
	NVMF_RDMA_PRTYPE_IWARP		= 5, /* IWARP */
};

/* RDMA Connection Management Service Type codes for Discovery Log Page
 * entry TSAS RDMA_CMS field
 */
enum {
	NVMF_RDMA_CMS_RDMA_CM	= 1, /* Sockets based endpoint addressing */
};

#define NVMF_AQ_DEPTH		32

enum {
	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
	NVME_REG_VS	= 0x0008,	/* Version */
	NVME_REG_INTMS	= 0x000c,	/* Interrupt Mask Set */
	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Clear */
	NVME_REG_CC	= 0x0014,	/* Controller Configuration */
	NVME_REG_CSTS	= 0x001c,	/* Controller Status */
	NVME_REG_NSSR	= 0x0020,	/* NVM Subsystem Reset */
	NVME_REG_AQA	= 0x0024,	/* Admin Queue Attributes */
	NVME_REG_ASQ	= 0x0028,	/* Admin SQ Base Address */
	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
	NVME_REG_CMBLOC	= 0x0038,	/* Controller Memory Buffer Location */
	NVME_REG_CMBSZ	= 0x003c,	/* Controller Memory Buffer Size */
	NVME_REG_DBS	= 0x1000,	/* SQ 0 Tail Doorbell */
};

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)

#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz)	(((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz)	(((cmbsz) >> 8) & 0xf)

#define NVME_CMB_WDS(cmbsz)	((cmbsz) & 0x10)
#define NVME_CMB_RDS(cmbsz)	((cmbsz) & 0x8)
#define NVME_CMB_LISTS(cmbsz)	((cmbsz) & 0x4)
#define NVME_CMB_CQS(cmbsz)	((cmbsz) & 0x2)
#define NVME_CMB_SQS(cmbsz)	((cmbsz) & 0x1)

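/*
 * Example (illustrative sketch, not an official definition): decoding raw
 * CAP register contents with the accessor macros above.  The helper names
 * are invented for this example.  MQES is a 0's based value, and DSTRD
 * encodes the doorbell stride as 2^(2 + DSTRD) bytes.
 */
static inline u32 nvme_example_max_queue_entries(u64 cap)
{
	return NVME_CAP_MQES(cap) + 1;		/* deepest supported queue */
}

static inline u32 nvme_example_db_stride_bytes(u64 cap)
{
	return 1U << (2 + NVME_CAP_STRIDE(cap));	/* doorbell stride in bytes */
}
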
/*
 * Submission and Completion Queue Entry Sizes for the NVM command set.
 * (In bytes and specified as a power of two (2^n)).
 */
#define NVME_NVM_IOSQES		6
#define NVME_NVM_IOCQES		4

enum {
	NVME_CC_ENABLE		= 1 << 0,
	NVME_CC_CSS_NVM		= 0 << 4,
	NVME_CC_MPS_SHIFT	= 7,
	NVME_CC_ARB_RR		= 0 << 11,
	NVME_CC_ARB_WRRU	= 1 << 11,
	NVME_CC_ARB_VS		= 7 << 11,
	NVME_CC_SHN_NONE	= 0 << 14,
	NVME_CC_SHN_NORMAL	= 1 << 14,
	NVME_CC_SHN_ABRUPT	= 2 << 14,
	NVME_CC_SHN_MASK	= 3 << 14,
	NVME_CC_IOSQES		= NVME_NVM_IOSQES << 16,
	NVME_CC_IOCQES		= NVME_NVM_IOCQES << 20,
	NVME_CSTS_RDY		= 1 << 0,
	NVME_CSTS_CFS		= 1 << 1,
	NVME_CSTS_NSSRO		= 1 << 4,
	NVME_CSTS_SHST_NORMAL	= 0 << 2,
	NVME_CSTS_SHST_OCCUR	= 1 << 2,
	NVME_CSTS_SHST_CMPLT	= 2 << 2,
	NVME_CSTS_SHST_MASK	= 3 << 2,
};

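/*
 * Example (illustrative sketch, not an official definition): composing a
 * Controller Configuration (CC) value before enabling a controller, roughly
 * mirroring what a host driver does.  The helper name and the page_shift
 * parameter are invented for this example; MPS is encoded such that the
 * host memory page size is 2^(12 + MPS).
 */
static inline u32 nvme_example_build_cc(unsigned int page_shift)
{
	u32 cc = 0;

	cc |= NVME_CC_CSS_NVM;				/* NVM command set */
	cc |= (page_shift - 12) << NVME_CC_MPS_SHIFT;	/* memory page size */
	cc |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;	/* round-robin, no shutdown */
	cc |= NVME_CC_IOSQES | NVME_CC_IOCQES;		/* 64B SQEs, 16B CQEs */
	cc |= NVME_CC_ENABLE;
	return cc;
}
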
struct nvme_id_power_state {
	__le16			max_power;	/* centiwatts */
	__u8			rsvd2;
	__u8			flags;
	__le32			entry_lat;	/* microseconds */
	__le32			exit_lat;	/* microseconds */
	__u8			read_tput;
	__u8			read_lat;
	__u8			write_tput;
	__u8			write_lat;
	__le16			idle_power;
	__u8			idle_scale;
	__u8			rsvd19;
	__le16			active_power;
	__u8			active_work_scale;
	__u8			rsvd23[9];
};

enum {
	NVME_PS_FLAGS_MAX_POWER_SCALE	= 1 << 0,
	NVME_PS_FLAGS_NON_OP_STATE	= 1 << 1,
};

struct nvme_id_ctrl {
	__le16			vid;
	__le16			ssvid;
	char			sn[20];
	char			mn[40];
	char			fr[8];
	__u8			rab;
	__u8			ieee[3];
	__u8			cmic;
	__u8			mdts;
	__le16			cntlid;
	__le32			ver;
	__le32			rtd3r;
	__le32			rtd3e;
	__le32			oaes;
	__le32			ctratt;
	__u8			rsvd100[156];
	__le16			oacs;
	__u8			acl;
	__u8			aerl;
	__u8			frmw;
	__u8			lpa;
	__u8			elpe;
	__u8			npss;
	__u8			avscc;
	__u8			apsta;
	__le16			wctemp;
	__le16			cctemp;
	__le16			mtfa;
	__le32			hmpre;
	__le32			hmmin;
	__u8			tnvmcap[16];
	__u8			unvmcap[16];
	__le32			rpmbs;
	__le16			edstt;
	__u8			dsto;
	__u8			fwug;
	__le16			kas;
	__le16			hctma;
	__le16			mntmt;
	__le16			mxtmt;
	__le32			sanicap;
	__u8			rsvd332[180];
	__u8			sqes;
	__u8			cqes;
	__le16			maxcmd;
	__le32			nn;
	__le16			oncs;
	__le16			fuses;
	__u8			fna;
	__u8			vwc;
	__le16			awun;
	__le16			awupf;
	__u8			nvscc;
	__u8			rsvd531;
	__le16			acwu;
	__u8			rsvd534[2];
	__le32			sgls;
	__u8			rsvd540[228];
	char			subnqn[256];
	__u8			rsvd1024[768];
	__le32			ioccsz;
	__le32			iorcsz;
	__le16			icdoff;
	__u8			ctrattr;
	__u8			msdbd;
	__u8			rsvd1804[244];
	struct nvme_id_power_state	psd[32];
	__u8			vs[1024];
};

enum {
	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
	NVME_CTRL_ONCS_DSM			= 1 << 2,
	NVME_CTRL_ONCS_WRITE_ZEROES		= 1 << 3,
	NVME_CTRL_VWC_PRESENT			= 1 << 0,
	NVME_CTRL_OACS_SEC_SUPP			= 1 << 0,
	NVME_CTRL_OACS_DBBUF_SUPP		= 1 << 7,
};

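/*
 * Example (illustrative sketch, not an official definition): checking an
 * optional NVM command support (ONCS) bit from an Identify Controller
 * result.  Assumes the usual kernel endianness helper le16_to_cpu() is
 * visible to the includer; the helper name is invented for this example.
 */
static inline bool nvme_example_supports_write_zeroes(const struct nvme_id_ctrl *id)
{
	return le16_to_cpu(id->oncs) & NVME_CTRL_ONCS_WRITE_ZEROES;
}
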
struct nvme_lbaf {
	__le16			ms;
	__u8			ds;
	__u8			rp;
};

struct nvme_id_ns {
	__le64			nsze;
	__le64			ncap;
	__le64			nuse;
	__u8			nsfeat;
	__u8			nlbaf;
	__u8			flbas;
	__u8			mc;
	__u8			dpc;
	__u8			dps;
	__u8			nmic;
	__u8			rescap;
	__u8			fpi;
	__u8			rsvd33;
	__le16			nawun;
	__le16			nawupf;
	__le16			nacwu;
	__le16			nabsn;
	__le16			nabo;
	__le16			nabspf;
	__le16			noiob;
	__u8			nvmcap[16];
	__u8			rsvd64[40];
	__u8			nguid[16];
	__u8			eui64[8];
	struct nvme_lbaf	lbaf[16];
	__u8			rsvd192[192];
	__u8			vs[3712];
};

enum {
	NVME_ID_CNS_NS			= 0x00,
	NVME_ID_CNS_CTRL		= 0x01,
	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
	NVME_ID_CNS_NS_DESC_LIST	= 0x03,
	NVME_ID_CNS_NS_PRESENT_LIST	= 0x10,
	NVME_ID_CNS_NS_PRESENT		= 0x11,
	NVME_ID_CNS_CTRL_NS_LIST	= 0x12,
	NVME_ID_CNS_CTRL_LIST		= 0x13,
};

enum {
	NVME_NS_FEAT_THIN	= 1 << 0,
	NVME_NS_FLBAS_LBA_MASK	= 0xf,
	NVME_NS_FLBAS_META_EXT	= 0x10,
	NVME_LBAF_RP_BEST	= 0,
	NVME_LBAF_RP_BETTER	= 1,
	NVME_LBAF_RP_GOOD	= 2,
	NVME_LBAF_RP_DEGRADED	= 3,
	NVME_NS_DPC_PI_LAST	= 1 << 4,
	NVME_NS_DPC_PI_FIRST	= 1 << 3,
	NVME_NS_DPC_PI_TYPE3	= 1 << 2,
	NVME_NS_DPC_PI_TYPE2	= 1 << 1,
	NVME_NS_DPC_PI_TYPE1	= 1 << 0,
	NVME_NS_DPS_PI_FIRST	= 1 << 3,
	NVME_NS_DPS_PI_MASK	= 0x7,
	NVME_NS_DPS_PI_TYPE1	= 1,
	NVME_NS_DPS_PI_TYPE2	= 2,
	NVME_NS_DPS_PI_TYPE3	= 3,
};

struct nvme_ns_id_desc {
	__u8 nidt;
	__u8 nidl;
	__le16 reserved;
};

#define NVME_NIDT_EUI64_LEN	8
#define NVME_NIDT_NGUID_LEN	16
#define NVME_NIDT_UUID_LEN	16

enum {
	NVME_NIDT_EUI64		= 0x01,
	NVME_NIDT_NGUID		= 0x02,
	NVME_NIDT_UUID		= 0x03,
};

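/*
 * Example (illustrative sketch, not an official definition): walking the
 * Namespace Identification Descriptor list returned by Identify with
 * CNS = NVME_ID_CNS_NS_DESC_LIST.  Each nvme_ns_id_desc header is followed
 * by nidl bytes of payload; a nidt of 0 terminates the list.  The helper
 * name is invented for this example.
 */
static inline const __u8 *nvme_example_find_nid(const void *buf, size_t len,
		__u8 nidt)
{
	size_t pos = 0;

	while (pos + sizeof(struct nvme_ns_id_desc) <= len) {
		const struct nvme_ns_id_desc *cur = buf + pos;

		if (!cur->nidt || !cur->nidl)
			return NULL;			/* end of list */
		if (cur->nidt == nidt)
			return (const __u8 *)(cur + 1);	/* payload follows header */
		pos += sizeof(*cur) + cur->nidl;
	}
	return NULL;
}
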
struct nvme_smart_log {
	__u8			critical_warning;
	__u8			temperature[2];
	__u8			avail_spare;
	__u8			spare_thresh;
	__u8			percent_used;
	__u8			rsvd6[26];
	__u8			data_units_read[16];
	__u8			data_units_written[16];
	__u8			host_reads[16];
	__u8			host_writes[16];
	__u8			ctrl_busy_time[16];
	__u8			power_cycles[16];
	__u8			power_on_hours[16];
	__u8			unsafe_shutdowns[16];
	__u8			media_errors[16];
	__u8			num_err_log_entries[16];
	__le32			warning_temp_time;
	__le32			critical_comp_time;
	__le16			temp_sensor[8];
	__u8			rsvd216[296];
};

enum {
	NVME_SMART_CRIT_SPARE		= 1 << 0,
	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
	NVME_SMART_CRIT_MEDIA		= 1 << 3,
	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
};

enum {
	NVME_AER_NOTICE_NS_CHANGED	= 0x0002,
};

struct nvme_lba_range_type {
	__u8			type;
	__u8			attributes;
	__u8			rsvd2[14];
	__u64			slba;
	__u64			nlb;
	__u8			guid[16];
	__u8			rsvd48[16];
};

enum {
	NVME_LBART_TYPE_FS	= 0x01,
	NVME_LBART_TYPE_RAID	= 0x02,
	NVME_LBART_TYPE_CACHE	= 0x03,
	NVME_LBART_TYPE_SWAP	= 0x04,

	NVME_LBART_ATTRIB_TEMP	= 1 << 0,
	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
};

struct nvme_reservation_status {
	__le32	gen;
	__u8	rtype;
	__u8	regctl[2];
	__u8	resv5[2];
	__u8	ptpls;
	__u8	resv10[13];
	struct {
		__le16	cntlid;
		__u8	rcsts;
		__u8	resv3[5];
		__le64	hostid;
		__le64	rkey;
	} regctl_ds[];
};

enum nvme_async_event_type {
	NVME_AER_TYPE_ERROR	= 0,
	NVME_AER_TYPE_SMART	= 1,
	NVME_AER_TYPE_NOTICE	= 2,
};

/* I/O commands */

enum nvme_opcode {
	nvme_cmd_flush		= 0x00,
	nvme_cmd_write		= 0x01,
	nvme_cmd_read		= 0x02,
	nvme_cmd_write_uncor	= 0x04,
	nvme_cmd_compare	= 0x05,
	nvme_cmd_write_zeroes	= 0x08,
	nvme_cmd_dsm		= 0x09,
	nvme_cmd_resv_register	= 0x0d,
	nvme_cmd_resv_report	= 0x0e,
	nvme_cmd_resv_acquire	= 0x11,
	nvme_cmd_resv_release	= 0x15,
};

/*
 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
 * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
 * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
 *                            request subtype
 */
enum {
	NVME_SGL_FMT_ADDRESS		= 0x00,
	NVME_SGL_FMT_OFFSET		= 0x01,
	NVME_SGL_FMT_INVALIDATE		= 0x0f,
};

/*
 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * For struct nvme_sgl_desc:
 *   @NVME_SGL_FMT_DATA_DESC:		data block descriptor
 *   @NVME_SGL_FMT_SEG_DESC:		sgl segment descriptor
 *   @NVME_SGL_FMT_LAST_SEG_DESC:	last sgl segment descriptor
 *
 * For struct nvme_keyed_sgl_desc:
 *   @NVME_KEY_SGL_FMT_DATA_DESC:	keyed data block descriptor
 */
enum {
	NVME_SGL_FMT_DATA_DESC		= 0x00,
	NVME_SGL_FMT_SEG_DESC		= 0x02,
	NVME_SGL_FMT_LAST_SEG_DESC	= 0x03,
	NVME_KEY_SGL_FMT_DATA_DESC	= 0x04,
};

struct nvme_sgl_desc {
	__le64	addr;
	__le32	length;
	__u8	rsvd[3];
	__u8	type;
};

struct nvme_keyed_sgl_desc {
	__le64	addr;
	__u8	length[3];
	__u8	key[4];
	__u8	type;
};

union nvme_data_ptr {
	struct {
		__le64	prp1;
		__le64	prp2;
	};
	struct nvme_sgl_desc	sgl;
	struct nvme_keyed_sgl_desc ksgl;
};

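/*
 * Example (illustrative sketch, not an official definition): filling in a
 * data block SGL descriptor.  The type byte carries the descriptor type in
 * its upper 4 bits and the subtype in its lower 4 bits.  Assumes the usual
 * kernel cpu_to_le*() helpers are visible to the includer; the helper name
 * is invented for this example.
 */
static inline void nvme_example_set_data_sgl(struct nvme_sgl_desc *sgl,
		u64 addr, u32 length)
{
	sgl->addr = cpu_to_le64(addr);
	sgl->length = cpu_to_le32(length);
	sgl->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_ADDRESS;
}
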
/*
 * Lowest two bits of our flags field (FUSE field in the spec):
 *
 * @NVME_CMD_FUSE_FIRST:   Fused Operation, first command
 * @NVME_CMD_FUSE_SECOND:  Fused Operation, second command
 *
 * Highest two bits in our flags field (PSDT field in the spec):
 *
 * @NVME_CMD_SGL_METABUF:  Use SGLs for this transfer.
 *	If used, MPTR contains the address of a single physical buffer
 *	(byte aligned).
 * @NVME_CMD_SGL_METASEG:  Use SGLs for this transfer.
 *	If used, MPTR contains the address of an SGL segment containing
 *	exactly 1 SGL descriptor (qword aligned).
 */
enum {
	NVME_CMD_FUSE_FIRST	= (1 << 0),
	NVME_CMD_FUSE_SECOND	= (1 << 1),

	NVME_CMD_SGL_METABUF	= (1 << 6),
	NVME_CMD_SGL_METASEG	= (1 << 7),
	NVME_CMD_SGL_ALL	= NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
};

struct nvme_common_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le32			cdw2[2];
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le32			cdw10[6];
};

struct nvme_rw_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le64			slba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le32			reftag;
	__le16			apptag;
	__le16			appmask;
};

enum {
	NVME_RW_LR			= 1 << 15,
	NVME_RW_FUA			= 1 << 14,
	NVME_RW_DSM_FREQ_UNSPEC		= 0,
	NVME_RW_DSM_FREQ_TYPICAL	= 1,
	NVME_RW_DSM_FREQ_RARE		= 2,
	NVME_RW_DSM_FREQ_READS		= 3,
	NVME_RW_DSM_FREQ_WRITES		= 4,
	NVME_RW_DSM_FREQ_RW		= 5,
	NVME_RW_DSM_FREQ_ONCE		= 6,
	NVME_RW_DSM_FREQ_PREFETCH	= 7,
	NVME_RW_DSM_FREQ_TEMP		= 8,
	NVME_RW_DSM_LATENCY_NONE	= 0 << 4,
	NVME_RW_DSM_LATENCY_IDLE	= 1 << 4,
	NVME_RW_DSM_LATENCY_NORM	= 2 << 4,
	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
	NVME_RW_DSM_COMPRESSED		= 1 << 7,
	NVME_RW_PRINFO_PRCHK_REF	= 1 << 10,
	NVME_RW_PRINFO_PRCHK_APP	= 1 << 11,
	NVME_RW_PRINFO_PRCHK_GUARD	= 1 << 12,
	NVME_RW_PRINFO_PRACT		= 1 << 13,
};

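/*
 * Example (illustrative sketch, not an official definition): filling in a
 * Read command.  slba and length address logical blocks, and length (NLB)
 * is 0's based, so nr_blocks - 1 is encoded.  Assumes the usual kernel
 * cpu_to_le*() helpers; the helper name is invented for this example.
 */
static inline void nvme_example_setup_read(struct nvme_rw_command *rw,
		u32 nsid, u64 slba, u16 nr_blocks, bool fua)
{
	rw->opcode = nvme_cmd_read;
	rw->nsid = cpu_to_le32(nsid);
	rw->slba = cpu_to_le64(slba);
	rw->length = cpu_to_le16(nr_blocks - 1);	/* NLB is 0's based */
	if (fua)
		rw->control = cpu_to_le16(NVME_RW_FUA);	/* force unit access */
}
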
struct nvme_dsm_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			nr;
	__le32			attributes;
	__u32			rsvd12[4];
};

enum {
	NVME_DSMGMT_IDR		= 1 << 0,
	NVME_DSMGMT_IDW		= 1 << 1,
	NVME_DSMGMT_AD		= 1 << 2,
};

#define NVME_DSM_MAX_RANGES	256

struct nvme_dsm_range {
	__le32			cattr;
	__le32			nlb;
	__le64			slba;
};

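/*
 * Example (illustrative sketch, not an official definition): setting up a
 * Dataset Management deallocate (discard) of a single LBA range.  nr is
 * 0's based, so one range is encoded as 0, and the range buffer itself is
 * transferred via cmd->dptr (not shown).  Assumes the usual kernel
 * cpu_to_le*() helpers; the helper name is invented for this example.
 */
static inline void nvme_example_setup_discard(struct nvme_dsm_cmd *cmd,
		struct nvme_dsm_range *range, u32 nsid, u64 slba, u32 nlb)
{
	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(nlb);
	range->slba = cpu_to_le64(slba);

	cmd->opcode = nvme_cmd_dsm;
	cmd->nsid = cpu_to_le32(nsid);
	cmd->nr = cpu_to_le32(0);			/* one range, 0's based */
	cmd->attributes = cpu_to_le32(NVME_DSMGMT_AD);	/* deallocate */
}
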
struct nvme_write_zeroes_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	union nvme_data_ptr	dptr;
	__le64			slba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le32			reftag;
	__le16			apptag;
	__le16			appmask;
};

/* Features */

struct nvme_feat_auto_pst {
	__le64 entries[32];
};

enum {
	NVME_HOST_MEM_ENABLE	= (1 << 0),
	NVME_HOST_MEM_RETURN	= (1 << 1),
};

/* Admin commands */

enum nvme_admin_opcode {
	nvme_admin_delete_sq		= 0x00,
	nvme_admin_create_sq		= 0x01,
	nvme_admin_get_log_page		= 0x02,
	nvme_admin_delete_cq		= 0x04,
	nvme_admin_create_cq		= 0x05,
	nvme_admin_identify		= 0x06,
	nvme_admin_abort_cmd		= 0x08,
	nvme_admin_set_features		= 0x09,
	nvme_admin_get_features		= 0x0a,
	nvme_admin_async_event		= 0x0c,
	nvme_admin_ns_mgmt		= 0x0d,
	nvme_admin_activate_fw		= 0x10,
	nvme_admin_download_fw		= 0x11,
	nvme_admin_ns_attach		= 0x15,
	nvme_admin_keep_alive		= 0x18,
	nvme_admin_dbbuf		= 0x7C,
	nvme_admin_format_nvm		= 0x80,
	nvme_admin_security_send	= 0x81,
	nvme_admin_security_recv	= 0x82,
};

enum {
	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
	NVME_CQ_IRQ_ENABLED	= (1 << 1),
	NVME_SQ_PRIO_URGENT	= (0 << 1),
	NVME_SQ_PRIO_HIGH	= (1 << 1),
	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
	NVME_SQ_PRIO_LOW	= (3 << 1),
	NVME_FEAT_ARBITRATION	= 0x01,
	NVME_FEAT_POWER_MGMT	= 0x02,
	NVME_FEAT_LBA_RANGE	= 0x03,
	NVME_FEAT_TEMP_THRESH	= 0x04,
	NVME_FEAT_ERR_RECOVERY	= 0x05,
	NVME_FEAT_VOLATILE_WC	= 0x06,
	NVME_FEAT_NUM_QUEUES	= 0x07,
	NVME_FEAT_IRQ_COALESCE	= 0x08,
	NVME_FEAT_IRQ_CONFIG	= 0x09,
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_AUTO_PST	= 0x0c,
	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
	NVME_FEAT_KATO		= 0x0f,
	NVME_FEAT_SW_PROGRESS	= 0x80,
	NVME_FEAT_HOST_ID	= 0x81,
	NVME_FEAT_RESV_MASK	= 0x82,
	NVME_FEAT_RESV_PERSIST	= 0x83,
	NVME_LOG_ERROR		= 0x01,
	NVME_LOG_SMART		= 0x02,
	NVME_LOG_FW_SLOT	= 0x03,
	NVME_LOG_DISC		= 0x70,
	NVME_LOG_RESERVATION	= 0x80,
	NVME_FWACT_REPL		= (0 << 3),
	NVME_FWACT_REPL_ACTV	= (1 << 3),
	NVME_FWACT_ACTV		= (2 << 3),
};

struct nvme_identify {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			cns;
	__u8			rsvd3;
	__le16			ctrlid;
	__u32			rsvd11[5];
};

#define NVME_IDENTIFY_DATA_SIZE 4096

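/*
 * Example (illustrative sketch, not an official definition): building an
 * Identify Controller command.  The NVME_IDENTIFY_DATA_SIZE byte result
 * buffer is returned through the data pointer (not shown).  The helper
 * name is invented for this example.
 */
static inline void nvme_example_setup_identify_ctrl(struct nvme_identify *id)
{
	id->opcode = nvme_admin_identify;
	id->cns = NVME_ID_CNS_CTRL;
	id->nsid = 0;		/* CNS 01h does not address a namespace */
}
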
struct nvme_features {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			fid;
	__le32			dword11;
	__le32			dword12;
	__le32			dword13;
	__le32			dword14;
	__le32			dword15;
};

struct nvme_host_mem_buf_desc {
	__le64			addr;
	__le32			size;
	__u32			rsvd;
};

struct nvme_create_cq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__u64			rsvd8;
	__le16			cqid;
	__le16			qsize;
	__le16			cq_flags;
	__le16			irq_vector;
	__u32			rsvd12[4];
};

struct nvme_create_sq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__u64			rsvd8;
	__le16			sqid;
	__le16			qsize;
	__le16			sq_flags;
	__le16			cqid;
	__u32			rsvd12[4];
};

struct nvme_delete_queue {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			qid;
	__u16			rsvd10;
	__u32			rsvd11[5];
};

struct nvme_abort_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			sqid;
	__u16			cid;
	__u32			rsvd11[5];
};

struct nvme_download_firmware {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	union nvme_data_ptr	dptr;
	__le32			numd;
	__le32			offset;
	__u32			rsvd12[4];
};

struct nvme_format_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[4];
	__le32			cdw10;
	__u32			rsvd11[5];
};

struct nvme_get_log_page_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			lid;
	__u8			rsvd10;
	__le16			numdl;
	__le16			numdu;
	__u16			rsvd11;
	__le32			lpol;
	__le32			lpou;
	__u32			rsvd14[2];
};

/*
 * Fabrics subcommands.
 */
enum nvmf_fabrics_opcode {
	nvme_fabrics_command		= 0x7f,
};

enum nvmf_capsule_command {
	nvme_fabrics_type_property_set	= 0x00,
	nvme_fabrics_type_connect	= 0x01,
	nvme_fabrics_type_property_get	= 0x04,
};

struct nvmf_common_command {
	__u8	opcode;
	__u8	resv1;
	__u16	command_id;
	__u8	fctype;
	__u8	resv2[35];
	__u8	ts[24];
};

/*
 * The legal cntlid range an NVMe Target will provide.
 * Note that a cntlid of value 0 is considered illegal in the fabrics world.
 * Devices based on earlier specs did not have the subsystem concept;
 * therefore, those devices had their cntlid value set to 0 as a result.
 */
#define NVME_CNTLID_MIN		1
#define NVME_CNTLID_MAX		0xffef
#define NVME_CNTLID_DYNAMIC	0xffff

#define MAX_DISC_LOGS	255

/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
	__u8		trtype;
	__u8		adrfam;
	__u8		subtype;
	__u8		treq;
	__le16		portid;
	__le16		cntlid;
	__le16		asqsz;
	__u8		resv8[22];
	char		trsvcid[NVMF_TRSVCID_SIZE];
	__u8		resv64[192];
	char		subnqn[NVMF_NQN_FIELD_LEN];
	char		traddr[NVMF_TRADDR_SIZE];
	union tsas {
		char		common[NVMF_TSAS_SIZE];
		struct rdma {
			__u8	qptype;
			__u8	prtype;
			__u8	cms;
			__u8	resv3[5];
			__u16	pkey;
			__u8	resv10[246];
		} rdma;
	} tsas;
};

/* Discovery log page header */
struct nvmf_disc_rsp_page_hdr {
	__le64		genctr;
	__le64		numrec;
	__le16		recfmt;
	__u8		resv14[1006];
	struct nvmf_disc_rsp_page_entry entries[0];
};

struct nvmf_connect_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[19];
	union nvme_data_ptr dptr;
	__le16		recfmt;
	__le16		qid;
	__le16		sqsize;
	__u8		cattr;
	__u8		resv3;
	__le32		kato;
	__u8		resv4[12];
};

struct nvmf_connect_data {
	uuid_t		hostid;
	__le16		cntlid;
	char		resv4[238];
	char		subsysnqn[NVMF_NQN_FIELD_LEN];
	char		hostnqn[NVMF_NQN_FIELD_LEN];
	char		resv5[256];
};

struct nvmf_property_set_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;
	__u8		resv3[3];
	__le32		offset;
	__le64		value;
	__u8		resv4[8];
};

struct nvmf_property_get_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;
	__u8		resv2[35];
	__u8		attrib;
	__u8		resv3[3];
	__le32		offset;
	__u8		resv4[16];
};

struct nvme_dbbuf {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__le64			prp2;
	__u32			rsvd12[6];
};

struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_write_zeroes_cmd write_zeroes;
		struct nvme_abort_cmd abort;
		struct nvme_get_log_page_command get_log_page;
		struct nvmf_common_command fabrics;
		struct nvmf_connect_command connect;
		struct nvmf_property_set_command prop_set;
		struct nvmf_property_get_command prop_get;
		struct nvme_dbbuf dbbuf;
	};
};

static inline bool nvme_is_write(struct nvme_command *cmd)
{
	/*
	 * What a mess...
	 *
	 * Why can't we simply have a Fabrics In and Fabrics out command?
	 */
	if (unlikely(cmd->common.opcode == nvme_fabrics_command))
		return cmd->fabrics.opcode & 1;
	return cmd->common.opcode & 1;
}

enum {
	/*
	 * Generic Command Status:
	 */
	NVME_SC_SUCCESS			= 0x0,
	NVME_SC_INVALID_OPCODE		= 0x1,
	NVME_SC_INVALID_FIELD		= 0x2,
	NVME_SC_CMDID_CONFLICT		= 0x3,
	NVME_SC_DATA_XFER_ERROR		= 0x4,
	NVME_SC_POWER_LOSS		= 0x5,
	NVME_SC_INTERNAL		= 0x6,
	NVME_SC_ABORT_REQ		= 0x7,
	NVME_SC_ABORT_QUEUE		= 0x8,
	NVME_SC_FUSED_FAIL		= 0x9,
	NVME_SC_FUSED_MISSING		= 0xa,
	NVME_SC_INVALID_NS		= 0xb,
	NVME_SC_CMD_SEQ_ERROR		= 0xc,
	NVME_SC_SGL_INVALID_LAST	= 0xd,
	NVME_SC_SGL_INVALID_COUNT	= 0xe,
	NVME_SC_SGL_INVALID_DATA	= 0xf,
	NVME_SC_SGL_INVALID_METADATA	= 0x10,
	NVME_SC_SGL_INVALID_TYPE	= 0x11,

	NVME_SC_SGL_INVALID_OFFSET	= 0x16,
	NVME_SC_SGL_INVALID_SUBTYPE	= 0x17,

	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,
	NVME_SC_NS_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT	= 0x83,

	/*
	 * Command Specific Status:
	 */
	NVME_SC_CQ_INVALID		= 0x100,
	NVME_SC_QID_INVALID		= 0x101,
	NVME_SC_QUEUE_SIZE		= 0x102,
	NVME_SC_ABORT_LIMIT		= 0x103,
	NVME_SC_ABORT_MISSING		= 0x104,
	NVME_SC_ASYNC_LIMIT		= 0x105,
	NVME_SC_FIRMWARE_SLOT		= 0x106,
	NVME_SC_FIRMWARE_IMAGE		= 0x107,
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
	NVME_SC_INVALID_QUEUE		= 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
	NVME_SC_FW_NEEDS_RESET		= 0x111,
	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
	NVME_SC_FW_ACIVATE_PROHIBITED	= 0x113,
	NVME_SC_OVERLAPPING_RANGE	= 0x114,
	NVME_SC_NS_INSUFFICENT_CAP	= 0x115,
	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
	NVME_SC_NS_IS_PRIVATE		= 0x119,
	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
	NVME_SC_CTRL_LIST_INVALID	= 0x11c,

	/*
	 * I/O Command Set Specific - NVM commands:
	 */
	NVME_SC_BAD_ATTRIBUTES		= 0x180,
	NVME_SC_INVALID_PI		= 0x181,
	NVME_SC_READ_ONLY		= 0x182,
	NVME_SC_ONCS_NOT_SUPPORTED	= 0x183,

	/*
	 * I/O Command Set Specific - Fabrics commands:
	 */
	NVME_SC_CONNECT_FORMAT		= 0x180,
	NVME_SC_CONNECT_CTRL_BUSY	= 0x181,
	NVME_SC_CONNECT_INVALID_PARAM	= 0x182,
	NVME_SC_CONNECT_RESTART_DISC	= 0x183,
	NVME_SC_CONNECT_INVALID_HOST	= 0x184,

	NVME_SC_DISCOVERY_RESTART	= 0x190,
	NVME_SC_AUTH_REQUIRED		= 0x191,

	/*
	 * Media and Data Integrity Errors:
	 */
	NVME_SC_WRITE_FAULT		= 0x280,
	NVME_SC_READ_ERROR		= 0x281,
	NVME_SC_GUARD_CHECK		= 0x282,
	NVME_SC_APPTAG_CHECK		= 0x283,
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
	NVME_SC_UNWRITTEN_BLOCK		= 0x287,

	NVME_SC_DNR			= 0x4000,

	/*
	 * FC Transport-specific error status values for NVME commands
	 *
	 * Transport-specific status code values must be in the range 0xB0..0xBF
	 */

	/* Generic FC failure - catchall */
	NVME_SC_FC_TRANSPORT_ERROR	= 0x00B0,

	/* I/O failure due to FC ABTS'd */
	NVME_SC_FC_TRANSPORT_ABORTED	= 0x00B1,
};

struct nvme_completion {
	/*
	 * Used by Admin and Fabrics commands to return data:
	 */
	union nvme_result {
		__le16	u16;
		__le32	u32;
		__le64	u64;
	} result;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};

#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

#define NVME_MAJOR(ver)		((ver) >> 16)
#define NVME_MINOR(ver)		(((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver)	((ver) & 0xff)

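/*
 * Example (illustrative sketch, not an official definition): comparing a
 * controller version read from the VS register against a required spec
 * revision using the macros above.  The helper name is invented for this
 * example.
 */
static inline bool nvme_example_version_at_least(u32 vs, int major, int minor)
{
	return NVME_MAJOR(vs) > major ||
	       (NVME_MAJOR(vs) == major && NVME_MINOR(vs) >= minor);
}
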
#endif /* _LINUX_NVME_H */