/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <uapi/linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

struct nvme_bar {
	__u64	cap;	/* Controller Capabilities */
	__u32	vs;	/* Version */
	__u32	intms;	/* Interrupt Mask Set */
	__u32	intmc;	/* Interrupt Mask Clear */
	__u32	cc;	/* Controller Configuration */
	__u32	rsvd1;	/* Reserved */
	__u32	csts;	/* Controller Status */
	__u32	rsvd2;	/* Reserved */
	__u32	aqa;	/* Admin Queue Attributes */
	__u64	asq;	/* Admin SQ Base Address */
	__u64	acq;	/* Admin CQ Base Address */
	__u32	cmbloc;	/* Controller Memory Buffer Location */
	__u32	cmbsz;	/* Controller Memory Buffer Size */
};

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)

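/*
 * Illustrative sketch (not part of the driver): decoding the Controller
 * Capabilities register with the macros above.  "bar" is assumed to be an
 * ioremap()ed struct nvme_bar.  MQES is zero-based, and the timeout field
 * counts in units of 500 milliseconds.
 *
 *	u64 cap = readq(&bar->cap);
 *	int q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, 1024);
 *	unsigned long timeout = (NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2;
 *	unsigned dev_page_min = 4096 << NVME_CAP_MPSMIN(cap);
 */
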
#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz)	(((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz)	(((cmbsz) >> 8) & 0xf)

#define NVME_CMB_WDS(cmbsz)	((cmbsz) & 0x10)
#define NVME_CMB_RDS(cmbsz)	((cmbsz) & 0x8)
#define NVME_CMB_LISTS(cmbsz)	((cmbsz) & 0x4)
#define NVME_CMB_CQS(cmbsz)	((cmbsz) & 0x2)
#define NVME_CMB_SQS(cmbsz)	((cmbsz) & 0x1)

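/*
 * Illustrative sketch: computing the Controller Memory Buffer size from
 * CMBSZ.  NVME_CMB_SZU() selects the size unit (0 = 4KB; each step is a
 * factor of 16) and NVME_CMB_SZ() gives the size in those units.
 *
 *	u32 cmbsz = readl(&bar->cmbsz);
 *	u64 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(cmbsz));
 *	u64 size = szu * NVME_CMB_SZ(cmbsz);
 */
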
enum {
	NVME_CC_ENABLE		= 1 << 0,
	NVME_CC_CSS_NVM		= 0 << 4,
	NVME_CC_MPS_SHIFT	= 7,
	NVME_CC_ARB_RR		= 0 << 11,
	NVME_CC_ARB_WRRU	= 1 << 11,
	NVME_CC_ARB_VS		= 7 << 11,
	NVME_CC_SHN_NONE	= 0 << 14,
	NVME_CC_SHN_NORMAL	= 1 << 14,
	NVME_CC_SHN_ABRUPT	= 2 << 14,
	NVME_CC_SHN_MASK	= 3 << 14,
	NVME_CC_IOSQES		= 6 << 16,
	NVME_CC_IOCQES		= 4 << 20,
	NVME_CSTS_RDY		= 1 << 0,
	NVME_CSTS_CFS		= 1 << 1,
	NVME_CSTS_NSSRO		= 1 << 4,
	NVME_CSTS_SHST_NORMAL	= 0 << 2,
	NVME_CSTS_SHST_OCCUR	= 1 << 2,
	NVME_CSTS_SHST_CMPLT	= 2 << 2,
	NVME_CSTS_SHST_MASK	= 3 << 2,
};

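/*
 * Illustrative sketch: composing a Controller Configuration value from the
 * flags above before enabling the controller.  "page_shift" is an assumed
 * local holding log2 of the host memory page size.
 *
 *	u32 ctrl_config = NVME_CC_CSS_NVM;
 *	ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
 *	ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
 *	ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 *	writel(ctrl_config | NVME_CC_ENABLE, &bar->cc);
 */
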
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	struct request_queue *admin_q;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	struct kref kref;
	struct device *device;
	work_func_t reset_workfn;
	struct work_struct reset_work;
	struct work_struct probe_work;
	struct work_struct scan_work;
	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	bool subsystem;
	u32 max_hw_sectors;
	u32 stripe_size;
	u32 page_size;
	void __iomem *cmb;
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	u32 cmbsz;
	u16 oncs;
	u16 abort_limit;
	u8 event_limit;
	u8 vwc;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	unsigned long private;	/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
	struct scatterlist sg[0];
};

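/*
 * Illustrative sketch: because sg[] is a flexible array, an nvme_iod is
 * allocated with extra room behind it for the scatterlist and for pointers
 * to the PRP list pages, roughly what nvme_alloc_iod does ("nseg" and
 * "nprp_pages" are assumed counts):
 *
 *	struct nvme_iod *iod = kmalloc(sizeof(*iod) +
 *			sizeof(struct scatterlist) * nseg +
 *			sizeof(__le64 *) * nprp_pages, GFP_KERNEL);
 */
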
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

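/*
 * Illustrative sketch: nvme_block_nr() turns a 512-byte kernel sector into
 * a namespace LBA, so on a 4KB-formatted namespace (lba_shift == 12)
 * sector 8 becomes LBA 1.  A read/write command would be filled in as:
 *
 *	cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 */
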
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, void __user *ubuffer, unsigned bufflen,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
			dma_addr_t dma_addr, u32 *result);

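/*
 * Illustrative sketch: negotiating I/O queue counts with
 * nvme_set_features().  NVME_FEAT_NUM_QUEUES comes from
 * <uapi/linux/nvme.h>; the controller returns the granted (zero-based)
 * submission and completion queue counts packed into "result".
 *
 *	u32 result, q_count = (count - 1) | ((count - 1) << 16);
 *	int status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count,
 *							0, &result);
 *	if (status >= 0)
 *		count = min(result & 0xffff, result >> 16) + 1;
 */
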
struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#endif /* _LINUX_NVME_H */