blob: 26ebcf41c2131d681aab56112b3390c773e70ab8 [file] [log] [blame]
/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2013, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
18
19#ifndef _LINUX_NVME_H
20#define _LINUX_NVME_H
21
Matthew Wilcox42c77682013-06-25 15:14:56 -040022#include <uapi/linux/nvme.h>
23#include <linux/pci.h>
24#include <linux/miscdevice.h>
25#include <linux/kref.h>
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050026
27struct nvme_bar {
28 __u64 cap; /* Controller Capabilities */
29 __u32 vs; /* Version */
Matthew Wilcox897cfe12011-02-14 12:20:15 -050030 __u32 intms; /* Interrupt Mask Set */
31 __u32 intmc; /* Interrupt Mask Clear */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050032 __u32 cc; /* Controller Configuration */
Matthew Wilcox897cfe12011-02-14 12:20:15 -050033 __u32 rsvd1; /* Reserved */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050034 __u32 csts; /* Controller Status */
Matthew Wilcox897cfe12011-02-14 12:20:15 -050035 __u32 rsvd2; /* Reserved */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050036 __u32 aqa; /* Admin Queue Attributes */
37 __u64 asq; /* Admin SQ Base Address */
38 __u64 acq; /* Admin CQ Base Address */
39};
40
/*
 * Field extractors for the 64-bit CAP (Controller Capabilities) register.
 * Bit ranges follow directly from the shifts and masks below.
 */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)	/* bits 15:0: max queue entries supported */
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)	/* bits 31:24: worst-case ready timeout */
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)	/* bits 35:32: doorbell stride */
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)	/* bits 51:48: minimum memory page size */
Matthew Wilcox22605f92011-04-19 15:04:20 -040045
/*
 * Bit definitions for the CC (Controller Configuration) and
 * CSTS (Controller Status) registers.
 */
enum {
	NVME_CC_ENABLE		= 1 << 0,	/* start the controller */
	NVME_CC_CSS_NVM		= 0 << 4,	/* command set selected: NVM */
	NVME_CC_MPS_SHIFT	= 7,		/* memory page size field starts at bit 7 */
	NVME_CC_ARB_RR		= 0 << 11,	/* arbitration: round robin */
	NVME_CC_ARB_WRRU	= 1 << 11,	/* arbitration: weighted round robin + urgent */
	NVME_CC_ARB_VS		= 7 << 11,	/* arbitration: vendor specific */
	NVME_CC_SHN_NONE	= 0 << 14,	/* shutdown notification: none */
	NVME_CC_SHN_NORMAL	= 1 << 14,	/* shutdown notification: normal */
	NVME_CC_SHN_ABRUPT	= 2 << 14,	/* shutdown notification: abrupt */
	NVME_CC_SHN_MASK	= 3 << 14,	/* mask covering the SHN field */
	NVME_CC_IOSQES		= 6 << 16,	/* I/O SQ entry size 2^6 = 64 bytes */
	NVME_CC_IOCQES		= 4 << 20,	/* I/O CQ entry size 2^4 = 16 bytes */
	NVME_CSTS_RDY		= 1 << 0,	/* controller ready */
	NVME_CSTS_CFS		= 1 << 1,	/* controller fatal status */
	NVME_CSTS_SHST_NORMAL	= 0 << 2,	/* shutdown status: normal operation */
	NVME_CSTS_SHST_OCCUR	= 1 << 2,	/* shutdown status: processing occurring */
	NVME_CSTS_SHST_CMPLT	= 2 << 2,	/* shutdown status: processing complete */
	NVME_CSTS_SHST_MASK	= 3 << 2,	/* mask covering the SHST field */
};
66
/*
 * Pack a (major, minor) version pair in the format of the VS (Version)
 * register: major in bits 31:16, minor in bits 15:0.
 *
 * Arguments are fully parenthesized: the previous expansion
 * (major << 16 | minor) mis-expanded for low-precedence expression
 * arguments, e.g. NVME_VS(a ? 1 : 2, 0) became a ? 1 : (2 << 16 | 0).
 */
#define NVME_VS(major, minor)	(((major) << 16) | (minor))

/* Default timeout for I/O commands, in jiffies */
#define NVME_IO_TIMEOUT	(5 * HZ)
70
/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;		/* link in a driver-wide device list */
	struct nvme_queue **queues;	/* array of queue pointers; queue_count entries — verify ownership in .c */
	u32 __iomem *dbs;		/* mapped doorbell registers */
	struct pci_dev *pci_dev;	/* underlying PCI function */
	struct dma_pool *prp_page_pool;	/* DMA pool for full-page PRP lists */
	struct dma_pool *prp_small_pool;	/* DMA pool for small PRP lists */
	int instance;			/* per-device number (presumably used for naming) */
	int queue_count;		/* number of allocated entries in @queues */
	int db_stride;			/* doorbell stride (see NVME_CAP_STRIDE) */
	u32 ctrl_config;		/* shadow of the CC register value — confirm in .c */
	struct msix_entry *entry;	/* MSI-X vector table */
	struct nvme_bar __iomem *bar;	/* mapped controller registers */
	struct list_head namespaces;	/* list of nvme_ns attached to this controller */
	struct kref kref;		/* reference count for the device lifetime */
	struct miscdevice miscdev;	/* character device for management access */
	char name[12];			/* device name */
	char serial[20];		/* NOTE(review): fixed-size identity strings, likely from
					 * Identify Controller; may not be NUL-terminated — confirm */
	char model[40];
	char firmware_rev[8];
	u32 max_hw_sectors;		/* transfer size limit, in sectors */
	u32 stripe_size;		/* stripe size hint — semantics defined in the .c */
	u16 oncs;			/* Optional NVM Command Support field */
};
98
/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;		/* link in nvme_dev->namespaces */

	struct nvme_dev *dev;		/* owning controller */
	struct request_queue *queue;	/* block layer request queue */
	struct gendisk *disk;		/* block device presented for this namespace */

	unsigned ns_id;			/* namespace identifier (NSID) */
	int lba_shift;			/* log2 of the logical block size
					 * (nvme_block_nr() converts 512-byte
					 * sectors with lba_shift - 9) */
	int ms;				/* metadata size — presumably bytes per block; confirm */
	u64 mode_select_num_blocks;	/* cached values for SCSI MODE SELECT
					 * translation — verify against nvme-scsi.c */
	u32 mode_select_block_len;
};
115
116/*
117 * The nvme_iod describes the data in an I/O, including the list of PRP
118 * entries. You can't see it in this data structure because C doesn't let
119 * me express that. Use nvme_alloc_iod to ensure there's enough space
120 * allocated to store the PRP list.
121 */
122struct nvme_iod {
123 void *private; /* For the use of the submitter of the I/O */
124 int npages; /* In the PRP list. 0 means small pool in use */
125 int offset; /* Of PRP list */
126 int nents; /* Used in scatterlist */
127 int length; /* Of data, in bytes */
Keith Busch61982212013-05-29 15:59:39 -0600128 unsigned long start_time;
Vishal Verma13c3b0f2013-03-04 18:40:57 -0700129 dma_addr_t first_dma;
130 struct scatterlist sg[0];
131};
Vishal Verma5d0f6132013-03-04 18:40:58 -0700132
Matthew Wilcox063cc6d2013-03-27 21:28:22 -0400133static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
134{
135 return (sector >> (ns->lba_shift - 9));
136}
137
/**
 * nvme_free_iod - frees an nvme_iod
 * @dev: The device that the I/O was submitted to
 * @iod: The memory to free
 */
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);

/* Fill in the PRP entries of @cmd for @total_len bytes described by @iod */
int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
			struct nvme_iod *iod, int total_len, gfp_t gfp);
/* Pin @length bytes of the user buffer at @addr; returns an iod describing it */
struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
			unsigned long addr, unsigned length);
/* Undo nvme_map_user_pages(); @write must match the mapping call */
void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod);
/* Acquire/release a queue to submit on — presumably pairs like get/put; confirm in .c */
struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
void put_nvmeq(struct nvme_queue *nvmeq);
/* Submit @cmd and wait up to @timeout; completion dword stored via @result */
int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
						u32 *result, unsigned timeout);
int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
/* Synchronous submission on the admin queue */
int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
							u32 *result);
/* Admin command wrappers; data is returned into the caller-supplied DMA buffer */
int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
			dma_addr_t dma_addr, u32 *result);

struct sg_io_hdr;

/* SG_IO ioctl support (SCSI-to-NVMe translation layer) */
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_get_version_num(int __user *ip);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500170#endif /* _LINUX_NVME_H */