/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <uapi/linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

struct nvme_bar {
        __u64   cap;    /* Controller Capabilities */
        __u32   vs;     /* Version */
        __u32   intms;  /* Interrupt Mask Set */
        __u32   intmc;  /* Interrupt Mask Clear */
        __u32   cc;     /* Controller Configuration */
        __u32   rsvd1;  /* Reserved */
        __u32   csts;   /* Controller Status */
        __u32   rsvd2;  /* Reserved */
        __u32   aqa;    /* Admin Queue Attributes */
        __u64   asq;    /* Admin SQ Base Address */
        __u64   acq;    /* Admin CQ Base Address */
};

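/*
 * Illustrative sketch (not part of the original header): these registers are
 * memory-mapped from the controller's BAR, so they are reached through an
 * __iomem pointer with the MMIO accessors rather than dereferenced directly.
 * The helper name below is made up for the example.
 */
static inline u32 nvme_example_read_version(struct nvme_bar __iomem *bar)
{
        return readl(&bar->vs);         /* raw contents of the Version register */
}
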
#define NVME_CAP_MQES(cap)      ((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)   (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)    (((cap) >> 32) & 0xf)
#define NVME_CAP_MPSMIN(cap)    (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)    (((cap) >> 52) & 0xf)

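/*
 * Illustrative sketch (not part of the original header): how the CAP fields
 * above are typically turned into usable values.  MQES is zero-based and the
 * doorbell stride is encoded as a power of two; the helper name and output
 * parameters are assumptions for the example, not part of the driver API.
 */
static inline void nvme_example_decode_cap(u64 cap, unsigned *q_depth,
                                           u32 *db_stride)
{
        *q_depth = NVME_CAP_MQES(cap) + 1;      /* deepest queue the device supports */
        *db_stride = 1 << NVME_CAP_STRIDE(cap); /* doorbell stride, in 4-byte units */
}
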
enum {
        NVME_CC_ENABLE          = 1 << 0,
        NVME_CC_CSS_NVM         = 0 << 4,
        NVME_CC_MPS_SHIFT       = 7,
        NVME_CC_ARB_RR          = 0 << 11,
        NVME_CC_ARB_WRRU        = 1 << 11,
        NVME_CC_ARB_VS          = 7 << 11,
        NVME_CC_SHN_NONE        = 0 << 14,
        NVME_CC_SHN_NORMAL      = 1 << 14,
        NVME_CC_SHN_ABRUPT      = 2 << 14,
        NVME_CC_SHN_MASK        = 3 << 14,
        NVME_CC_IOSQES          = 6 << 16,
        NVME_CC_IOCQES          = 4 << 20,
        NVME_CSTS_RDY           = 1 << 0,
        NVME_CSTS_CFS           = 1 << 1,
        NVME_CSTS_SHST_NORMAL   = 0 << 2,
        NVME_CSTS_SHST_OCCUR    = 1 << 2,
        NVME_CSTS_SHST_CMPLT    = 2 << 2,
        NVME_CSTS_SHST_MASK     = 3 << 2,
};

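/*
 * Illustrative sketch (not part of the original header): how the CC flags are
 * combined into a controller configuration value, and how readiness shows up
 * in CSTS.  page_shift and the helper names are assumptions for the example;
 * the real enable sequence (timeouts, shutdown handling) lives in the driver.
 */
static inline u32 nvme_example_build_cc(unsigned page_shift)
{
        u32 cc = NVME_CC_CSS_NVM;                       /* NVM command set */

        cc |= (page_shift - 12) << NVME_CC_MPS_SHIFT;   /* memory page size */
        cc |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;        /* round-robin, no shutdown */
        cc |= NVME_CC_IOSQES | NVME_CC_IOCQES;          /* 64-byte SQEs, 16-byte CQEs */
        return cc | NVME_CC_ENABLE;
}

static inline bool nvme_example_ctrl_ready(struct nvme_bar __iomem *bar)
{
        return readl(&bar->csts) & NVME_CSTS_RDY;
}
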
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)

/*
 * Represents an NVM Express device. Each nvme_dev is a PCI function.
 */
struct nvme_dev {
        struct list_head node;
        struct nvme_queue **queues;
        struct request_queue *admin_q;
        struct blk_mq_tag_set tagset;
        struct blk_mq_tag_set admin_tagset;
        u32 __iomem *dbs;
        struct pci_dev *pci_dev;
        struct dma_pool *prp_page_pool;
        struct dma_pool *prp_small_pool;
        int instance;
        unsigned queue_count;
        unsigned online_queues;
        unsigned max_qid;
        int q_depth;
        u32 db_stride;
        u32 ctrl_config;
        struct msix_entry *entry;
        struct nvme_bar __iomem *bar;
        struct list_head namespaces;
        struct kref kref;
        struct device *device;
        work_func_t reset_workfn;
        struct work_struct reset_work;
        struct work_struct probe_work;
        char name[12];
        char serial[20];
        char model[40];
        char firmware_rev[8];
        u32 max_hw_sectors;
        u32 stripe_size;
        u32 page_size;
        u16 oncs;
        u16 abort_limit;
        u8 event_limit;
        u8 vwc;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
        struct list_head list;

        struct nvme_dev *dev;
        struct request_queue *queue;
        struct gendisk *disk;

        unsigned ns_id;
        int lba_shift;
        int ms;
        int pi_type;
        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
};

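/*
 * Illustrative sketch (not part of the original header): every namespace is
 * linked into its parent device's list through the 'list' member above, so a
 * walk over dev->namespaces looks like this.  Locking is omitted and the
 * helper name is made up for the example.
 */
static inline void nvme_example_for_each_ns(struct nvme_dev *dev)
{
        struct nvme_ns *ns;

        list_for_each_entry(ns, &dev->namespaces, list)
                pr_debug("%s: namespace %u\n", dev->name, ns->ns_id);
}
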
/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
        unsigned long private;  /* For the use of the submitter of the I/O */
        int npages;             /* In the PRP list. 0 means small pool in use */
        int offset;             /* Of PRP list */
        int nents;              /* Used in scatterlist */
        int length;             /* Of data, in bytes */
        dma_addr_t first_dma;
        struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
        struct scatterlist sg[0];
};

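/*
 * Illustrative sketch (not part of the original header): because of the
 * trailing sg[] member, an nvme_iod is over-allocated with room for the
 * scatterlist entries and for the pointers to the PRP list pages, roughly as
 * below.  The exact sizing is done by nvme_alloc_iod() in the driver; the
 * helper name and parameters here are assumptions for the example.
 */
static inline size_t nvme_example_iod_size(unsigned nseg, unsigned nprp_pages)
{
        return sizeof(struct nvme_iod) +
                nseg * sizeof(struct scatterlist) +     /* the trailing sg[] entries */
                nprp_pages * sizeof(__le64 *);          /* pointers to PRP list pages */
}

/**
 * nvme_block_nr - convert a 512-byte sector number to a namespace block number
 * @ns: The namespace the sector belongs to
 * @sector: Linear sector number in 512-byte units
 *
 * The block layer always counts in 512-byte sectors, while the namespace's
 * native block size is (1 << ns->lba_shift) bytes, so the sector is scaled
 * down by the difference.
 */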
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
        return (sector >> (ns->lba_shift - 9));
}

/**
 * nvme_free_iod - frees an nvme_iod
 * @dev: The device that the I/O was submitted to
 * @iod: The memory to free
 */
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);

int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
                                unsigned long addr, unsigned length);
void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
                        struct nvme_iod *iod);
int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
                                                struct nvme_command *, u32 *);
int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
                                                        u32 *result);
int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
                                                        dma_addr_t dma_addr);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
                        dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
                        dma_addr_t dma_addr, u32 *result);

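/*
 * Illustrative sketch (not part of the original header): a typical Set
 * Features call, here asking the controller for a number of I/O queues.
 * NVME_FEAT_NUM_QUEUES comes from <uapi/linux/nvme.h>; the zero-based count
 * encoding in dword11 and the helper name are assumptions for the example.
 */
static inline int nvme_example_request_queues(struct nvme_dev *dev, int count)
{
        u32 result;
        u32 q_count = (count - 1) | ((count - 1) << 16); /* 0's based SQ/CQ counts */

        return nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, &result);
}
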
struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#endif /* _LINUX_NVME_H */