/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	int db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};

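/*
 * The flexible cmdid_data[] array at the end of struct nvme_queue holds a
 * bitmap of q_depth bits used to allocate command IDs, followed by one
 * struct nvme_cmd_info per command ID.  This helper returns a pointer to
 * the info array that follows the bitmap.
 */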
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

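/*
 * Queue 0 is the admin queue, so the I/O queue for the current CPU lives at
 * index smp_processor_id() + 1.  get_cpu() disables preemption until the
 * matching put_nvmeq() calls put_cpu().
 */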
static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	void *private;		/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist sg[0];
};

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
	}

	return iod;
}

static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
{
	struct nvme_queue *nvmeq = get_nvmeq(dev);
	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, bio);
	put_nvmeq(nvmeq);
	wake_up_process(nvme_thread);
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	nvme_free_iod(dev, iod);
	if (status) {
		bio_endio(bio, -EIO);
	} else if (bio->bi_vcnt > bio->bi_idx) {
		requeue_bio(dev, bio);
	} else {
		bio_endio(bio, 0);
	}
}

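/*
 * Fill in the PRP entries for a command.  PRP1 covers the first (possibly
 * unaligned) page of the transfer; a two-page transfer puts the second page
 * directly in PRP2, while longer transfers allocate a PRP list from one of
 * the DMA pools and point PRP2 at it.  When a list page fills up, its last
 * slot is rewritten to chain to the next list page.
 */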
/* length is in bytes.  gfp flags indicates whether we may sleep. */
static int nvme_setup_prps(struct nvme_dev *dev,
			struct nvme_common_command *cmd, struct nvme_iod *iod,
			int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

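/*
 * Build the iod's scatterlist from the bio's segments, merging physically
 * contiguous bvecs into one entry.  If a segment can't be virtually merged
 * the walk stops early; bi_idx records how far we got so the remainder of
 * the bio can be resubmitted later via requeue_bio().
 */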
static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, old_idx, length = 0, nsegs = 0;

	sg_init_table(iod->sg, psegs);
	old_idx = bio->bi_idx;
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				break;
			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		length += bvec->bv_len;
		bvprv = bvec;
	}
	bio->bi_idx = i;
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
		bio->bi_idx = old_idx;
		return -ENOMEM;
	}
	return length;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
	if (result < 0)
		goto free_iod;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	bio->bi_sector += length >> 9;

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

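/*
 * A completion entry is new as long as its phase tag matches nvmeq->cq_phase;
 * the controller inverts the tag it writes on every pass through the queue,
 * and we flip cq_phase each time the head index wraps, so stale entries from
 * the previous pass are never reprocessed.
 */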
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
			unsigned dword11, dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}

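/*
 * Bring up the admin queue: disable the controller, program the admin queue
 * attributes and base addresses, re-enable it with the chosen configuration,
 * then poll CSTS.RDY until the controller is ready or the timeout advertised
 * in the CAP register expires.
 */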
static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap;
	unsigned long timeout;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	cap = readq(&dev->bar->cap);
	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	dev->db_stride = NVME_CAP_STRIDE(cap);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

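/*
 * Pin the user buffer with get_user_pages_fast(), build an nvme_iod whose
 * scatterlist covers it and DMA-map it for the requested direction.  The
 * caller is responsible for releasing the pages with nvme_unmap_user_pages().
 */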
static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
				min_t(int, length, PAGE_SIZE - offset), offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			unsigned long addr, int length, struct nvme_iod *iod)
{
	struct scatterlist *sg = iod->sg;
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);

	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	int status;
	struct nvme_iod *iod;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;
	/* XXX: metadata */
	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);

	nvmeq = get_nvmeq(dev);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, iod);
	nvme_free_iod(dev, iod);
	return status;
}

static int nvme_user_admin_cmd(struct nvme_ns *ns,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *iod;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, 1, cmd.addr, length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, &c.common, iod, length,
								GFP_KERNEL);
	}

	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_admin_cmd(dev, &c, NULL);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, 0, cmd.addr, cmd.data_len, iod);
		nvme_free_iod(dev, iod);
	}
	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

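/*
 * Complete any command whose timeout has expired with a synthesized
 * completion entry carrying an ABORT_REQ status.  Called from the polling
 * kthread with the queue lock held.
 */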
static void nvme_timeout_ios(struct nvme_queue *nvmeq)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };

		if (!time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
	}
}

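/*
 * Background thread that periodically walks every queue of every registered
 * device to reap completions the interrupt handler may have missed, time out
 * stuck commands and resubmit bios that were deferred while the submission
 * queue was congested.
 */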
static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_timeout_ios(nvmeq);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

Matthew Wilcox5aff9382011-05-06 08:45:47 -04001273static DEFINE_IDA(nvme_index_ida);
1274
1275static int nvme_get_ns_idx(void)
1276{
1277 int index, error;
1278
1279 do {
1280 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
1281 return -1;
1282
1283 spin_lock(&dev_list_lock);
1284 error = ida_get_new(&nvme_index_ida, &index);
1285 spin_unlock(&dev_list_lock);
1286 } while (error == -EAGAIN);
1287
1288 if (error)
1289 index = -1;
1290 return index;
1291}
1292
1293static void nvme_put_ns_idx(int index)
1294{
1295 spin_lock(&dev_list_lock);
1296 ida_remove(&nvme_index_ida, index);
1297 spin_unlock(&dev_list_lock);
1298}
1299
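/*
 * Allocate an nvme_ns and its bio-based request queue for one namespace.
 * Namespaces whose LBA range type is marked "hidden" are skipped.  The
 * block size comes from the formatted LBA size in the Identify Namespace
 * data, and the capacity is converted to 512-byte sectors for the
 * gendisk.
 */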
1300static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001301 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1302{
1303 struct nvme_ns *ns;
1304 struct gendisk *disk;
1305 int lbaf;
1306
1307 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1308 return NULL;
1309
1310 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1311 if (!ns)
1312 return NULL;
1313 ns->queue = blk_alloc_queue(GFP_KERNEL);
1314 if (!ns->queue)
1315 goto out_free_ns;
1316 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
1317 QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
1318 blk_queue_make_request(ns->queue, nvme_make_request);
1319 ns->dev = dev;
1320 ns->queue->queuedata = ns;
1321
1322 disk = alloc_disk(NVME_MINORS);
1323 if (!disk)
1324 goto out_free_queue;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001325 ns->ns_id = nsid;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001326 ns->disk = disk;
1327 lbaf = id->flbas & 0xf;
1328 ns->lba_shift = id->lbaf[lbaf].ds;
1329
1330 disk->major = nvme_major;
1331 disk->minors = NVME_MINORS;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001332 disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001333 disk->fops = &nvme_fops;
1334 disk->private_data = ns;
1335 disk->queue = ns->queue;
Matthew Wilcox388f0372011-02-01 12:49:38 -05001336 disk->driverfs_dev = &dev->pci_dev->dev;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001337 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001338 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1339
1340 return ns;
1341
1342 out_free_queue:
1343 blk_cleanup_queue(ns->queue);
1344 out_free_ns:
1345 kfree(ns);
1346 return NULL;
1347}
1348
1349static void nvme_ns_free(struct nvme_ns *ns)
1350{
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001351 int index = ns->disk->first_minor / NVME_MINORS;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001352 put_disk(ns->disk);
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001353 nvme_put_ns_idx(index);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001354 blk_cleanup_queue(ns->queue);
1355 kfree(ns);
1356}
1357
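/*
 * Negotiate the number of I/O queue pairs with the Number of Queues
 * feature.  Dword 11 carries zero-based submission and completion queue
 * counts in its low and high halves, and the controller reports the
 * counts it actually allocated in the same format, hence the
 * min(...) + 1 below.
 */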
Matthew Wilcoxb3b06812011-01-20 09:14:34 -05001358static int set_queue_count(struct nvme_dev *dev, int count)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001359{
1360 int status;
1361 u32 result;
Matthew Wilcoxb3b06812011-01-20 09:14:34 -05001362 u32 q_count = (count - 1) | ((count - 1) << 16);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001363
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001364 status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
1365 &result);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001366 if (status)
1367 return -EIO;
1368 return min(result & 0xffff, result >> 16) + 1;
1369}
1370
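/*
 * Bring up the I/O queues: ask for one queue pair per online CPU (the
 * controller may grant fewer), grow the doorbell mapping if necessary,
 * switch from the single admin vector to MSI-X, spread the vectors
 * across CPUs and create the queues themselves.
 */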
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001371static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
1372{
Matthew Wilcoxf1938f62011-10-20 17:00:41 -04001373 int result, cpu, i, nr_io_queues, db_bar_size;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001374
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001375 nr_io_queues = num_online_cpus();
1376 result = set_queue_count(dev, nr_io_queues);
Matthew Wilcox1b234842011-01-20 13:01:49 -05001377 if (result < 0)
1378 return result;
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001379 if (result < nr_io_queues)
1380 nr_io_queues = result;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001381
Matthew Wilcox1b234842011-01-20 13:01:49 -05001382 /* Deregister the admin queue's interrupt */
1383 free_irq(dev->entry[0].vector, dev->queues[0]);
1384
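	/*
	 * Doorbell registers start 4096 bytes into the BAR; each queue pair
	 * (admin included) needs a submission-tail and a completion-head
	 * doorbell of (4 << db_stride) bytes each.  Remap the BAR if the
	 * 8K mapped at probe time is no longer enough.
	 */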
Matthew Wilcoxf1938f62011-10-20 17:00:41 -04001385 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
1386 if (db_bar_size > 8192) {
1387 iounmap(dev->bar);
1388 dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
1389 db_bar_size);
1390 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1391 dev->queues[0]->q_db = dev->dbs;
1392 }
1393
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001394 for (i = 0; i < nr_io_queues; i++)
Matthew Wilcox1b234842011-01-20 13:01:49 -05001395 dev->entry[i].entry = i;
1396 for (;;) {
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001397 result = pci_enable_msix(dev->pci_dev, dev->entry,
1398 nr_io_queues);
Matthew Wilcox1b234842011-01-20 13:01:49 -05001399 if (result == 0) {
1400 break;
1401 } else if (result > 0) {
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001402 nr_io_queues = result;
Matthew Wilcox1b234842011-01-20 13:01:49 -05001403 continue;
1404 } else {
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001405 nr_io_queues = 1;
Matthew Wilcox1b234842011-01-20 13:01:49 -05001406 break;
1407 }
1408 }
1409
1410 result = queue_request_irq(dev, dev->queues[0], "nvme admin");
1411 /* XXX: handle failure here */
1412
1413 cpu = cpumask_first(cpu_online_mask);
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001414 for (i = 0; i < nr_io_queues; i++) {
Matthew Wilcox1b234842011-01-20 13:01:49 -05001415 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
1416 cpu = cpumask_next(cpu, cpu_online_mask);
1417 }
1418
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001419 for (i = 0; i < nr_io_queues; i++) {
Matthew Wilcox1b234842011-01-20 13:01:49 -05001420 dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
1421 NVME_Q_DEPTH, i);
Matthew Wilcox6f0f5442011-05-11 13:30:59 -07001422 if (IS_ERR(dev->queues[i + 1]))
1423 return PTR_ERR(dev->queues[i + 1]);
Matthew Wilcox1b234842011-01-20 13:01:49 -05001424 dev->queue_count++;
1425 }
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001426
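	/*
	 * Possible CPUs beyond the number of queues created share one of
	 * the existing queues, chosen round-robin over a power-of-two
	 * subset of them.
	 */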
Matthew Wilcox9ecdc942011-03-16 16:52:19 -04001427 for (; i < num_possible_cpus(); i++) {
1428 int target = i % rounddown_pow_of_two(dev->queue_count - 1);
1429 dev->queues[i + 1] = dev->queues[target + 1];
1430 }
1431
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001432 return 0;
1433}
1434
1435static void nvme_free_queues(struct nvme_dev *dev)
1436{
1437 int i;
1438
1439 for (i = dev->queue_count - 1; i >= 0; i--)
1440 nvme_free_queue(dev, i);
1441}
1442
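/*
 * Enumerate the controller: set up the I/O queues, then use a single 8K
 * DMA buffer to run Identify Controller (serial, model, firmware and
 * namespace count), and for each namespace Identify Namespace plus the
 * LBA Range Type feature (in the second 4K), creating a gendisk for
 * every active, non-hidden namespace.
 */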
1443static int __devinit nvme_dev_add(struct nvme_dev *dev)
1444{
1445 int res, nn, i;
1446 struct nvme_ns *ns, *next;
Matthew Wilcox51814232011-02-01 16:18:08 -05001447 struct nvme_id_ctrl *ctrl;
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001448 struct nvme_id_ns *id_ns;
1449 void *mem;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001450 dma_addr_t dma_addr;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001451
1452 res = nvme_setup_io_queues(dev);
1453 if (res)
1454 return res;
1455
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001456 mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001457 GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
1458
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001459 res = nvme_identify(dev, 0, 1, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001460 if (res) {
1461 res = -EIO;
1462 goto out_free;
1463 }
1464
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001465 ctrl = mem;
Matthew Wilcox51814232011-02-01 16:18:08 -05001466 nn = le32_to_cpup(&ctrl->nn);
1467 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
1468 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
1469 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001470
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001471 id_ns = mem;
Matthew Wilcox2b2c1892011-10-07 13:10:13 -04001472 for (i = 1; i <= nn; i++) {
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001473 res = nvme_identify(dev, i, 0, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001474 if (res)
1475 continue;
1476
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001477 if (id_ns->ncap == 0)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001478 continue;
1479
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001480 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
1481 dma_addr + 4096, NULL);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001482 if (res)
1483 continue;
1484
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001485 ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001486 if (ns)
1487 list_add_tail(&ns->list, &dev->namespaces);
1488 }
1489 list_for_each_entry(ns, &dev->namespaces, list)
1490 add_disk(ns->disk);
1491
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001492 goto out;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001493
1494 out_free:
1495 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1496 list_del(&ns->list);
1497 nvme_ns_free(ns);
1498 }
1499
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001500 out:
Matthew Wilcox684f5c22011-09-19 17:14:53 -04001501 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001502 return res;
1503}
1504
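/*
 * Undo nvme_dev_add(): take the device off the polled list, delete and
 * free every namespace's gendisk, then free the queues.
 */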
1505static int nvme_dev_remove(struct nvme_dev *dev)
1506{
1507 struct nvme_ns *ns, *next;
1508
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001509 spin_lock(&dev_list_lock);
1510 list_del(&dev->node);
1511 spin_unlock(&dev_list_lock);
1512
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001513 /* TODO: wait all I/O finished or cancel them */
1514
1515 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1516 list_del(&ns->list);
1517 del_gendisk(ns->disk);
1518 nvme_ns_free(ns);
1519 }
1520
1521 nvme_free_queues(dev);
1522
1523 return 0;
1524}
1525
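/*
 * PRP lists are carved out of two DMA pools: full pages for large
 * transfers and 256-byte chunks for the common small case.
 */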
Matthew Wilcox091b6092011-02-10 09:56:01 -05001526static int nvme_setup_prp_pools(struct nvme_dev *dev)
1527{
1528 struct device *dmadev = &dev->pci_dev->dev;
1529 dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
1530 PAGE_SIZE, PAGE_SIZE, 0);
1531 if (!dev->prp_page_pool)
1532 return -ENOMEM;
1533
Matthew Wilcox99802a72011-02-10 10:30:34 -05001534 /* Optimisation for I/Os between 4k and 128k */
1535 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
1536 256, 256, 0);
1537 if (!dev->prp_small_pool) {
1538 dma_pool_destroy(dev->prp_page_pool);
1539 return -ENOMEM;
1540 }
Matthew Wilcox091b6092011-02-10 09:56:01 -05001541 return 0;
1542}
1543
1544static void nvme_release_prp_pools(struct nvme_dev *dev)
1545{
1546 dma_pool_destroy(dev->prp_page_pool);
Matthew Wilcox99802a72011-02-10 10:30:34 -05001547 dma_pool_destroy(dev->prp_small_pool);
Matthew Wilcox091b6092011-02-10 09:56:01 -05001548}
1549
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001550/* XXX: Use an ida or something to let remove / add work correctly */
1551static void nvme_set_instance(struct nvme_dev *dev)
1552{
1553 static int instance;
1554 dev->instance = instance++;
1555}
1556
1557static void nvme_release_instance(struct nvme_dev *dev)
1558{
1559}
1560
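/*
 * Probe one controller: enable the PCI function, claim its memory BARs,
 * set 64-bit DMA masks, map the first 8K of BAR 0 (registers plus the
 * admin doorbells), create the admin queue, add the device to the list
 * polled by nvme_kthread and finally enumerate its namespaces.  Each
 * failure path unwinds the steps before it.
 */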
1561static int __devinit nvme_probe(struct pci_dev *pdev,
1562 const struct pci_device_id *id)
1563{
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001564 int bars, result = -ENOMEM;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001565 struct nvme_dev *dev;
1566
1567 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1568 if (!dev)
1569 return -ENOMEM;
1570 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
1571 GFP_KERNEL);
1572 if (!dev->entry)
1573 goto free;
Matthew Wilcox1b234842011-01-20 13:01:49 -05001574 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
1575 GFP_KERNEL);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001576 if (!dev->queues)
1577 goto free;
1578
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001579 if (pci_enable_device_mem(pdev))
1580 goto free;
Matthew Wilcoxf64d3362011-02-01 09:01:59 -05001581 pci_set_master(pdev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001582 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1583 if (pci_request_selected_regions(pdev, bars, "nvme"))
1584 goto disable;
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001585
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001586 INIT_LIST_HEAD(&dev->namespaces);
1587 dev->pci_dev = pdev;
1588 pci_set_drvdata(pdev, dev);
Matthew Wilcox29303532011-02-01 16:23:39 -05001589 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1590 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001591 nvme_set_instance(dev);
Matthew Wilcox53c95772011-01-20 13:42:34 -05001592 dev->entry[0].vector = pdev->irq;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001593
Matthew Wilcox091b6092011-02-10 09:56:01 -05001594 result = nvme_setup_prp_pools(dev);
1595 if (result)
1596 goto disable_msix;
1597
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001598 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1599 if (!dev->bar) {
1600 result = -ENOMEM;
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001601 goto disable_msix;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001602 }
1603
1604 result = nvme_configure_admin_queue(dev);
1605 if (result)
1606 goto unmap;
1607 dev->queue_count++;
1608
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001609 spin_lock(&dev_list_lock);
1610 list_add(&dev->node, &dev_list);
1611 spin_unlock(&dev_list_lock);
1612
Matthew Wilcox740216f2011-02-15 16:28:20 -05001613 result = nvme_dev_add(dev);
1614 if (result)
1615 goto delete;
1616
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001617 return 0;
1618
1619 delete:
Matthew Wilcox740216f2011-02-15 16:28:20 -05001620 spin_lock(&dev_list_lock);
1621 list_del(&dev->node);
1622 spin_unlock(&dev_list_lock);
1623
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001624 nvme_free_queues(dev);
1625 unmap:
1626 iounmap(dev->bar);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001627 disable_msix:
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001628 pci_disable_msix(pdev);
1629 nvme_release_instance(dev);
Matthew Wilcox091b6092011-02-10 09:56:01 -05001630 nvme_release_prp_pools(dev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001631 disable:
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001632 pci_disable_device(pdev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001633 pci_release_regions(pdev);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001634 free:
1635 kfree(dev->queues);
1636 kfree(dev->entry);
1637 kfree(dev);
1638 return result;
1639}
1640
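/*
 * Tear down in reverse order of nvme_probe(): namespaces and queues
 * first, then MSI-X, the BAR mapping, the PRP pools and the PCI device
 * itself.
 */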
1641static void __devexit nvme_remove(struct pci_dev *pdev)
1642{
1643 struct nvme_dev *dev = pci_get_drvdata(pdev);
1644 nvme_dev_remove(dev);
1645 pci_disable_msix(pdev);
1646 iounmap(dev->bar);
1647 nvme_release_instance(dev);
Matthew Wilcox091b6092011-02-10 09:56:01 -05001648 nvme_release_prp_pools(dev);
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001649 pci_disable_device(pdev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001650 pci_release_regions(pdev);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001651 kfree(dev->queues);
1652 kfree(dev->entry);
1653 kfree(dev);
1654}
1655
1656/* These functions are yet to be implemented */
1657#define nvme_error_detected NULL
1658#define nvme_dump_registers NULL
1659#define nvme_link_reset NULL
1660#define nvme_slot_reset NULL
1661#define nvme_error_resume NULL
1662#define nvme_suspend NULL
1663#define nvme_resume NULL
1664
1665static struct pci_error_handlers nvme_err_handler = {
1666 .error_detected = nvme_error_detected,
1667 .mmio_enabled = nvme_dump_registers,
1668 .link_reset = nvme_link_reset,
1669 .slot_reset = nvme_slot_reset,
1670 .resume = nvme_error_resume,
1671};
1672
1673/* Move to pci_ids.h later */
1674#define PCI_CLASS_STORAGE_EXPRESS 0x010802
1675
1676static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
1677 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
1678 { 0, }
1679};
1680MODULE_DEVICE_TABLE(pci, nvme_id_table);
1681
1682static struct pci_driver nvme_driver = {
1683 .name = "nvme",
1684 .id_table = nvme_id_table,
1685 .probe = nvme_probe,
1686 .remove = __devexit_p(nvme_remove),
1687 .suspend = nvme_suspend,
1688 .resume = nvme_resume,
1689 .err_handler = &nvme_err_handler,
1690};
1691
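/*
 * Start the polling kthread before registering the PCI driver so that
 * it is ready as soon as the first device's queues appear, then grab a
 * block major and register with the PCI core.
 */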
1692static int __init nvme_init(void)
1693{
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001694 int result = -EBUSY;
1695
1696 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
1697 if (IS_ERR(nvme_thread))
1698 return PTR_ERR(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001699
1700 nvme_major = register_blkdev(nvme_major, "nvme");
1701 if (nvme_major <= 0)
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001702 goto kill_kthread;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001703
1704 result = pci_register_driver(&nvme_driver);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001705 if (result)
1706 goto unregister_blkdev;
1707 return 0;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001708
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001709 unregister_blkdev:
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001710 unregister_blkdev(nvme_major, "nvme");
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001711 kill_kthread:
1712 kthread_stop(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001713 return result;
1714}
1715
1716static void __exit nvme_exit(void)
1717{
1718 pci_unregister_driver(&nvme_driver);
1719 unregister_blkdev(nvme_major, "nvme");
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001720 kthread_stop(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001721}
1722
1723MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
1724MODULE_LICENSE("GPL");
Matthew Wilcoxce38c142011-10-07 13:20:37 -04001725MODULE_VERSION("0.7");
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001726module_init(nvme_init);
1727module_exit(nvme_exit);