Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001/*
2 * NVM Express device driver
3 * Copyright (c) 2011, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/nvme.h>
20#include <linux/bio.h>
Matthew Wilcox8de05532011-05-12 13:50:28 -040021#include <linux/bitops.h>
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050022#include <linux/blkdev.h>
Matthew Wilcoxfd63e9ce2011-05-06 08:37:54 -040023#include <linux/delay.h>
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050024#include <linux/errno.h>
25#include <linux/fs.h>
26#include <linux/genhd.h>
Matthew Wilcox5aff9382011-05-06 08:45:47 -040027#include <linux/idr.h>
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050028#include <linux/init.h>
29#include <linux/interrupt.h>
30#include <linux/io.h>
31#include <linux/kdev_t.h>
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -050032#include <linux/kthread.h>
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050033#include <linux/kernel.h>
34#include <linux/mm.h>
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/pci.h>
Matthew Wilcoxbe7b6272011-02-06 07:53:23 -050038#include <linux/poison.h>
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050039#include <linux/sched.h>
40#include <linux/slab.h>
41#include <linux/types.h>
42#include <linux/version.h>
43
44#define NVME_Q_DEPTH 1024
45#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
46#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
47#define NVME_MINORS 64
Matthew Wilcoxff976d72011-12-20 13:53:01 -050048#define NVME_IO_TIMEOUT (5 * HZ)
Matthew Wilcoxe85248e2011-02-06 18:30:16 -050049#define ADMIN_TIMEOUT (60 * HZ)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050050
51static int nvme_major;
52module_param(nvme_major, int, 0);
53
Matthew Wilcox58ffacb2011-02-06 07:28:06 -050054static int use_threaded_interrupts;
55module_param(use_threaded_interrupts, int, 0);
56
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -050057static DEFINE_SPINLOCK(dev_list_lock);
58static LIST_HEAD(dev_list);
59static struct task_struct *nvme_thread;
60
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050061/*
62 * Represents an NVM Express device. Each nvme_dev is a PCI function.
63 */
64struct nvme_dev {
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -050065 struct list_head node;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050066 struct nvme_queue **queues;
67 u32 __iomem *dbs;
68 struct pci_dev *pci_dev;
Matthew Wilcox091b6092011-02-10 09:56:01 -050069 struct dma_pool *prp_page_pool;
Matthew Wilcox99802a72011-02-10 10:30:34 -050070 struct dma_pool *prp_small_pool;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050071 int instance;
72 int queue_count;
Matthew Wilcoxf1938f62011-10-20 17:00:41 -040073 int db_stride;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050074 u32 ctrl_config;
75 struct msix_entry *entry;
76 struct nvme_bar __iomem *bar;
77 struct list_head namespaces;
Matthew Wilcox51814232011-02-01 16:18:08 -050078 char serial[20];
79 char model[40];
80 char firmware_rev[8];
Matthew Wilcoxb60503b2011-01-20 12:50:14 -050081};
82
83/*
84 * An NVM Express namespace is equivalent to a SCSI LUN
85 */
86struct nvme_ns {
87 struct list_head list;
88
89 struct nvme_dev *dev;
90 struct request_queue *queue;
91 struct gendisk *disk;
92
93 int ns_id;
94 int lba_shift;
95};
96
97/*
98 * An NVM Express queue. Each device has at least two (one for admin
99 * commands and one for I/O commands).
100 */
101struct nvme_queue {
102 struct device *q_dmadev;
Matthew Wilcox091b6092011-02-10 09:56:01 -0500103 struct nvme_dev *dev;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500104 spinlock_t q_lock;
105 struct nvme_command *sq_cmds;
106 volatile struct nvme_completion *cqes;
107 dma_addr_t sq_dma_addr;
108 dma_addr_t cq_dma_addr;
109 wait_queue_head_t sq_full;
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -0500110 wait_queue_t sq_cong_wait;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500111 struct bio_list sq_cong;
112 u32 __iomem *q_db;
113 u16 q_depth;
114 u16 cq_vector;
115 u16 sq_head;
116 u16 sq_tail;
117 u16 cq_head;
Matthew Wilcox82123462011-01-20 13:24:06 -0500118 u16 cq_phase;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500119 unsigned long cmdid_data[];
120};
121
122/*
123 * Check we didn't inadvertently grow the command struct
124 */
125static inline void _nvme_check_size(void)
126{
127 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
128 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
129 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
130 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
131 BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
132 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
133 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
134 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
135 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
136}
137
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500138typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400139 struct nvme_completion *);
140
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500141struct nvme_cmd_info {
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400142 nvme_completion_fn fn;
143 void *ctx;
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500144 unsigned long timeout;
145};
146
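/*
 * The cmdid_data[] flexible array at the end of struct nvme_queue holds two
 * things: a bitmap of q_depth bits tracking which command IDs are in use,
 * followed by an array of q_depth struct nvme_cmd_info entries.  The extra
 * space for both is sized in nvme_alloc_queue(); nvme_cmd_info() returns a
 * pointer to the start of that second region.
 */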
147static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
148{
149 return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
150}
151
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500152/**
Matthew Wilcox714a7a22011-03-16 16:28:24 -0400153 * alloc_cmdid() - Allocate a Command ID
154 * @nvmeq: The queue that will be used for this command
155 * @ctx: A pointer that will be passed to the handler
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400156 * @handler: The function to call on completion
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500157 *
158 * Allocate a Command ID for a queue. The data passed in will
159 * be passed to the completion handler. The handler function and the
160 * ctx pointer are stored in the command's nvme_cmd_info entry, so ctx
161 * may be any pointer, or one of the special CMD_CTX_* values defined
162 * below.
Matthew Wilcox184d2942011-05-11 21:36:38 -0400163 *
164 * May be called with local interrupts disabled and the q_lock held,
165 * or with interrupts enabled and no locks held.
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500166 */
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400167static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
168 nvme_completion_fn handler, unsigned timeout)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500169{
Matthew Wilcoxe6d15f72011-02-24 08:49:41 -0500170 int depth = nvmeq->q_depth - 1;
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500171 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500172 int cmdid;
173
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500174 do {
175 cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
176 if (cmdid >= depth)
177 return -EBUSY;
178 } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
179
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400180 info[cmdid].fn = handler;
181 info[cmdid].ctx = ctx;
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500182 info[cmdid].timeout = jiffies + timeout;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500183 return cmdid;
184}
185
186static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400187 nvme_completion_fn handler, unsigned timeout)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500188{
189 int cmdid;
190 wait_event_killable(nvmeq->sq_full,
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500191 (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500192 return (cmdid < 0) ? -EINTR : cmdid;
193}
194
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400195/* Special values must be less than 0x1000 */
196#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
Matthew Wilcoxd2d87032011-02-07 15:55:59 -0500197#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
198#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
199#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
Matthew Wilcox00df5cb2011-02-22 14:18:30 -0500200#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
Matthew Wilcoxbe7b6272011-02-06 07:53:23 -0500201
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500202static void special_completion(struct nvme_dev *dev, void *ctx,
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400203 struct nvme_completion *cqe)
204{
205 if (ctx == CMD_CTX_CANCELLED)
206 return;
207 if (ctx == CMD_CTX_FLUSH)
208 return;
209 if (ctx == CMD_CTX_COMPLETED) {
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500210 dev_warn(&dev->pci_dev->dev,
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400211 "completed id %d twice on queue %d\n",
212 cqe->command_id, le16_to_cpup(&cqe->sq_id));
213 return;
214 }
215 if (ctx == CMD_CTX_INVALID) {
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500216 dev_warn(&dev->pci_dev->dev,
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400217 "invalid id %d completed on queue %d\n",
218 cqe->command_id, le16_to_cpup(&cqe->sq_id));
219 return;
220 }
221
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500222 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400223}
224
Matthew Wilcox184d2942011-05-11 21:36:38 -0400225/*
226 * Called with local interrupts disabled and the q_lock held. May not sleep.
227 */
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400228static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
229 nvme_completion_fn *fn)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500230{
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400231 void *ctx;
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500232 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500233
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400234 if (cmdid >= nvmeq->q_depth) {
235 *fn = special_completion;
Matthew Wilcox48e3d392011-02-06 08:51:15 -0500236 return CMD_CTX_INVALID;
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400237 }
238 *fn = info[cmdid].fn;
239 ctx = info[cmdid].ctx;
240 info[cmdid].fn = special_completion;
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500241 info[cmdid].ctx = CMD_CTX_COMPLETED;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500242 clear_bit(cmdid, nvmeq->cmdid_data);
243 wake_up(&nvmeq->sq_full);
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400244 return ctx;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500245}
246
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400247static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
248 nvme_completion_fn *fn)
Matthew Wilcox3c0cf132011-02-04 16:03:56 -0500249{
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400250 void *ctx;
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500251 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400252 if (fn)
253 *fn = info[cmdid].fn;
254 ctx = info[cmdid].ctx;
255 info[cmdid].fn = special_completion;
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500256 info[cmdid].ctx = CMD_CTX_CANCELLED;
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400257 return ctx;
Matthew Wilcox3c0cf132011-02-04 16:03:56 -0500258}
259
Matthew Wilcox040a93b2011-12-20 11:04:12 -0500260static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500261{
Matthew Wilcox040a93b2011-12-20 11:04:12 -0500262 return dev->queues[get_cpu() + 1];
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500263}
264
265static void put_nvmeq(struct nvme_queue *nvmeq)
266{
Matthew Wilcox1b234842011-01-20 13:01:49 -0500267 put_cpu();
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500268}
269
270/**
Matthew Wilcox714a7a22011-03-16 16:28:24 -0400271 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500272 * @nvmeq: The queue to use
273 * @cmd: The command to send
274 *
275 * Safe to use from interrupt context
276 */
277static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
278{
279 unsigned long flags;
280 u16 tail;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500281 spin_lock_irqsave(&nvmeq->q_lock, flags);
282 tail = nvmeq->sq_tail;
283 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500284 if (++tail == nvmeq->q_depth)
285 tail = 0;
Matthew Wilcox75478812011-02-16 09:59:59 -0500286 writel(tail, nvmeq->q_db);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500287 nvmeq->sq_tail = tail;
288 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
289
290 return 0;
291}
292
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500293/*
294 * The nvme_iod describes the data in an I/O, including the list of PRP
295 * entries. You can't see it in this data structure because C doesn't let
296 * me express that. Use nvme_alloc_iod to ensure there's enough space
297 * allocated to store the PRP list.
298 */
299struct nvme_iod {
300 void *private; /* For the use of the submitter of the I/O */
301 int npages; /* In the PRP list. 0 means small pool in use */
302 int offset; /* Of PRP list */
303 int nents; /* Used in scatterlist */
304 int length; /* Of data, in bytes */
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500305 dma_addr_t first_dma;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500306 struct scatterlist sg[0];
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500307};
308
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500309static __le64 **iod_list(struct nvme_iod *iod)
310{
311 return ((void *)iod) + iod->offset;
312}
313
314/*
315 * Will slightly overestimate the number of pages needed. This is OK
316 * as it only leads to a small amount of wasted memory for the lifetime of
317 * the I/O.
318 */
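/*
 * Each page of PRP entries holds PAGE_SIZE / 8 entries, but the last entry
 * of a full page is reused as a pointer to the next PRP page, hence the
 * "PAGE_SIZE - 8" usable bytes per page below.
 */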
319static int nvme_npages(unsigned size)
320{
321 unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
322 return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
323}
324
325static struct nvme_iod *
326nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
327{
328 struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
329 sizeof(__le64 *) * nvme_npages(nbytes) +
330 sizeof(struct scatterlist) * nseg, gfp);
331
332 if (iod) {
333 iod->offset = offsetof(struct nvme_iod, sg[nseg]);
334 iod->npages = -1;
335 iod->length = nbytes;
336 }
337
338 return iod;
339}
340
341static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500342{
343 const int last_prp = PAGE_SIZE / 8 - 1;
344 int i;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500345 __le64 **list = iod_list(iod);
346 dma_addr_t prp_dma = iod->first_dma;
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500347
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500348 if (iod->npages == 0)
349 dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
350 for (i = 0; i < iod->npages; i++) {
351 __le64 *prp_list = list[i];
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500352 dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
Matthew Wilcox091b6092011-02-10 09:56:01 -0500353 dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500354 prp_dma = next_prp_dma;
355 }
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500356 kfree(iod);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500357}
358
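/*
 * nvme_map_bio() may stop early when it hits a virtually non-contiguous
 * segment, leaving bio->bi_idx pointing at the first unsubmitted biovec.
 * bio_completion() then hands the remainder back here so it can be parked
 * on a submission queue's congestion list and retried by nvme_kthread.
 */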
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500359static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
360{
361 struct nvme_queue *nvmeq = get_nvmeq(dev);
362 if (bio_list_empty(&nvmeq->sq_cong))
363 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
364 bio_list_add(&nvmeq->sq_cong, bio);
365 put_nvmeq(nvmeq);
366 wake_up_process(nvme_thread);
367}
368
369static void bio_completion(struct nvme_dev *dev, void *ctx,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500370 struct nvme_completion *cqe)
371{
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500372 struct nvme_iod *iod = ctx;
373 struct bio *bio = iod->private;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500374 u16 status = le16_to_cpup(&cqe->status) >> 1;
375
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500376 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500377 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500378 nvme_free_iod(dev, iod);
Matthew Wilcox09a58f52011-04-28 23:09:09 -0700379 if (status) {
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500380 bio_endio(bio, -EIO);
Matthew Wilcox09a58f52011-04-28 23:09:09 -0700381 } else if (bio->bi_vcnt > bio->bi_idx) {
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500382 requeue_bio(dev, bio);
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500383 } else {
384 bio_endio(bio, 0);
385 }
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500386}
387
Matthew Wilcox184d2942011-05-11 21:36:38 -0400388/* length is in bytes. gfp flags indicate whether we may sleep. */
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500389static int nvme_setup_prps(struct nvme_dev *dev,
390 struct nvme_common_command *cmd, struct nvme_iod *iod,
391 int total_len, gfp_t gfp)
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500392{
Matthew Wilcox99802a72011-02-10 10:30:34 -0500393 struct dma_pool *pool;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500394 int length = total_len;
395 struct scatterlist *sg = iod->sg;
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500396 int dma_len = sg_dma_len(sg);
397 u64 dma_addr = sg_dma_address(sg);
398 int offset = offset_in_page(dma_addr);
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500399 __le64 *prp_list;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500400 __le64 **list = iod_list(iod);
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500401 dma_addr_t prp_dma;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500402 int nprps, i;
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500403
404 cmd->prp1 = cpu_to_le64(dma_addr);
405 length -= (PAGE_SIZE - offset);
406 if (length <= 0)
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500407 return total_len;
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500408
409 dma_len -= (PAGE_SIZE - offset);
410 if (dma_len) {
411 dma_addr += (PAGE_SIZE - offset);
412 } else {
413 sg = sg_next(sg);
414 dma_addr = sg_dma_address(sg);
415 dma_len = sg_dma_len(sg);
416 }
417
418 if (length <= PAGE_SIZE) {
419 cmd->prp2 = cpu_to_le64(dma_addr);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500420 return total_len;
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500421 }
422
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500423 nprps = DIV_ROUND_UP(length, PAGE_SIZE);
Matthew Wilcox99802a72011-02-10 10:30:34 -0500424 if (nprps <= (256 / 8)) {
425 pool = dev->prp_small_pool;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500426 iod->npages = 0;
Matthew Wilcox99802a72011-02-10 10:30:34 -0500427 } else {
428 pool = dev->prp_page_pool;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500429 iod->npages = 1;
Matthew Wilcox99802a72011-02-10 10:30:34 -0500430 }
431
Matthew Wilcoxb77954c2011-05-12 13:51:41 -0400432 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
433 if (!prp_list) {
434 cmd->prp2 = cpu_to_le64(dma_addr);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500435 iod->npages = -1;
436 return (total_len - length) + PAGE_SIZE;
Matthew Wilcoxb77954c2011-05-12 13:51:41 -0400437 }
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500438 list[0] = prp_list;
439 iod->first_dma = prp_dma;
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500440 cmd->prp2 = cpu_to_le64(prp_dma);
441 i = 0;
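	/*
	 * Fill in the PRP list entries.  When a list page fills up, its last
	 * slot is converted into a chain pointer: the data address stored
	 * there moves to slot 0 of a freshly allocated page, and the old
	 * slot is overwritten with the DMA address of the new page.
	 */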
442 for (;;) {
Matthew Wilcox7523d832011-03-16 16:43:40 -0400443 if (i == PAGE_SIZE / 8) {
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500444 __le64 *old_prp_list = prp_list;
Matthew Wilcoxb77954c2011-05-12 13:51:41 -0400445 prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500446 if (!prp_list)
447 return total_len - length;
448 list[iod->npages++] = prp_list;
Matthew Wilcox7523d832011-03-16 16:43:40 -0400449 prp_list[0] = old_prp_list[i - 1];
450 old_prp_list[i - 1] = cpu_to_le64(prp_dma);
451 i = 1;
Shane Michael Matthewse025344c2011-02-10 08:51:24 -0500452 }
453 prp_list[i++] = cpu_to_le64(dma_addr);
454 dma_len -= PAGE_SIZE;
455 dma_addr += PAGE_SIZE;
456 length -= PAGE_SIZE;
457 if (length <= 0)
458 break;
459 if (dma_len > 0)
460 continue;
461 BUG_ON(dma_len < 0);
462 sg = sg_next(sg);
463 dma_addr = sg_dma_address(sg);
464 dma_len = sg_dma_len(sg);
465 }
466
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500467 return total_len;
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500468}
469
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500470/* NVMe scatterlists require no holes in the virtual address */
471#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
472 (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
473
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500474static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500475 struct bio *bio, enum dma_data_direction dma_dir, int psegs)
476{
Matthew Wilcox76830842011-02-10 13:55:39 -0500477 struct bio_vec *bvec, *bvprv = NULL;
478 struct scatterlist *sg = NULL;
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500479 int i, old_idx, length = 0, nsegs = 0;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500480
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500481 sg_init_table(iod->sg, psegs);
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500482 old_idx = bio->bi_idx;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500483 bio_for_each_segment(bvec, bio, i) {
Matthew Wilcox76830842011-02-10 13:55:39 -0500484 if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
485 sg->length += bvec->bv_len;
486 } else {
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500487 if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
488 break;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500489 sg = sg ? sg + 1 : iod->sg;
Matthew Wilcox76830842011-02-10 13:55:39 -0500490 sg_set_page(sg, bvec->bv_page, bvec->bv_len,
491 bvec->bv_offset);
492 nsegs++;
493 }
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500494 length += bvec->bv_len;
Matthew Wilcox76830842011-02-10 13:55:39 -0500495 bvprv = bvec;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500496 }
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500497 bio->bi_idx = i;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500498 iod->nents = nsegs;
Matthew Wilcox76830842011-02-10 13:55:39 -0500499 sg_mark_end(sg);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500500 if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500501 bio->bi_idx = old_idx;
502 return -ENOMEM;
503 }
504 return length;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500505}
506
Matthew Wilcox00df5cb2011-02-22 14:18:30 -0500507static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
508 int cmdid)
509{
510 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
511
512 memset(cmnd, 0, sizeof(*cmnd));
513 cmnd->common.opcode = nvme_cmd_flush;
514 cmnd->common.command_id = cmdid;
515 cmnd->common.nsid = cpu_to_le32(ns->ns_id);
516
517 if (++nvmeq->sq_tail == nvmeq->q_depth)
518 nvmeq->sq_tail = 0;
519 writel(nvmeq->sq_tail, nvmeq->q_db);
520
521 return 0;
522}
523
524static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
525{
526 int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
Matthew Wilcoxff976d72011-12-20 13:53:01 -0500527 special_completion, NVME_IO_TIMEOUT);
Matthew Wilcox00df5cb2011-02-22 14:18:30 -0500528 if (unlikely(cmdid < 0))
529 return cmdid;
530
531 return nvme_submit_flush(nvmeq, ns, cmdid);
532}
533
Matthew Wilcox184d2942011-05-11 21:36:38 -0400534/*
535 * Called with local interrupts disabled and the q_lock held. May not sleep.
536 */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500537static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
538 struct bio *bio)
539{
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500540 struct nvme_command *cmnd;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500541 struct nvme_iod *iod;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500542 enum dma_data_direction dma_dir;
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500543 int cmdid, length, result = -ENOMEM;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500544 u16 control;
545 u32 dsmgmt;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500546 int psegs = bio_phys_segments(ns->queue, bio);
547
Matthew Wilcox00df5cb2011-02-22 14:18:30 -0500548 if ((bio->bi_rw & REQ_FLUSH) && psegs) {
549 result = nvme_submit_flush_data(nvmeq, ns);
550 if (result)
551 return result;
552 }
553
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500554 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
555 if (!iod)
Matthew Wilcoxeeee3222011-02-14 15:55:33 -0500556 goto nomem;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500557 iod->private = bio;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500558
Matthew Wilcoxeeee3222011-02-14 15:55:33 -0500559 result = -EBUSY;
Matthew Wilcoxff976d72011-12-20 13:53:01 -0500560 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500561 if (unlikely(cmdid < 0))
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500562 goto free_iod;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500563
Matthew Wilcox00df5cb2011-02-22 14:18:30 -0500564 if ((bio->bi_rw & REQ_FLUSH) && !psegs)
565 return nvme_submit_flush(nvmeq, ns, cmdid);
566
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500567 control = 0;
568 if (bio->bi_rw & REQ_FUA)
569 control |= NVME_RW_FUA;
570 if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
571 control |= NVME_RW_LR;
572
573 dsmgmt = 0;
574 if (bio->bi_rw & REQ_RAHEAD)
575 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
576
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500577 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500578
Matthew Wilcoxb8deb622011-01-26 10:08:25 -0500579 memset(cmnd, 0, sizeof(*cmnd));
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500580 if (bio_data_dir(bio)) {
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500581 cmnd->rw.opcode = nvme_cmd_write;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500582 dma_dir = DMA_TO_DEVICE;
583 } else {
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500584 cmnd->rw.opcode = nvme_cmd_read;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500585 dma_dir = DMA_FROM_DEVICE;
586 }
587
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500588 result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500589 if (result < 0)
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500590 goto free_iod;
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500591 length = result;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500592
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500593 cmnd->rw.command_id = cmdid;
594 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500595 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
596 GFP_ATOMIC);
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500597 cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
Matthew Wilcox1ad2f892011-02-23 15:20:00 -0500598 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
Matthew Wilcoxff22b542011-01-26 10:02:29 -0500599 cmnd->rw.control = cpu_to_le16(control);
600 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500601
Matthew Wilcoxd8ee9d62011-02-24 08:46:00 -0500602 bio->bi_sector += length >> 9;
603
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500604 if (++nvmeq->sq_tail == nvmeq->q_depth)
605 nvmeq->sq_tail = 0;
Matthew Wilcox75478812011-02-16 09:59:59 -0500606 writel(nvmeq->sq_tail, nvmeq->q_db);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500607
Matthew Wilcox1974b1a2011-02-10 12:01:09 -0500608 return 0;
609
Matthew Wilcoxeca18b22011-12-20 13:34:52 -0500610 free_iod:
611 nvme_free_iod(nvmeq->dev, iod);
Matthew Wilcoxeeee3222011-02-14 15:55:33 -0500612 nomem:
613 return result;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500614}
615
Linus Torvalds93c3d652012-01-18 15:41:27 -0800616static void nvme_make_request(struct request_queue *q, struct bio *bio)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500617{
618 struct nvme_ns *ns = q->queuedata;
Matthew Wilcox040a93b2011-12-20 11:04:12 -0500619 struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
Matthew Wilcoxeeee3222011-02-14 15:55:33 -0500620 int result = -EBUSY;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500621
Matthew Wilcoxeeee3222011-02-14 15:55:33 -0500622 spin_lock_irq(&nvmeq->q_lock);
623 if (bio_list_empty(&nvmeq->sq_cong))
624 result = nvme_submit_bio_queue(nvmeq, ns, bio);
625 if (unlikely(result)) {
626 if (bio_list_empty(&nvmeq->sq_cong))
627 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500628 bio_list_add(&nvmeq->sq_cong, bio);
629 }
Matthew Wilcoxeeee3222011-02-14 15:55:33 -0500630
631 spin_unlock_irq(&nvmeq->q_lock);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500632 put_nvmeq(nvmeq);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500633}
634
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500635static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
636{
Matthew Wilcox82123462011-01-20 13:24:06 -0500637 u16 head, phase;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500638
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500639 head = nvmeq->cq_head;
Matthew Wilcox82123462011-01-20 13:24:06 -0500640 phase = nvmeq->cq_phase;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500641
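	/*
	 * A completion entry is new only if its phase bit matches the phase
	 * we expect; the controller inverts the bit it writes each time the
	 * queue wraps, so stale entries from the previous pass are skipped.
	 */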
642 for (;;) {
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400643 void *ctx;
644 nvme_completion_fn fn;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500645 struct nvme_completion cqe = nvmeq->cqes[head];
Matthew Wilcox82123462011-01-20 13:24:06 -0500646 if ((le16_to_cpu(cqe.status) & 1) != phase)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500647 break;
648 nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
649 if (++head == nvmeq->q_depth) {
650 head = 0;
Matthew Wilcox82123462011-01-20 13:24:06 -0500651 phase = !phase;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500652 }
653
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400654 ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500655 fn(nvmeq->dev, ctx, &cqe);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500656 }
657
658 /* If the controller ignores the cq head doorbell and continuously
659 * writes to the queue, it is theoretically possible to wrap around
660 * the queue twice and mistakenly return IRQ_NONE. Linux only
661 * requires that 0.1% of your interrupts are handled, so this isn't
662 * a big problem.
663 */
Matthew Wilcox82123462011-01-20 13:24:06 -0500664 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500665 return IRQ_NONE;
666
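	/*
	 * q_db points at this queue's SQ tail doorbell; the matching CQ head
	 * doorbell is (1 << db_stride) 32-bit registers further on.
	 */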
Matthew Wilcoxf1938f62011-10-20 17:00:41 -0400667 writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500668 nvmeq->cq_head = head;
Matthew Wilcox82123462011-01-20 13:24:06 -0500669 nvmeq->cq_phase = phase;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500670
671 return IRQ_HANDLED;
672}
673
674static irqreturn_t nvme_irq(int irq, void *data)
675{
Matthew Wilcox58ffacb2011-02-06 07:28:06 -0500676 irqreturn_t result;
677 struct nvme_queue *nvmeq = data;
678 spin_lock(&nvmeq->q_lock);
679 result = nvme_process_cq(nvmeq);
680 spin_unlock(&nvmeq->q_lock);
681 return result;
682}
683
684static irqreturn_t nvme_irq_check(int irq, void *data)
685{
686 struct nvme_queue *nvmeq = data;
687 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
688 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
689 return IRQ_NONE;
690 return IRQ_WAKE_THREAD;
691}
692
Matthew Wilcox3c0cf132011-02-04 16:03:56 -0500693static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
694{
695 spin_lock_irq(&nvmeq->q_lock);
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400696 cancel_cmdid(nvmeq, cmdid, NULL);
Matthew Wilcox3c0cf132011-02-04 16:03:56 -0500697 spin_unlock_irq(&nvmeq->q_lock);
698}
699
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400700struct sync_cmd_info {
701 struct task_struct *task;
702 u32 result;
703 int status;
704};
705
Matthew Wilcox5c1281a2011-12-20 11:54:53 -0500706static void sync_completion(struct nvme_dev *dev, void *ctx,
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400707 struct nvme_completion *cqe)
708{
709 struct sync_cmd_info *cmdinfo = ctx;
710 cmdinfo->result = le32_to_cpup(&cqe->result);
711 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
712 wake_up_process(cmdinfo->task);
713}
714
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500715/*
716 * Returns 0 on success. If the result is negative, it's a Linux error code;
717 * if the result is positive, it's an NVM Express status code
718 */
Matthew Wilcox3c0cf132011-02-04 16:03:56 -0500719static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500720 struct nvme_command *cmd, u32 *result, unsigned timeout)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500721{
722 int cmdid;
723 struct sync_cmd_info cmdinfo;
724
725 cmdinfo.task = current;
726 cmdinfo.status = -EINTR;
727
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -0400728 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500729 timeout);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500730 if (cmdid < 0)
731 return cmdid;
732 cmd->common.command_id = cmdid;
733
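	/*
	 * Mark the task killable before submitting so that a completion
	 * arriving before schedule() still wakes us instead of being lost.
	 */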
Matthew Wilcox3c0cf132011-02-04 16:03:56 -0500734 set_current_state(TASK_KILLABLE);
735 nvme_submit_cmd(nvmeq, cmd);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500736 schedule();
737
Matthew Wilcox3c0cf132011-02-04 16:03:56 -0500738 if (cmdinfo.status == -EINTR) {
739 nvme_abort_command(nvmeq, cmdid);
740 return -EINTR;
741 }
742
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500743 if (result)
744 *result = cmdinfo.result;
745
746 return cmdinfo.status;
747}
748
749static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
750 u32 *result)
751{
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500752 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500753}
754
755static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
756{
757 int status;
758 struct nvme_command c;
759
760 memset(&c, 0, sizeof(c));
761 c.delete_queue.opcode = opcode;
762 c.delete_queue.qid = cpu_to_le16(id);
763
764 status = nvme_submit_admin_cmd(dev, &c, NULL);
765 if (status)
766 return -EIO;
767 return 0;
768}
769
770static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
771 struct nvme_queue *nvmeq)
772{
773 int status;
774 struct nvme_command c;
775 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
776
777 memset(&c, 0, sizeof(c));
778 c.create_cq.opcode = nvme_admin_create_cq;
779 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
780 c.create_cq.cqid = cpu_to_le16(qid);
781 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
782 c.create_cq.cq_flags = cpu_to_le16(flags);
783 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
784
785 status = nvme_submit_admin_cmd(dev, &c, NULL);
786 if (status)
787 return -EIO;
788 return 0;
789}
790
791static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
792 struct nvme_queue *nvmeq)
793{
794 int status;
795 struct nvme_command c;
796 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
797
798 memset(&c, 0, sizeof(c));
799 c.create_sq.opcode = nvme_admin_create_sq;
800 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
801 c.create_sq.sqid = cpu_to_le16(qid);
802 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
803 c.create_sq.sq_flags = cpu_to_le16(flags);
804 c.create_sq.cqid = cpu_to_le16(qid);
805
806 status = nvme_submit_admin_cmd(dev, &c, NULL);
807 if (status)
808 return -EIO;
809 return 0;
810}
811
812static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
813{
814 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
815}
816
817static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
818{
819 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
820}
821
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -0400822static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
823 dma_addr_t dma_addr)
824{
825 struct nvme_command c;
826
827 memset(&c, 0, sizeof(c));
828 c.identify.opcode = nvme_admin_identify;
829 c.identify.nsid = cpu_to_le32(nsid);
830 c.identify.prp1 = cpu_to_le64(dma_addr);
831 c.identify.cns = cpu_to_le32(cns);
832
833 return nvme_submit_admin_cmd(dev, &c, NULL);
834}
835
836static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
Matthew Wilcoxdf348132012-01-11 07:29:56 -0700837 unsigned dword11, dma_addr_t dma_addr)
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -0400838{
839 struct nvme_command c;
840
841 memset(&c, 0, sizeof(c));
842 c.features.opcode = nvme_admin_get_features;
843 c.features.prp1 = cpu_to_le64(dma_addr);
844 c.features.fid = cpu_to_le32(fid);
845 c.features.dword11 = cpu_to_le32(dword11);
846
Matthew Wilcoxdf348132012-01-11 07:29:56 -0700847 return nvme_submit_admin_cmd(dev, &c, NULL);
848}
849
850static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
851 unsigned dword11, dma_addr_t dma_addr, u32 *result)
852{
853 struct nvme_command c;
854
855 memset(&c, 0, sizeof(c));
856 c.features.opcode = nvme_admin_set_features;
857 c.features.prp1 = cpu_to_le64(dma_addr);
858 c.features.fid = cpu_to_le32(fid);
859 c.features.dword11 = cpu_to_le32(dword11);
860
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -0400861 return nvme_submit_admin_cmd(dev, &c, result);
862}
863
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500864static void nvme_free_queue(struct nvme_dev *dev, int qid)
865{
866 struct nvme_queue *nvmeq = dev->queues[qid];
Matthew Wilcoxaba20802011-03-27 08:52:06 -0400867 int vector = dev->entry[nvmeq->cq_vector].vector;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500868
Matthew Wilcoxaba20802011-03-27 08:52:06 -0400869 irq_set_affinity_hint(vector, NULL);
870 free_irq(vector, nvmeq);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500871
872 /* Don't tell the adapter to delete the admin queue */
873 if (qid) {
874 adapter_delete_sq(dev, qid);
875 adapter_delete_cq(dev, qid);
876 }
877
878 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
879 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
880 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
881 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
882 kfree(nvmeq);
883}
884
885static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
886 int depth, int vector)
887{
888 struct device *dmadev = &dev->pci_dev->dev;
Matthew Wilcoxe85248e2011-02-06 18:30:16 -0500889 unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500890 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
891 if (!nvmeq)
892 return NULL;
893
894 nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
895 &nvmeq->cq_dma_addr, GFP_KERNEL);
896 if (!nvmeq->cqes)
897 goto free_nvmeq;
898 memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
899
900 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
901 &nvmeq->sq_dma_addr, GFP_KERNEL);
902 if (!nvmeq->sq_cmds)
903 goto free_cqdma;
904
905 nvmeq->q_dmadev = dmadev;
Matthew Wilcox091b6092011-02-10 09:56:01 -0500906 nvmeq->dev = dev;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500907 spin_lock_init(&nvmeq->q_lock);
908 nvmeq->cq_head = 0;
Matthew Wilcox82123462011-01-20 13:24:06 -0500909 nvmeq->cq_phase = 1;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500910 init_waitqueue_head(&nvmeq->sq_full);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -0500911 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500912 bio_list_init(&nvmeq->sq_cong);
Matthew Wilcoxf1938f62011-10-20 17:00:41 -0400913 nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500914 nvmeq->q_depth = depth;
915 nvmeq->cq_vector = vector;
916
917 return nvmeq;
918
919 free_cqdma:
920 dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
921 nvmeq->cq_dma_addr);
922 free_nvmeq:
923 kfree(nvmeq);
924 return NULL;
925}
926
Matthew Wilcox30010822011-01-20 09:10:15 -0500927static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
928 const char *name)
929{
Matthew Wilcox58ffacb2011-02-06 07:28:06 -0500930 if (use_threaded_interrupts)
931 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
Matthew Wilcoxec6ce612011-02-06 09:01:00 -0500932 nvme_irq_check, nvme_irq,
Matthew Wilcox58ffacb2011-02-06 07:28:06 -0500933 IRQF_DISABLED | IRQF_SHARED,
934 name, nvmeq);
Matthew Wilcox30010822011-01-20 09:10:15 -0500935 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
936 IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
937}
938
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500939static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
940 int qid, int cq_size, int vector)
941{
942 int result;
943 struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
944
Matthew Wilcox3f85d502011-02-01 08:39:04 -0500945 if (!nvmeq)
Matthew Wilcox6f0f5442011-05-11 13:30:59 -0700946 return ERR_PTR(-ENOMEM);
Matthew Wilcox3f85d502011-02-01 08:39:04 -0500947
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500948 result = adapter_alloc_cq(dev, qid, nvmeq);
949 if (result < 0)
950 goto free_nvmeq;
951
952 result = adapter_alloc_sq(dev, qid, nvmeq);
953 if (result < 0)
954 goto release_cq;
955
Matthew Wilcox30010822011-01-20 09:10:15 -0500956 result = queue_request_irq(dev, nvmeq, "nvme");
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500957 if (result < 0)
958 goto release_sq;
959
960 return nvmeq;
961
962 release_sq:
963 adapter_delete_sq(dev, qid);
964 release_cq:
965 adapter_delete_cq(dev, qid);
966 free_nvmeq:
967 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
968 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
969 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
970 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
971 kfree(nvmeq);
Matthew Wilcox6f0f5442011-05-11 13:30:59 -0700972 return ERR_PTR(result);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500973}
974
975static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
976{
977 int result;
978 u32 aqa;
Matthew Wilcox22605f92011-04-19 15:04:20 -0400979 u64 cap;
980 unsigned long timeout;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500981 struct nvme_queue *nvmeq;
982
983 dev->dbs = ((void __iomem *)dev->bar) + 4096;
984
985 nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
Matthew Wilcox3f85d502011-02-01 08:39:04 -0500986 if (!nvmeq)
987 return -ENOMEM;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500988
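	/*
	 * AQA takes the 0-based admin submission queue size in its low 16
	 * bits and the 0-based admin completion queue size in its high bits.
	 */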
989 aqa = nvmeq->q_depth - 1;
990 aqa |= aqa << 16;
991
992 dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
993 dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
994 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
Matthew Wilcox7f53f9d2011-03-22 15:55:45 -0400995 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500996
Shane Michael Matthews5911f202011-02-01 11:31:55 -0500997 writel(0, &dev->bar->cc);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500998 writel(aqa, &dev->bar->aqa);
999 writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
1000 writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
1001 writel(dev->ctrl_config, &dev->bar->cc);
1002
Matthew Wilcox22605f92011-04-19 15:04:20 -04001003 cap = readq(&dev->bar->cap);
1004 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
Matthew Wilcoxf1938f62011-10-20 17:00:41 -04001005 dev->db_stride = NVME_CAP_STRIDE(cap);
Matthew Wilcox22605f92011-04-19 15:04:20 -04001006
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001007 while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
1008 msleep(100);
1009 if (fatal_signal_pending(current))
1010 return -EINTR;
Matthew Wilcox22605f92011-04-19 15:04:20 -04001011 if (time_after(jiffies, timeout)) {
1012 dev_err(&dev->pci_dev->dev,
1013 "Device not ready; aborting initialisation\n");
1014 return -ENODEV;
1015 }
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001016 }
1017
Matthew Wilcox30010822011-01-20 09:10:15 -05001018 result = queue_request_irq(dev, nvmeq, "nvme admin");
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001019 dev->queues[0] = nvmeq;
1020 return result;
1021}
1022
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001023static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
1024 unsigned long addr, unsigned length)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001025{
Matthew Wilcox36c14ed2011-01-24 07:52:07 -05001026 int i, err, count, nents, offset;
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001027 struct scatterlist *sg;
1028 struct page **pages;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001029 struct nvme_iod *iod;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001030
Matthew Wilcox36c14ed2011-01-24 07:52:07 -05001031 if (addr & 3)
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001032 return ERR_PTR(-EINVAL);
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001033 if (!length)
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001034 return ERR_PTR(-EINVAL);
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001035
Matthew Wilcox36c14ed2011-01-24 07:52:07 -05001036 offset = offset_in_page(addr);
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001037 count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
1038 pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
Matthew Wilcox36c14ed2011-01-24 07:52:07 -05001039
1040 err = get_user_pages_fast(addr, count, 1, pages);
1041 if (err < count) {
1042 count = err;
1043 err = -EFAULT;
1044 goto put_pages;
1045 }
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001046
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001047 iod = nvme_alloc_iod(count, length, GFP_KERNEL);
1048 sg = iod->sg;
Matthew Wilcox36c14ed2011-01-24 07:52:07 -05001049 sg_init_table(sg, count);
Matthew Wilcoxd0ba1e42011-09-13 17:01:39 -04001050 for (i = 0; i < count; i++) {
1051 sg_set_page(&sg[i], pages[i],
1052 min_t(int, length, PAGE_SIZE - offset), offset);
1053 length -= (PAGE_SIZE - offset);
1054 offset = 0;
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001055 }
Matthew Wilcoxfe304c42012-01-06 13:49:25 -07001056 sg_mark_end(&sg[i - 1]);
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001057 iod->nents = count;
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001058
1059 err = -ENOMEM;
1060 nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
1061 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
Matthew Wilcox36c14ed2011-01-24 07:52:07 -05001062 if (!nents)
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001063 goto free_iod;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001064
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001065 kfree(pages);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001066 return iod;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001067
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001068 free_iod:
1069 kfree(iod);
Matthew Wilcox36c14ed2011-01-24 07:52:07 -05001070 put_pages:
1071 for (i = 0; i < count; i++)
1072 put_page(pages[i]);
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001073 kfree(pages);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001074 return ERR_PTR(err);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001075}
1076
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001077static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001078 struct nvme_iod *iod)
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001079{
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001080 int i;
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001081
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001082 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
1083 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001084
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001085 for (i = 0; i < iod->nents; i++)
1086 put_page(sg_page(&iod->sg[i]));
Matthew Wilcox7fc3cda2011-01-26 17:05:50 -05001087}
1088
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001089static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
1090{
1091 struct nvme_dev *dev = ns->dev;
1092 struct nvme_queue *nvmeq;
1093 struct nvme_user_io io;
1094 struct nvme_command c;
1095 unsigned length;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001096 int status;
1097 struct nvme_iod *iod;
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001098
1099 if (copy_from_user(&io, uio, sizeof(io)))
1100 return -EFAULT;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001101 length = (io.nblocks + 1) << ns->lba_shift;
1102
1103 switch (io.opcode) {
1104 case nvme_cmd_write:
1105 case nvme_cmd_read:
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001106 case nvme_cmd_compare:
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001107 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
Matthew Wilcox64132142011-08-09 12:56:37 -04001108 break;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001109 default:
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001110 return -EINVAL;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001111 }
1112
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001113 if (IS_ERR(iod))
1114 return PTR_ERR(iod);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001115
1116 memset(&c, 0, sizeof(c));
1117 c.rw.opcode = io.opcode;
1118 c.rw.flags = io.flags;
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001119 c.rw.nsid = cpu_to_le32(ns->ns_id);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001120 c.rw.slba = cpu_to_le64(io.slba);
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001121 c.rw.length = cpu_to_le16(io.nblocks);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001122 c.rw.control = cpu_to_le16(io.control);
1123 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
Matthew Wilcox6c7d4942011-03-21 09:48:57 -04001124 c.rw.reftag = io.reftag;
1125 c.rw.apptag = io.apptag;
1126 c.rw.appmask = io.appmask;
Shane Michael Matthewse025344c2011-02-10 08:51:24 -05001127 /* XXX: metadata */
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001128 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
Shane Michael Matthewse025344c2011-02-10 08:51:24 -05001129
Matthew Wilcox040a93b2011-12-20 11:04:12 -05001130 nvmeq = get_nvmeq(dev);
Matthew Wilcoxfa922822011-03-16 16:29:00 -04001131 /*
1132 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
Matthew Wilcoxb1ad37e2011-02-04 16:14:30 -05001133 * disabled. We may be preempted at any point, and be rescheduled
1134 * to a different CPU. That will cause cacheline bouncing, but no
1135 * additional races since q_lock already protects against other CPUs.
1136 */
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001137 put_nvmeq(nvmeq);
Matthew Wilcoxb77954c2011-05-12 13:51:41 -04001138 if (length != (io.nblocks + 1) << ns->lba_shift)
1139 status = -ENOMEM;
1140 else
Matthew Wilcoxff976d72011-12-20 13:53:01 -05001141 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001142
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001143 nvme_unmap_user_pages(dev, io.opcode & 1, iod);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001144 nvme_free_iod(dev, iod);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001145 return status;
1146}
1147
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001148static int nvme_user_admin_cmd(struct nvme_ns *ns,
1149 struct nvme_admin_cmd __user *ucmd)
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001150{
1151 struct nvme_dev *dev = ns->dev;
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001152 struct nvme_admin_cmd cmd;
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001153 struct nvme_command c;
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001154 int status, length;
1155 struct nvme_iod *iod;
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001156
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001157 if (!capable(CAP_SYS_ADMIN))
1158 return -EACCES;
1159 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001160 return -EFAULT;
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001161
1162 memset(&c, 0, sizeof(c));
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001163 c.common.opcode = cmd.opcode;
1164 c.common.flags = cmd.flags;
1165 c.common.nsid = cpu_to_le32(cmd.nsid);
1166 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1167 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1168 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
1169 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
1170 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
1171 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
1172 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
1173 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
1174
1175 length = cmd.data_len;
1176 if (cmd.data_len) {
Matthew Wilcox49742182012-01-06 13:42:45 -07001177 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
1178 length);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001179 if (IS_ERR(iod))
1180 return PTR_ERR(iod);
1181 length = nvme_setup_prps(dev, &c.common, iod, length,
1182 GFP_KERNEL);
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001183 }
1184
1185 if (length != cmd.data_len)
Matthew Wilcoxb77954c2011-05-12 13:51:41 -04001186 status = -ENOMEM;
1187 else
1188 status = nvme_submit_admin_cmd(dev, &c, NULL);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001189
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001190 if (cmd.data_len) {
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001191 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001192 nvme_free_iod(dev, iod);
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001193 }
Matthew Wilcox6ee44cd2011-02-03 10:58:26 -05001194 return status;
1195}
1196
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001197static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1198 unsigned long arg)
1199{
1200 struct nvme_ns *ns = bdev->bd_disk->private_data;
1201
1202 switch (cmd) {
Matthew Wilcox6bbf1ac2011-05-20 13:03:42 -04001203 case NVME_IOCTL_ID:
1204 return ns->ns_id;
1205 case NVME_IOCTL_ADMIN_CMD:
1206 return nvme_user_admin_cmd(ns, (void __user *)arg);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001207 case NVME_IOCTL_SUBMIT_IO:
1208 return nvme_submit_io(ns, (void __user *)arg);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001209 default:
1210 return -ENOTTY;
1211 }
1212}
1213
1214static const struct block_device_operations nvme_fops = {
1215 .owner = THIS_MODULE,
1216 .ioctl = nvme_ioctl,
Matthew Wilcox49481682011-03-19 14:55:38 -04001217 .compat_ioctl = nvme_ioctl,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001218};
1219
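/*
 * Illustrative sketch only (not part of the driver): user space reaches
 * nvme_submit_io() through the namespace block device's ioctl, roughly as
 * below.  The field values are assumptions for the example; addr must be
 * at least dword aligned and cover (nblocks + 1) logical blocks.
 *
 *	struct nvme_user_io io = {
 *		.opcode  = nvme_cmd_read,
 *		.slba    = 0,			// starting LBA
 *		.nblocks = 7,			// 0-based: read 8 blocks
 *		.addr    = (__u64)(uintptr_t)buf,
 *	};
 *	int fd = open("/dev/nvme0n1", O_RDONLY);
 *	int err = ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
 */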
Matthew Wilcox8de05532011-05-12 13:50:28 -04001220static void nvme_timeout_ios(struct nvme_queue *nvmeq)
1221{
1222 int depth = nvmeq->q_depth - 1;
1223 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1224 unsigned long now = jiffies;
1225 int cmdid;
1226
1227 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -04001228 void *ctx;
1229 nvme_completion_fn fn;
Matthew Wilcox8de05532011-05-12 13:50:28 -04001230 static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
1231
1232 if (!time_after(now, info[cmdid].timeout))
1233 continue;
1234 dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
Matthew Wilcoxc2f5b652011-10-15 07:33:46 -04001235 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
Matthew Wilcox5c1281a2011-12-20 11:54:53 -05001236 fn(nvmeq->dev, ctx, &cqe);
Matthew Wilcox8de05532011-05-12 13:50:28 -04001237 }
1238}
1239
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001240static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1241{
1242 while (bio_list_peek(&nvmeq->sq_cong)) {
1243 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1244 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
1245 if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
1246 bio_list_add_head(&nvmeq->sq_cong, bio);
1247 break;
1248 }
Matthew Wilcox3cb967c2011-03-16 16:45:49 -04001249 if (bio_list_empty(&nvmeq->sq_cong))
1250 remove_wait_queue(&nvmeq->sq_full,
1251 &nvmeq->sq_cong_wait);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001252 }
1253}
1254
1255static int nvme_kthread(void *data)
1256{
1257 struct nvme_dev *dev;
1258
1259 while (!kthread_should_stop()) {
1260 __set_current_state(TASK_RUNNING);
1261 spin_lock(&dev_list_lock);
1262 list_for_each_entry(dev, &dev_list, node) {
1263 int i;
1264 for (i = 0; i < dev->queue_count; i++) {
1265 struct nvme_queue *nvmeq = dev->queues[i];
Matthew Wilcox740216f2011-02-15 16:28:20 -05001266 if (!nvmeq)
1267 continue;
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001268 spin_lock_irq(&nvmeq->q_lock);
1269 if (nvme_process_cq(nvmeq))
 1270					printk(KERN_DEBUG "process_cq did something\n");
Matthew Wilcox8de05532011-05-12 13:50:28 -04001271 nvme_timeout_ios(nvmeq);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001272 nvme_resubmit_bios(nvmeq);
1273 spin_unlock_irq(&nvmeq->q_lock);
1274 }
1275 }
1276 spin_unlock(&dev_list_lock);
1277 set_current_state(TASK_INTERRUPTIBLE);
1278 schedule_timeout(HZ);
1279 }
1280 return 0;
1281}
1282
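/*
 * Namespace index allocation.  An IDA hands out small integers; each
 * gendisk's first_minor is the index times NVME_MINORS, and the index is
 * recovered by dividing first_minor back down when the namespace is freed.
 */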
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001283static DEFINE_IDA(nvme_index_ida);
1284
1285static int nvme_get_ns_idx(void)
1286{
1287 int index, error;
1288
1289 do {
1290 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
1291 return -1;
1292
1293 spin_lock(&dev_list_lock);
1294 error = ida_get_new(&nvme_index_ida, &index);
1295 spin_unlock(&dev_list_lock);
1296 } while (error == -EAGAIN);
1297
1298 if (error)
1299 index = -1;
1300 return index;
1301}
1302
1303static void nvme_put_ns_idx(int index)
1304{
1305 spin_lock(&dev_list_lock);
1306 ida_remove(&nvme_index_ida, index);
1307 spin_unlock(&dev_list_lock);
1308}
1309
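/*
 * Build the block-layer plumbing for one namespace: allocate the nvme_ns, a
 * bio-based request queue and a gendisk, take the LBA shift from the
 * currently formatted LBA format, and size the disk from the namespace size
 * (nsze) reported by Identify.  Namespaces whose LBA range type is marked
 * hidden are skipped.
 */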
1310static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001311 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1312{
1313 struct nvme_ns *ns;
1314 struct gendisk *disk;
1315 int lbaf;
1316
1317 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1318 return NULL;
1319
1320 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1321 if (!ns)
1322 return NULL;
1323 ns->queue = blk_alloc_queue(GFP_KERNEL);
1324 if (!ns->queue)
1325 goto out_free_ns;
Matthew Wilcox4eeb9212012-01-10 14:35:08 -07001326 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
1327 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1328 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1329/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001330 blk_queue_make_request(ns->queue, nvme_make_request);
1331 ns->dev = dev;
1332 ns->queue->queuedata = ns;
1333
1334 disk = alloc_disk(NVME_MINORS);
1335 if (!disk)
1336 goto out_free_queue;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001337 ns->ns_id = nsid;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001338 ns->disk = disk;
1339 lbaf = id->flbas & 0xf;
1340 ns->lba_shift = id->lbaf[lbaf].ds;
1341
1342 disk->major = nvme_major;
1343 disk->minors = NVME_MINORS;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001344 disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001345 disk->fops = &nvme_fops;
1346 disk->private_data = ns;
1347 disk->queue = ns->queue;
Matthew Wilcox388f0372011-02-01 12:49:38 -05001348 disk->driverfs_dev = &dev->pci_dev->dev;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001349 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001350 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1351
1352 return ns;
1353
1354 out_free_queue:
1355 blk_cleanup_queue(ns->queue);
1356 out_free_ns:
1357 kfree(ns);
1358 return NULL;
1359}
1360
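/* Undo nvme_alloc_ns(): return the minor index, the queue and the disk. */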
1361static void nvme_ns_free(struct nvme_ns *ns)
1362{
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001363 int index = ns->disk->first_minor / NVME_MINORS;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001364 put_disk(ns->disk);
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001365 nvme_put_ns_idx(index);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001366 blk_cleanup_queue(ns->queue);
1367 kfree(ns);
1368}
1369
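/*
 * Ask the controller for "count" I/O queues with Set Features (Number of
 * Queues).  The controller answers with the number of submission and
 * completion queues it actually granted (0-based, one per 16-bit half), and
 * the smaller of the two is what we can use.
 */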
Matthew Wilcoxb3b06812011-01-20 09:14:34 -05001370static int set_queue_count(struct nvme_dev *dev, int count)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001371{
1372 int status;
1373 u32 result;
Matthew Wilcoxb3b06812011-01-20 09:14:34 -05001374 u32 q_count = (count - 1) | ((count - 1) << 16);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001375
Matthew Wilcoxdf348132012-01-11 07:29:56 -07001376 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001377 &result);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001378 if (status)
1379 return -EIO;
1380 return min(result & 0xffff, result >> 16) + 1;
1381}
1382
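/*
 * Create one I/O queue per online CPU, or as many as the controller and
 * MSI-X vector allocation allow.  If the doorbell stride pushes the doorbell
 * registers past the initial 8KB BAR mapping, remap the BAR first; then
 * spread the vectors across online CPUs and let any remaining possible CPUs
 * share the created queues.
 */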
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001383static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
1384{
Matthew Wilcoxf1938f62011-10-20 17:00:41 -04001385 int result, cpu, i, nr_io_queues, db_bar_size;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001386
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001387 nr_io_queues = num_online_cpus();
1388 result = set_queue_count(dev, nr_io_queues);
Matthew Wilcox1b234842011-01-20 13:01:49 -05001389 if (result < 0)
1390 return result;
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001391 if (result < nr_io_queues)
1392 nr_io_queues = result;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001393
Matthew Wilcox1b234842011-01-20 13:01:49 -05001394 /* Deregister the admin queue's interrupt */
1395 free_irq(dev->entry[0].vector, dev->queues[0]);
1396
Matthew Wilcoxf1938f62011-10-20 17:00:41 -04001397 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
1398 if (db_bar_size > 8192) {
1399 iounmap(dev->bar);
1400 dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
1401 db_bar_size);
1402 dev->dbs = ((void __iomem *)dev->bar) + 4096;
1403 dev->queues[0]->q_db = dev->dbs;
1404 }
1405
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001406 for (i = 0; i < nr_io_queues; i++)
Matthew Wilcox1b234842011-01-20 13:01:49 -05001407 dev->entry[i].entry = i;
1408 for (;;) {
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001409 result = pci_enable_msix(dev->pci_dev, dev->entry,
1410 nr_io_queues);
Matthew Wilcox1b234842011-01-20 13:01:49 -05001411 if (result == 0) {
1412 break;
1413 } else if (result > 0) {
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001414 nr_io_queues = result;
Matthew Wilcox1b234842011-01-20 13:01:49 -05001415 continue;
1416 } else {
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001417 nr_io_queues = 1;
Matthew Wilcox1b234842011-01-20 13:01:49 -05001418 break;
1419 }
1420 }
1421
1422 result = queue_request_irq(dev, dev->queues[0], "nvme admin");
1423 /* XXX: handle failure here */
1424
1425 cpu = cpumask_first(cpu_online_mask);
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001426 for (i = 0; i < nr_io_queues; i++) {
Matthew Wilcox1b234842011-01-20 13:01:49 -05001427 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
1428 cpu = cpumask_next(cpu, cpu_online_mask);
1429 }
1430
Matthew Wilcoxb348b7d2011-02-15 16:16:02 -05001431 for (i = 0; i < nr_io_queues; i++) {
Matthew Wilcox1b234842011-01-20 13:01:49 -05001432 dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
1433 NVME_Q_DEPTH, i);
Matthew Wilcox6f0f5442011-05-11 13:30:59 -07001434 if (IS_ERR(dev->queues[i + 1]))
1435 return PTR_ERR(dev->queues[i + 1]);
Matthew Wilcox1b234842011-01-20 13:01:49 -05001436 dev->queue_count++;
1437 }
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001438
Matthew Wilcox9ecdc942011-03-16 16:52:19 -04001439 for (; i < num_possible_cpus(); i++) {
1440 int target = i % rounddown_pow_of_two(dev->queue_count - 1);
1441 dev->queues[i + 1] = dev->queues[target + 1];
1442 }
1443
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001444 return 0;
1445}
1446
1447static void nvme_free_queues(struct nvme_dev *dev)
1448{
1449 int i;
1450
1451 for (i = dev->queue_count - 1; i >= 0; i--)
1452 nvme_free_queue(dev, i);
1453}
1454
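/*
 * Bring-up after the admin queue works: create the I/O queues, issue
 * Identify Controller for the namespace count and serial/model/firmware
 * strings, then for each namespace issue Identify Namespace plus Get
 * Features (LBA Range Type) and register a gendisk for every usable one.
 */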
1455static int __devinit nvme_dev_add(struct nvme_dev *dev)
1456{
1457 int res, nn, i;
1458 struct nvme_ns *ns, *next;
Matthew Wilcox51814232011-02-01 16:18:08 -05001459 struct nvme_id_ctrl *ctrl;
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001460 struct nvme_id_ns *id_ns;
1461 void *mem;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001462 dma_addr_t dma_addr;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001463
1464 res = nvme_setup_io_queues(dev);
1465 if (res)
1466 return res;
1467
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001468	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001469								GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
1470
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001471 res = nvme_identify(dev, 0, 1, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001472 if (res) {
1473 res = -EIO;
1474 goto out_free;
1475 }
1476
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001477 ctrl = mem;
Matthew Wilcox51814232011-02-01 16:18:08 -05001478 nn = le32_to_cpup(&ctrl->nn);
1479 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
1480 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
1481 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001482
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001483 id_ns = mem;
Matthew Wilcox2b2c1892011-10-07 13:10:13 -04001484 for (i = 1; i <= nn; i++) {
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001485 res = nvme_identify(dev, i, 0, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001486 if (res)
1487 continue;
1488
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001489 if (id_ns->ncap == 0)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001490 continue;
1491
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001492 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
Matthew Wilcoxdf348132012-01-11 07:29:56 -07001493 dma_addr + 4096);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001494 if (res)
1495 continue;
1496
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001497 ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001498 if (ns)
1499 list_add_tail(&ns->list, &dev->namespaces);
1500 }
1501 list_for_each_entry(ns, &dev->namespaces, list)
1502 add_disk(ns->disk);
1503
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001504 goto out;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001505
1506 out_free:
1507 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1508 list_del(&ns->list);
1509 nvme_ns_free(ns);
1510 }
1511
Matthew Wilcoxbc5fc7e2011-09-19 17:08:14 -04001512 out:
Matthew Wilcox684f5c22011-09-19 17:14:53 -04001513 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001514 return res;
1515}
1516
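/*
 * Device tear-down: unlink the device from the polling thread's list, delete
 * and free every namespace, then free the queues.  Outstanding I/O is not
 * yet drained (see the TODO below).
 */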
1517static int nvme_dev_remove(struct nvme_dev *dev)
1518{
1519 struct nvme_ns *ns, *next;
1520
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001521 spin_lock(&dev_list_lock);
1522 list_del(&dev->node);
1523 spin_unlock(&dev_list_lock);
1524
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001525 /* TODO: wait all I/O finished or cancel them */
1526
1527 list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
1528 list_del(&ns->list);
1529 del_gendisk(ns->disk);
1530 nvme_ns_free(ns);
1531 }
1532
1533 nvme_free_queues(dev);
1534
1535 return 0;
1536}
1537
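/*
 * PRP lists are carved from two DMA pools: whole pages for large transfers
 * and 256-byte chunks as an optimisation for smaller I/Os.
 */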
Matthew Wilcox091b6092011-02-10 09:56:01 -05001538static int nvme_setup_prp_pools(struct nvme_dev *dev)
1539{
1540 struct device *dmadev = &dev->pci_dev->dev;
1541 dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
1542 PAGE_SIZE, PAGE_SIZE, 0);
1543 if (!dev->prp_page_pool)
1544 return -ENOMEM;
1545
Matthew Wilcox99802a72011-02-10 10:30:34 -05001546 /* Optimisation for I/Os between 4k and 128k */
1547 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
1548 256, 256, 0);
1549 if (!dev->prp_small_pool) {
1550 dma_pool_destroy(dev->prp_page_pool);
1551 return -ENOMEM;
1552 }
Matthew Wilcox091b6092011-02-10 09:56:01 -05001553 return 0;
1554}
1555
1556static void nvme_release_prp_pools(struct nvme_dev *dev)
1557{
1558 dma_pool_destroy(dev->prp_page_pool);
Matthew Wilcox99802a72011-02-10 10:30:34 -05001559 dma_pool_destroy(dev->prp_small_pool);
Matthew Wilcox091b6092011-02-10 09:56:01 -05001560}
1561
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001562/* XXX: Use an ida or something to let remove / add work correctly */
1563static void nvme_set_instance(struct nvme_dev *dev)
1564{
1565 static int instance;
1566 dev->instance = instance++;
1567}
1568
1569static void nvme_release_instance(struct nvme_dev *dev)
1570{
1571}
1572
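/*
 * PCI probe path: allocate per-device state sized for every possible CPU,
 * enable the device and 64-bit DMA, map the register BAR, create the PRP
 * pools and the admin queue, add the device to the polling list, and finally
 * scan namespaces via nvme_dev_add().  Each error label unwinds the steps
 * taken before it.
 */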
1573static int __devinit nvme_probe(struct pci_dev *pdev,
1574 const struct pci_device_id *id)
1575{
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001576 int bars, result = -ENOMEM;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001577 struct nvme_dev *dev;
1578
1579 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1580 if (!dev)
1581 return -ENOMEM;
1582 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
1583 GFP_KERNEL);
1584 if (!dev->entry)
1585 goto free;
Matthew Wilcox1b234842011-01-20 13:01:49 -05001586 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
1587 GFP_KERNEL);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001588 if (!dev->queues)
1589 goto free;
1590
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001591 if (pci_enable_device_mem(pdev))
1592 goto free;
Matthew Wilcoxf64d3362011-02-01 09:01:59 -05001593 pci_set_master(pdev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001594 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1595 if (pci_request_selected_regions(pdev, bars, "nvme"))
1596 goto disable;
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001597
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001598 INIT_LIST_HEAD(&dev->namespaces);
1599 dev->pci_dev = pdev;
1600 pci_set_drvdata(pdev, dev);
Matthew Wilcox29303532011-02-01 16:23:39 -05001601 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1602 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001603 nvme_set_instance(dev);
Matthew Wilcox53c95772011-01-20 13:42:34 -05001604 dev->entry[0].vector = pdev->irq;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001605
Matthew Wilcox091b6092011-02-10 09:56:01 -05001606 result = nvme_setup_prp_pools(dev);
1607 if (result)
1608 goto disable_msix;
1609
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001610 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1611 if (!dev->bar) {
1612 result = -ENOMEM;
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001613 goto disable_msix;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001614 }
1615
1616 result = nvme_configure_admin_queue(dev);
1617 if (result)
1618 goto unmap;
1619 dev->queue_count++;
1620
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001621 spin_lock(&dev_list_lock);
1622 list_add(&dev->node, &dev_list);
1623 spin_unlock(&dev_list_lock);
1624
Matthew Wilcox740216f2011-02-15 16:28:20 -05001625 result = nvme_dev_add(dev);
1626 if (result)
1627 goto delete;
1628
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001629 return 0;
1630
1631 delete:
Matthew Wilcox740216f2011-02-15 16:28:20 -05001632 spin_lock(&dev_list_lock);
1633 list_del(&dev->node);
1634 spin_unlock(&dev_list_lock);
1635
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001636 nvme_free_queues(dev);
1637 unmap:
1638 iounmap(dev->bar);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001639 disable_msix:
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001640 pci_disable_msix(pdev);
1641 nvme_release_instance(dev);
Matthew Wilcox091b6092011-02-10 09:56:01 -05001642 nvme_release_prp_pools(dev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001643 disable:
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001644 pci_disable_device(pdev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001645 pci_release_regions(pdev);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001646 free:
1647 kfree(dev->queues);
1648 kfree(dev->entry);
1649 kfree(dev);
1650 return result;
1651}
1652
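/* PCI remove path: undo nvme_probe()'s setup in reverse order. */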
1653static void __devexit nvme_remove(struct pci_dev *pdev)
1654{
1655 struct nvme_dev *dev = pci_get_drvdata(pdev);
1656 nvme_dev_remove(dev);
1657 pci_disable_msix(pdev);
1658 iounmap(dev->bar);
1659 nvme_release_instance(dev);
Matthew Wilcox091b6092011-02-10 09:56:01 -05001660 nvme_release_prp_pools(dev);
Shane Michael Matthews0ee5a7d2011-02-01 08:49:30 -05001661 pci_disable_device(pdev);
Matthew Wilcox574e8b92011-02-01 16:24:35 -05001662 pci_release_regions(pdev);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001663 kfree(dev->queues);
1664 kfree(dev->entry);
1665 kfree(dev);
1666}
1667
1668/* These functions are yet to be implemented */
1669#define nvme_error_detected NULL
1670#define nvme_dump_registers NULL
1671#define nvme_link_reset NULL
1672#define nvme_slot_reset NULL
1673#define nvme_error_resume NULL
1674#define nvme_suspend NULL
1675#define nvme_resume NULL
1676
1677static struct pci_error_handlers nvme_err_handler = {
1678 .error_detected = nvme_error_detected,
1679 .mmio_enabled = nvme_dump_registers,
1680 .link_reset = nvme_link_reset,
1681 .slot_reset = nvme_slot_reset,
1682 .resume = nvme_error_resume,
1683};
1684
1685/* Move to pci_ids.h later */
1686#define PCI_CLASS_STORAGE_EXPRESS 0x010802
1687
1688static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
1689 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
1690 { 0, }
1691};
1692MODULE_DEVICE_TABLE(pci, nvme_id_table);
1693
1694static struct pci_driver nvme_driver = {
1695 .name = "nvme",
1696 .id_table = nvme_id_table,
1697 .probe = nvme_probe,
1698 .remove = __devexit_p(nvme_remove),
1699 .suspend = nvme_suspend,
1700 .resume = nvme_resume,
1701 .err_handler = &nvme_err_handler,
1702};
1703
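/*
 * Module init: start the polling kthread first, then register the block
 * major and the PCI driver; failures unwind the earlier steps.
 */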
1704static int __init nvme_init(void)
1705{
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001706 int result = -EBUSY;
1707
1708 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
1709 if (IS_ERR(nvme_thread))
1710 return PTR_ERR(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001711
1712 nvme_major = register_blkdev(nvme_major, "nvme");
1713 if (nvme_major <= 0)
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001714 goto kill_kthread;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001715
1716 result = pci_register_driver(&nvme_driver);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001717 if (result)
1718 goto unregister_blkdev;
1719 return 0;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001720
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001721 unregister_blkdev:
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001722 unregister_blkdev(nvme_major, "nvme");
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001723 kill_kthread:
1724 kthread_stop(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001725 return result;
1726}
1727
1728static void __exit nvme_exit(void)
1729{
1730 pci_unregister_driver(&nvme_driver);
1731 unregister_blkdev(nvme_major, "nvme");
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001732 kthread_stop(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001733}
1734
1735MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
1736MODULE_LICENSE("GPL");
Matthew Wilcox366e8212012-01-10 16:30:15 -05001737MODULE_VERSION("0.8");
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001738module_init(nvme_init);
1739module_exit(nvme_exit);