/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH	1024
#define SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define NVME_MINORS	64

static int nvme_major;
module_param(nvme_major, int, 0);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

/**
 * alloc_cmdid - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the completion handler
 * @handler: The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler)
{
	int depth = nvmeq->q_depth;
	unsigned long data = (unsigned long)ctx | handler;
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(depth)] = data;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
							int handler)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

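/*
 * cmdid_data[] holds the allocation bitmap (the first BITS_TO_LONGS(q_depth)
 * longs) followed by one context word per command ID.  Each context word is
 * the caller's ctx pointer with the handler ID packed into its two low bits,
 * e.g. ((unsigned long)info | bio_completion_id), so free_cmdid() can hand
 * back a value that nvme_process_cq() splits apart again with (data & ~3UL)
 * and (data & 3).
 */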
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;

	data = nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(nvmeq->q_depth)];
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

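/*
 * Queue 0 is the admin queue, so I/O queues are numbered from 1.  Each CPU
 * uses its own I/O queue where possible; when there are more online CPUs
 * than queues, CPUs share queues by folding the CPU number modulo a power
 * of two.  get_cpu()/put_cpu() keep the caller from migrating while it is
 * using the queue it picked.
 */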
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	int qid, cpu = get_cpu();
	if (cpu < ns->dev->queue_count)
		qid = cpu + 1;
	else
		qid = (cpu % rounddown_pow_of_two(ns->dev->queue_count)) + 1;
	return ns->dev->queues[qid];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	/* XXX: Need to check tail isn't going to overrun head */
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

struct nvme_req_info {
	struct bio *bio;
	int nents;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_req_info *alloc_info(unsigned nseg, gfp_t gfp)
{
	return kmalloc(sizeof(struct nvme_req_info) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_info(struct nvme_req_info *info)
{
	kfree(info);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
					struct nvme_completion *cqe)
{
	struct nvme_req_info *info = ctx;
	struct bio *bio = info->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, info->sg, info->nents,
		bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_info(info);
	bio_endio(bio, status ? -EIO : 0);
}

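/*
 * PRP1 covers the data from its starting offset to the end of the first
 * page; PRP2, when needed, covers the second page.  Transfers spanning more
 * than two pages would require a PRP list, which this version does not
 * build yet (see the XXX below).
 */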
/* length is in bytes */
static void nvme_setup_prps(struct nvme_common_command *cmd,
				struct scatterlist *sg, int length)
{
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return;
	}

	/* XXX: support PRP lists */
}

static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec;
	struct scatterlist *sg = info->sg;
	int i, nsegs = 0;

	sg_init_table(sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		/* XXX: handle non-mergable here */
		sg = sg_next(sg);
		nsegs++;
	}
	info->nents = nsegs;

	return dma_map_sg(dev, info->sg, info->nents, dma_dir);
}

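/*
 * Translate a bio into a single NVMe read or write command.  The starting
 * LBA is derived from the 512-byte sector number (bi_sector shifted by
 * lba_shift - 9) and the length field is zero-based, hence the "- 1".
 * REQ_FUA maps onto Force Unit Access and failfast/readahead requests onto
 * the Limited Retry bit.
 */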
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
							struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_req_info *info;
	enum dma_data_direction dma_dir;
	int cmdid;
	u16 control;
	u32 dsmgmt;
	unsigned long flags;
	int psegs = bio_phys_segments(ns->queue, bio);

	info = alloc_info(psegs, GFP_NOIO);
	if (!info)
		goto congestion;
	info->bio = bio;

	cmdid = alloc_cmdid(nvmeq, info, bio_completion_id);
	if (unlikely(cmdid < 0))
		goto free_info;

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	spin_lock_irqsave(&nvmeq->q_lock, flags);
	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);

	cmnd->rw.flags = 1;
	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	nvme_setup_prps(&cmnd->common, info->sg, bio->bi_size);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;

 free_info:
	free_info(info);
 congestion:
	return -EBUSY;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);

	if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
		blk_set_queue_congested(q, rw_is_sync(bio->bi_rw));
		bio_list_add(&nvmeq->sq_cong, bio);
	}
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
					struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
					struct nvme_completion *);

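/*
 * Completion entries are consumed in place: an entry is new only if its
 * phase bit matches the phase the driver expects.  The controller inverts
 * the phase it writes each time it wraps around the queue, so whenever the
 * head index wraps we flip cq_phase to match.  Once a batch has been
 * handled, the new head index is written to the CQ head doorbell at
 * q_db + 1.
 */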
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	static const completion_fn completions[4] = {
		[sync_completion_id] = sync_completion,
		[bio_completion_id] = bio_completion,
	};

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	return nvme_process_cq(data);
}

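/*
 * Synchronous commands record the calling task in a sync_cmd_info, put the
 * task to sleep and then submit the command; sync_completion() copies the
 * result and status out of the completion entry and wakes the task back up.
 */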
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *q, struct nvme_command *cmd,
							u32 *result)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(q, &cmdinfo, sync_completion_id);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_UNINTERRUPTIBLE);
	nvme_submit_cmd(q, cmd);
	schedule();

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
							u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

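/*
 * Create I/O Completion/Submission Queue admin commands: prp1 carries the
 * DMA address of the (physically contiguous) queue memory, qsize is
 * zero-based, and each submission queue is bound to the completion queue
 * with the same ID.
 */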
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];

	free_irq(dev->entry[nvmeq->cq_vector].vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

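/*
 * Allocate the in-memory representation of a queue pair: coherent DMA
 * buffers for the completion and submission queues plus the trailing
 * cmdid_data[] storage for the command ID bitmap and context words (the
 * "extra" bytes).  The doorbell pair for queue N lives at dbs[2 * N]
 * (SQ tail) and dbs[2 * N + 1] (CQ head).
 */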
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
						int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth + BITS_TO_LONGS(depth)) * sizeof(long);
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return NULL;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return NULL;
}

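/*
 * Bring up the admin queue: AQA holds the zero-based sizes of the admin
 * submission and completion queues, ASQ/ACQ hold their base addresses, and
 * CC selects the NVM command set, host page size and round-robin
 * arbitration before the enable bit is set.  CSTS is then polled until the
 * controller reports ready.
 */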
static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

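/*
 * The identify ioctls return data straight into a 4096-byte user buffer.
 * The buffer is pinned with get_user_pages_fast() (two pages if it crosses
 * a page boundary), mapped for DMA and described to the controller through
 * the command's PRP entries, so no bounce buffer is needed.
 */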
static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
{
	struct nvme_dev *dev = ns->dev;
	int i, err, count, nents, offset;
	struct nvme_command c;
	struct scatterlist sg[2];
	struct page *pages[2];

	if (addr & 3)
		return -EINVAL;
	offset = offset_in_page(addr);
	count = offset ? 2 : 1;

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}
	sg_init_table(sg, count);
	sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
	if (count > 1)
		sg_set_page(&sg[1], pages[1], offset, 0);
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);
	if (!nents) {
		err = -ENOMEM;
		goto put_pages;
	}

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
	nvme_setup_prps(&c.common, sg, 4096);
	c.identify.cns = cpu_to_le32(cns);

	err = nvme_submit_admin_cmd(dev, &c, NULL);

	if (err)
		err = -EIO;

	dma_unmap_sg(&dev->pci_dev->dev, sg, nents, DMA_FROM_DEVICE);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);

	return err;
}

static int nvme_get_range_type(struct nvme_ns *ns, void __user *addr)
{
	struct nvme_dev *dev = ns->dev;
	int status;
	struct nvme_command c;
	void *page;
	dma_addr_t dma_addr;

	page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
							GFP_KERNEL);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(ns->ns_id);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	status = nvme_submit_admin_cmd(dev, &c, NULL);

	/* XXX: Assuming first range for now */
	if (status)
		status = -EIO;
	else if (copy_to_user(addr, page, 64))
		status = -EFAULT;

	dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_IDENTIFY_NS:
		return nvme_identify(ns, arg, 0);
	case NVME_IOCTL_IDENTIFY_CTRL:
		return nvme_identify(ns, arg, 1);
	case NVME_IOCTL_GET_RANGE_TYPE:
		return nvme_get_range_type(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner = THIS_MODULE,
	.ioctl = nvme_ioctl,
};

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
				QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = index;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * index;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	put_disk(ns->disk);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

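/*
 * The Number of Queues feature encodes the desired submission and
 * completion queue counts, both zero-based, in the low and high halves of
 * dword11; the controller reports the counts it supports the same way, and
 * we use the smaller of the two.
 */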
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	struct nvme_command c;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
	c.features.dword11 = cpu_to_le32(q_count);

	status = nvme_submit_admin_cmd(dev, &c, &result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

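/*
 * One I/O queue pair is created per online CPU, or fewer if the controller
 * or the MSI-X allocation comes back smaller.  The admin queue's interrupt
 * is re-registered on its MSI-X vector, and each I/O queue's vector gets an
 * affinity hint for the CPU it serves.
 */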
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_queues;

	nr_queues = num_online_cpus();
	result = set_queue_count(dev, nr_queues);
	if (result < 0)
		return result;
	if (result < nr_queues)
		nr_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	for (i = 0; i < nr_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry, nr_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_queues = result;
			continue;
		} else {
			nr_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (!dev->queues[i + 1])
			return -ENOMEM;
		dev->queue_count++;
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

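/*
 * Device scan: issue Identify Controller to learn how many namespaces
 * exist, then for each namespace issue Identify Namespace plus a Get
 * Features (LBA Range Type) into the second half of the same 8K buffer,
 * and register a gendisk for every namespace that has a non-zero capacity
 * and is not marked hidden.
 */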
static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
							GFP_KERNEL);

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	nn = le32_to_cpup(&((struct nvme_id_ctrl *)id)->nn);

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	for (i = 0; i < nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	/* TODO: wait all I/O finished or cancel them */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

static int __devinit nvme_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	int result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
							GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
							GFP_KERNEL);
	if (!dev->queues)
		goto free;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&dev->pci_dev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	result = nvme_dev_add(dev);
	if (result)
		goto delete;
	return 0;

 delete:
	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected = nvme_error_detected,
	.mmio_enabled = nvme_dump_registers,
	.link_reset = nvme_link_reset,
	.slot_reset = nvme_slot_reset,
	.resume = nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name = "nvme",
	.id_table = nvme_id_table,
	.probe = nvme_probe,
	.remove = __devexit_p(nvme_remove),
	.suspend = nvme_suspend,
	.resume = nvme_resume,
	.err_handler = &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		return -EBUSY;

	result = pci_register_driver(&nvme_driver);
	if (!result)
		return 0;

	unregister_blkdev(nvme_major, "nvme");
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
module_init(nvme_init);
module_exit(nvme_exit);