/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH	1024
#define SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define NVME_MINORS	64

static int nvme_major;
module_param(nvme_major, int, 0);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
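	/*
	 * cmdid_data is two arrays in one allocation: a bitmap of
	 * q_depth bits tracking which command IDs are in use, followed
	 * by q_depth unsigned longs holding each command's context word
	 * (handler ID in the bottom two bits, ctx pointer above them).
	 */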
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler)
{
	int depth = nvmeq->q_depth;
	unsigned long data = (unsigned long)ctx | handler;
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(depth)] = data;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
								int handler)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;

	data = nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(nvmeq->q_depth)];
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

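/*
 * I/O queues are numbered from 1; queue 0 is the admin queue.  Map each
 * CPU to its own I/O queue when there are enough queues, otherwise fold
 * the CPU number onto the available queues with a power-of-two modulus.
 * get_cpu() disables preemption until put_nvmeq() calls put_cpu().
 */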
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	int qid, cpu = get_cpu();
	if (cpu < ns->dev->queue_count)
		qid = cpu + 1;
	else
		qid = (cpu % rounddown_pow_of_two(ns->dev->queue_count)) + 1;
	return ns->dev->queues[qid];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	/* XXX: Need to check tail isn't going to overrun head */
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	/* The doorbell takes the new tail: one past the last valid entry */
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

struct nvme_req_info {
	struct bio *bio;
	int nents;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_req_info *alloc_info(unsigned nseg, gfp_t gfp)
{
	return kmalloc(sizeof(struct nvme_req_info) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_info(struct nvme_req_info *info)
{
	kfree(info);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_req_info *info = ctx;
	struct bio *bio = info->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, info->sg, info->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_info(info);
	bio_endio(bio, status ? -EIO : 0);
}

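/*
 * Fill in the two PRP (Physical Region Page) entries of a command.
 * PRP1 may begin at any offset within a page; PRP2 points at the
 * second page of the transfer.  Transfers spanning more than two
 * pages need a PRP list, which this version does not yet build
 * (see the XXX below).
 */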
/* length is in bytes */
static void nvme_setup_prps(struct nvme_common_command *cmd,
					struct scatterlist *sg, int length)
{
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return;
	}

	/* XXX: support PRP lists */
}

static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec;
	struct scatterlist *sg = info->sg;
	int i, nsegs = 0;

	sg_init_table(sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		sg++;
		/* XXX: handle non-mergeable here */
		nsegs++;
	}
	info->nents = nsegs;

	return dma_map_sg(dev, info->sg, info->nents, dma_dir);
}

static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_req_info *info;
	enum dma_data_direction dma_dir;
	int cmdid;
	u16 control;
	u32 dsmgmt;
	unsigned long flags;
	int psegs = bio_phys_segments(ns->queue, bio);

	info = alloc_info(psegs, GFP_NOIO);
	if (!info)
		goto congestion;
	info->bio = bio;

	cmdid = alloc_cmdid(nvmeq, info, bio_completion_id);
	if (unlikely(cmdid < 0))
		goto free_info;

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	spin_lock_irqsave(&nvmeq->q_lock, flags);
	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);

	cmnd->rw.flags = 1;
	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	nvme_setup_prps(&cmnd->common, info->sg, bio->bi_size);
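	/*
	 * bi_sector is in 512-byte units; shifting by (lba_shift - 9)
	 * converts it to device LBAs.  The length field is 0's based,
	 * hence the - 1.
	 */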
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;

 free_info:
	free_info(info);
 congestion:
	return -EBUSY;
}

/*
 * NB: a non-zero return value would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);

	if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
		blk_set_queue_congested(q, rw_is_sync(bio->bi_rw));
		bio_list_add(&nvmeq->sq_cong, bio);
	}
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

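/*
 * Each queue pair owns two doorbells: the submission queue tail at
 * dbs[qid * 2] and the completion queue head at dbs[qid * 2 + 1],
 * which is why the head is written to q_db + 1 below.  Completion
 * entries carry a phase tag in bit 0 of the status field; the
 * controller inverts it on each pass through the queue, so an entry
 * is new only while its phase bit matches the queue's cq_phase.
 */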
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	static const completion_fn completions[4] = {
		[sync_completion_id] = sync_completion,
		[bio_completion_id]  = bio_completion,
	};

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	return nvme_process_cq(data);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *q, struct nvme_command *cmd,
								u32 *result)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(q, &cmdinfo, sync_completion_id);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

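	/*
	 * Set the task state before ringing the doorbell so that a
	 * completion arriving before schedule() simply leaves the task
	 * runnable rather than causing a lost wakeup.
	 */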
	set_current_state(TASK_UNINTERRUPTIBLE);
	nvme_submit_cmd(q, cmd);
	schedule();

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];

	free_irq(dev->entry[nvmeq->cq_vector].vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth + BITS_TO_LONGS(depth)) * sizeof(long);
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	/* q_depth isn't set yet on this path, so free with the local depth */
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return NULL;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return NULL;
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

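	/*
	 * AQA holds the 0's based depths of the admin submission queue
	 * (low half) and admin completion queue (high half), hence
	 * q_depth - 1 replicated into both halves.
	 */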
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

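/*
 * Pin down a user buffer with get_user_pages_fast() and build a
 * scatterlist covering it, accounting for the offset of the buffer
 * within its first page.  Returns the number of DMA-mapped entries,
 * or a negative errno.
 */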
static int nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length,
				struct scatterlist **sgp)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;

	if (addr & 3)
		return -EINVAL;
	if (!length)
		return -EINVAL;

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		err = -ENOMEM;
		goto put_pages;
	}
	sg_init_table(sg, count);
	sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
	length -= (PAGE_SIZE - offset);
	for (i = 1; i < count; i++) {
		sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
		length -= PAGE_SIZE;
	}

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto put_pages;

	kfree(pages);
	*sgp = sg;
	return nents;

 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return err;
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, int length,
				struct scatterlist *sg, int nents)
{
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	dma_unmap_sg(&dev->pci_dev->dev, sg, nents,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));

	kfree(sg);
}

static int nvme_submit_user_admin_command(struct nvme_dev *dev,
					unsigned long addr, unsigned length,
					struct nvme_command *cmd)
{
	int err, nents;
	struct scatterlist *sg;

	nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
	if (nents < 0)
		return nents;
	nvme_setup_prps(&cmd->common, sg, length);
	err = nvme_submit_admin_cmd(dev, cmd, NULL);
	nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
	return err ? -EIO : 0;
}

static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(ns->ns_id);
	c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	u32 result;
	int nents, status;
	struct scatterlist *sg;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = io.nblocks << io.block_shift;
	nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(io.nsid);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks - 1);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);	/* XXX: endian? */
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);
	/* XXX: metadata */
	nvme_setup_prps(&c.common, sg, length);

	nvmeq = get_nvmeq(ns);
	status = nvme_submit_sync_cmd(nvmeq, &c, &result);
	put_nvmeq(nvmeq);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
	put_user(result, &uio->result);
	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_IDENTIFY_NS:
		return nvme_identify(ns, arg, 0);
	case NVME_IOCTL_IDENTIFY_CTRL:
		return nvme_identify(ns, arg, 1);
	case NVME_IOCTL_GET_RANGE_TYPE:
		return nvme_get_range_type(ns, arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
};

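/*
 * FLBAS selects which of the namespace's supported LBA formats is in
 * use; that format's "ds" field is log2 of the LBA data size, kept in
 * lba_shift for converting between 512-byte sectors and device LBAs.
 */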
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
				QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = index;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * index;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	put_disk(ns->disk);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

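/*
 * The Number of Queues feature takes the requested count of I/O
 * submission queues in the low 16 bits of dword11 and completion
 * queues in the high 16 bits, both 0's based.  The controller replies
 * in the same format with the counts it actually allocated, so the
 * usable queue count is the smaller of the two halves plus one.
 */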
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	struct nvme_command c;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
	c.features.dword11 = cpu_to_le32(q_count);

	status = nvme_submit_admin_cmd(dev, &c, &result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_queues;

	nr_queues = num_online_cpus();
	result = set_queue_count(dev, nr_queues);
	if (result < 0)
		return result;
	if (result < nr_queues)
		nr_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	for (i = 0; i < nr_queues; i++)
		dev->entry[i].entry = i;
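	/*
	 * pci_enable_msix() returns 0 on success, a positive count of
	 * vectors that could be allocated if the request was too large,
	 * or a negative error code.  Retry with the smaller count, and
	 * fall back to a single queue on a hard failure.
	 */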
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry, nr_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_queues = result;
			continue;
		} else {
			nr_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (!dev->queues[i + 1])
			return -ENOMEM;
		dev->queue_count++;
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

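/*
 * Bring up the I/O queues, identify the controller, then walk the
 * namespace IDs: identify each namespace, fetch its LBA range types,
 * and register a disk for every namespace that has capacity and is
 * not marked hidden.
 */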
static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = id;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	/* Namespace IDs are 1-based */
	for (i = 1; i <= nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	/* TODO: wait until all I/O has finished, or cancel it */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&dev->pci_dev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	result = nvme_dev_add(dev);
	if (result)
		goto delete;
	return 0;

 delete:
	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	/*
	 * register_blkdev() returns 0 when a specific major was requested
	 * and granted, or the allocated major when called with 0.
	 */
	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		return result;
	else if (result > 0)
		nvme_major = result;

	result = pci_register_driver(&nvme_driver);
	if (!result)
		return 0;

	unregister_blkdev(nvme_major, "nvme");
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
module_init(nvme_init);
module_exit(nvme_exit);