/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

struct nvme_cmd_info {
	unsigned long ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
							unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].ctx = (unsigned long)ctx | handler;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
						int handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/*
 * If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work.  Consider using a special
 * CMD_CTX value instead, if that works for your situation.
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

/* Special values must be a multiple of 4, and less than 0x1000 */
#define CMD_CTX_BASE		(POISON_POINTER_DELTA + sync_completion_id)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

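/*
 * How the two halves of the tagging scheme fit together (explanatory note,
 * not part of the original source): alloc_cmdid() stores
 * "(unsigned long)ctx | handler" for each command ID, so the completion
 * paths recover both pieces with "handler = data & 3" and
 * "ptr = (void *)(data & ~3UL)".  The CMD_CTX_* values above are offsets
 * from POISON_POINTER_DELTA, so they can never alias a genuine 4-byte
 * aligned ctx pointer and remain distinguishable in sync_completion().
 */
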
/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth)
		return CMD_CTX_INVALID;
	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

static unsigned long cancel_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return data;
}

static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	return ns->dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

struct nvme_prps {
	int npages;
	dma_addr_t first_dma;
	__le64 *list[0];
};

static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	dma_addr_t prp_dma;

	if (!prps)
		return;

	prp_dma = prps->first_dma;

	if (prps->npages == 0)
		dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
	for (i = 0; i < prps->npages; i++) {
		__le64 *prp_list = prps->list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(prps);
}

struct nvme_bio {
	struct bio *bio;
	int nents;
	struct nvme_prps *prps;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
	return kzalloc(sizeof(struct nvme_bio) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
	nvme_free_prps(nvmeq->dev, nbio->prps);
	kfree(nbio);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_bio *nbio = ctx;
	struct bio *bio = nbio->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_nbio(nvmeq, nbio);
	if (status) {
		bio_endio(bio, -EIO);
	} else if (bio->bi_vcnt > bio->bi_idx) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
		wake_up_process(nvme_thread);
	} else {
		bio_endio(bio, 0);
	}
}

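/*
 * Orientation for the PRP handling below (a sketch of the NVMe addressing
 * rules, not original driver text): prp1 always points at the first,
 * possibly unaligned, page of the transfer.  If the whole transfer fits in
 * two pages, prp2 points at the second page directly.  Larger transfers
 * use a PRP list: prp2 points at a page of __le64 entries, one per
 * remaining page, and when a list page fills up its last slot is rewritten
 * to chain to the next list page (the "old_prp_list[i - 1]" handling in
 * the loop).  For example, a 3-page transfer ends up as prp1 = page 0 and
 * prp2 -> list {page 1, page 2}.
 */
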
/* length is in bytes.  gfp flags indicates whether we may sleep. */
static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
					struct nvme_common_command *cmd,
					struct scatterlist *sg, int *len,
					gfp_t gfp)
{
	struct dma_pool *pool;
	int length = *len;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, npages, i, prp_page;
	struct nvme_prps *prps = NULL;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return prps;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return prps;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
	if (!prps) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		*len = (*len - length) + PAGE_SIZE;
		return prps;
	}
	prp_page = 0;
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		prps->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		prps->npages = npages;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		*len = (*len - length) + PAGE_SIZE;
		kfree(prps);
		return NULL;
	}
	prps->list[prp_page++] = prp_list;
	prps->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list) {
				*len = (*len - length);
				return prps;
			}
			prps->list[prp_page++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return prps;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, old_idx, length = 0, nsegs = 0;

	sg_init_table(nbio->sg, psegs);
	old_idx = bio->bi_idx;
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				break;
			sg = sg ? sg + 1 : nbio->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		length += bvec->bv_len;
		bvprv = bvec;
	}
	bio->bi_idx = i;
	nbio->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir) == 0) {
		bio->bi_idx = old_idx;
		return -ENOMEM;
	}
	return length;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					sync_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_bio *nbio;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	nbio = alloc_nbio(psegs, GFP_ATOMIC);
	if (!nbio)
		goto nomem;
	nbio->bio = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_nbio;

	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs);
	if (result < 0)
		goto free_nbio;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
							&length, GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	bio->bi_sector += length >> 9;

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_nbio:
	free_nbio(nvmeq, nbio);
 nomem:
	return result;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_CANCELLED))
		return;
	if ((unsigned long)cmdinfo == CMD_CTX_FLUSH)
		return;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
		dev_warn(nvmeq->q_dmadev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
		dev_warn(nvmeq->q_dmadev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

static const completion_fn nvme_completions[4] = {
	[sync_completion_id] = sync_completion,
	[bio_completion_id]  = bio_completion,
};

static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		nvme_completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid);
	spin_unlock_irq(&nvmeq->q_lock);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap;
	unsigned long timeout;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	cap = readq(&dev->bar->cap);
	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

static int nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length,
				struct scatterlist **sgp)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;

	if (addr & 3)
		return -EINVAL;
	if (!length)
		return -EINVAL;

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
	sg_init_table(sg, count);
	sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
	length -= (PAGE_SIZE - offset);
	for (i = 1; i < count; i++) {
		sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
		length -= PAGE_SIZE;
	}

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto put_pages;

	kfree(pages);
	*sgp = sg;
	return nents;

 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return err;
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			unsigned long addr, int length,
			struct scatterlist *sg, int nents)
{
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	dma_unmap_sg(&dev->pci_dev->dev, sg, nents, DMA_FROM_DEVICE);

	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr,
								length, &sg);
		break;
	default:
		return -EINVAL;
	}

	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;
	/* XXX: metadata */
	prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);

	nvmeq = get_nvmeq(ns);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	return status;
}

static int nvme_user_admin_cmd(struct nvme_ns *ns,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length, nents = 0;
	struct scatterlist *sg;
	struct nvme_prps *prps = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		nents = nvme_map_user_pages(dev, 1, cmd.addr, length, &sg);
		if (nents < 0)
			return nents;
		prps = nvme_setup_prps(dev, &c.common, sg, &length, GFP_KERNEL);
	}

	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, 0, cmd.addr, cmd.data_len, sg,
									nents);
		nvme_free_prps(dev, prps);
	}
	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

static void nvme_timeout_ios(struct nvme_queue *nvmeq)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };

		if (!time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
		data = cancel_cmdid(nvmeq, cmdid);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		nvme_completions[handler](nvmeq, ptr, &cqe);
	}
}

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
	}
}

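/*
 * Roughly what the per-driver kernel thread below does, once a second or
 * when woken (summary, not original source text): poll every queue's
 * completion queue in case an interrupt was missed, fail commands whose
 * timeout has expired via nvme_timeout_ios(), and push any bios parked on
 * sq_cong back into the submission queues via nvme_resubmit_bios().
 */
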
static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_timeout_ios(nvmeq);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

1261static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001262 struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
1263{
1264 struct nvme_ns *ns;
1265 struct gendisk *disk;
1266 int lbaf;
1267
1268 if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
1269 return NULL;
1270
1271 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1272 if (!ns)
1273 return NULL;
1274 ns->queue = blk_alloc_queue(GFP_KERNEL);
1275 if (!ns->queue)
1276 goto out_free_ns;
1277 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
1278 QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
1279 blk_queue_make_request(ns->queue, nvme_make_request);
1280 ns->dev = dev;
1281 ns->queue->queuedata = ns;
1282
1283 disk = alloc_disk(NVME_MINORS);
1284 if (!disk)
1285 goto out_free_queue;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001286 ns->ns_id = nsid;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001287 ns->disk = disk;
1288 lbaf = id->flbas & 0xf;
1289 ns->lba_shift = id->lbaf[lbaf].ds;
1290
1291 disk->major = nvme_major;
1292 disk->minors = NVME_MINORS;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001293 disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001294 disk->fops = &nvme_fops;
1295 disk->private_data = ns;
1296 disk->queue = ns->queue;
Matthew Wilcox388f0372011-02-01 12:49:38 -05001297 disk->driverfs_dev = &dev->pci_dev->dev;
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001298 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001299 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
1300
1301 return ns;
1302
1303 out_free_queue:
1304 blk_cleanup_queue(ns->queue);
1305 out_free_ns:
1306 kfree(ns);
1307 return NULL;
1308}
1309
1310static void nvme_ns_free(struct nvme_ns *ns)
1311{
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001312 int index = ns->disk->first_minor / NVME_MINORS;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001313 put_disk(ns->disk);
Matthew Wilcox5aff9382011-05-06 08:45:47 -04001314 nvme_put_ns_idx(index);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001315 blk_cleanup_queue(ns->queue);
1316 kfree(ns);
1317}
1318
Matthew Wilcoxb3b06812011-01-20 09:14:34 -05001319static int set_queue_count(struct nvme_dev *dev, int count)
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001320{
1321 int status;
1322 u32 result;
1323 struct nvme_command c;
Matthew Wilcoxb3b06812011-01-20 09:14:34 -05001324 u32 q_count = (count - 1) | ((count - 1) << 16);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001325
1326 memset(&c, 0, sizeof(c));
1327 c.features.opcode = nvme_admin_get_features;
1328 c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
1329 c.features.dword11 = cpu_to_le32(q_count);
1330
1331 status = nvme_submit_admin_cmd(dev, &c, &result);
1332 if (status)
1333 return -EIO;
1334 return min(result & 0xffff, result >> 16) + 1;
1335}
1336
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}

	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}

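/* Tear down every queue, I/O queues first and the admin queue last. */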
static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

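/*
 * Bring up the I/O queues, identify the controller, then walk namespace IDs
 * 0..nn looking for active namespaces.  A single 8k DMA buffer is reused for
 * every admin command: the first 4k receives Identify data, the second 4k
 * the LBA range types that nvme_alloc_ns() uses to decide whether to hide a
 * namespace.
 */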
static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = id;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	for (i = 0; i <= nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return res;
}

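/*
 * Take the device off the list watched by the nvme kthread, then release
 * every namespace and queue.  Outstanding I/O is not yet drained (see the
 * TODO below).
 */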
static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	/* TODO: wait until all I/O has finished, or cancel it */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

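/*
 * PRP lists come from two dma_pools: a 256-byte pool (32 PRP entries, enough
 * for transfers up to about 128k) for the common small case, and a full-page
 * pool for requests that need a whole PRP list page.
 */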
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

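/*
 * Probe sequence: enable the PCI device and its memory BARs, set 64-bit DMA
 * masks, create the PRP pools, map the controller registers, bring up the
 * admin queue, add the device to the monitored list, and finally scan for
 * namespaces.  The error labels unwind these steps in reverse.
 */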
static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	return 0;

 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

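/*
 * Mirror of nvme_probe(): remove namespaces and queues, then release the
 * MSI-X vectors, register mapping, PCI resources and the device structure.
 */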
static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

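/*
 * NVM Express controllers are matched by PCI class code (base class 01h,
 * subclass 08h, programming interface 02h) rather than by vendor and device
 * ID, so any spec-compliant controller binds to this driver.
 */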
/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

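/*
 * Module init: start the monitoring kthread before registering the PCI
 * driver (probe can run as soon as pci_register_driver() returns), then
 * register the block major and the driver.  Failures unwind in reverse.
 */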
1662static int __init nvme_init(void)
1663{
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001664 int result = -EBUSY;
1665
1666 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
1667 if (IS_ERR(nvme_thread))
1668 return PTR_ERR(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001669
1670 nvme_major = register_blkdev(nvme_major, "nvme");
1671 if (nvme_major <= 0)
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001672 goto kill_kthread;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001673
1674 result = pci_register_driver(&nvme_driver);
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001675 if (result)
1676 goto unregister_blkdev;
1677 return 0;
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001678
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001679 unregister_blkdev:
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001680 unregister_blkdev(nvme_major, "nvme");
Matthew Wilcox1fa6aea2011-03-02 18:37:18 -05001681 kill_kthread:
1682 kthread_stop(nvme_thread);
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001683 return result;
1684}
1685
static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.6");
module_init(nvme_init);
module_exit(nvme_exit);