/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};

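/*
 * The flexible cmdid_data[] member at the end of struct nvme_queue is laid
 * out as the command id allocation bitmap (one bit per command, rounded up
 * to whole longs) followed by one struct nvme_cmd_info per command id.
 */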
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 * @timeout: How long (in jiffies) the command may stay outstanding before
 *	     it is considered timed out
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  The handler and context are
 * stored in the nvme_cmd_info entry for the command id, so ctx must be
 * distinguishable from the special completion values defined below.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

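/*
 * Basing the sentinels on POISON_POINTER_DELTA keeps them out of any
 * mapped range, so a stray dereference of one should fault immediately
 * rather than silently read plausible memory.  special_completion()
 * recognises them and warns about anything unexpected, e.g. a command id
 * that completes twice or was never outstanding.
 */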
static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

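/*
 * Queue 0 is the admin queue; the per-CPU I/O queues start at index 1,
 * hence the +1 below.  get_cpu() disables preemption, so every call to
 * get_nvmeq() must be paired with put_nvmeq().
 */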
struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
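
/*
 * Worked example with 4KiB pages: a 64KiB transfer needs at most 17 PRP
 * entries (the extra PAGE_SIZE above allows for an unaligned start) at
 * 8 bytes each; a list page chains through its final slot, leaving
 * PAGE_SIZE - 8 usable bytes, so a single list page suffices.
 */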

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
		iod->nents = 0;
		iod->start_time = jiffies;
	}

	return iod;
}

void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void nvme_start_io_acct(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();
	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (iod->nents) {
		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		nvme_end_io_acct(bio, iod->start_time);
	}
	nvme_free_iod(dev, iod);
	if (status)
		bio_endio(bio, -EIO);
	else
		bio_endio(bio, 0);
}

/* length is in bytes.  gfp flags indicates whether we may sleep. */
int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
			struct nvme_iod *iod, int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
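	/*
	 * Fill the PRP list eight bytes at a time.  When a list page fills
	 * up, its final slot becomes a chain pointer: the displaced entry
	 * is carried over to slot 0 of a freshly allocated page, which is
	 * what the i == PAGE_SIZE / 8 case below does.
	 */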
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

struct nvme_bio_pair {
	struct bio b1, b2, *parent;
	struct bio_vec *bv1, *bv2;
	int err;
	atomic_t cnt;
};

static void nvme_bio_pair_endio(struct bio *bio, int err)
{
	struct nvme_bio_pair *bp = bio->bi_private;

	if (err)
		bp->err = err;

	if (atomic_dec_and_test(&bp->cnt)) {
		bio_endio(bp->parent, bp->err);
		kfree(bp->bv1);
		kfree(bp->bv2);
		kfree(bp);
	}
}

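/*
 * Split a bio in two at segment @idx (plus @offset bytes into that vector)
 * when device limits prevent submitting it as a single command.  Both
 * halves complete through nvme_bio_pair_endio(), which ends the parent bio
 * once the pair's reference count drops to zero.
 */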
static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
							int len, int offset)
{
	struct nvme_bio_pair *bp;

	BUG_ON(len > bio->bi_size);
	BUG_ON(idx > bio->bi_vcnt);

	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
	if (!bp)
		return NULL;
	bp->err = 0;

	bp->b1 = *bio;
	bp->b2 = *bio;

	bp->b1.bi_size = len;
	bp->b2.bi_size -= len;
	bp->b1.bi_vcnt = idx;
	bp->b2.bi_idx = idx;
	bp->b2.bi_sector += len >> 9;

	if (offset) {
		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
								GFP_ATOMIC);
		if (!bp->bv1)
			goto split_fail_1;

		bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
								GFP_ATOMIC);
		if (!bp->bv2)
			goto split_fail_2;

		memcpy(bp->bv1, bio->bi_io_vec,
			bio->bi_max_vecs * sizeof(struct bio_vec));
		memcpy(bp->bv2, bio->bi_io_vec,
			bio->bi_max_vecs * sizeof(struct bio_vec));

		bp->b1.bi_io_vec = bp->bv1;
		bp->b2.bi_io_vec = bp->bv2;
		bp->b2.bi_io_vec[idx].bv_offset += offset;
		bp->b2.bi_io_vec[idx].bv_len -= offset;
		bp->b1.bi_io_vec[idx].bv_len = offset;
		bp->b1.bi_vcnt++;
	} else
		bp->bv1 = bp->bv2 = NULL;

	bp->b1.bi_private = bp;
	bp->b2.bi_private = bp;

	bp->b1.bi_end_io = nvme_bio_pair_endio;
	bp->b2.bi_end_io = nvme_bio_pair_endio;

	bp->parent = bio;
	atomic_set(&bp->cnt, 2);

	return bp;

 split_fail_2:
	kfree(bp->bv1);
 split_fail_1:
	kfree(bp);
	return NULL;
}

static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
						int idx, int len, int offset)
{
	struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
	if (!bp)
		return -ENOMEM;

	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, &bp->b1);
	bio_list_add(&nvmeq->sq_cong, &bp->b2);

	return 0;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, length = 0, nsegs = 0, split_len = bio->bi_size;

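	/*
	 * For devices with a stripe size quirk, a command presumably must
	 * not cross a stripe boundary: stripe_size is a power of two, so
	 * this computes the bytes left before the next boundary, and the
	 * bio is split there if a segment would run past it.
	 */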
	if (nvmeq->dev->stripe_size)
		split_len = nvmeq->dev->stripe_size -
			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));

	sg_init_table(iod->sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				return nvme_split_and_submit(bio, nvmeq, i,
								length, 0);

			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}

		if (split_len - length < bvec->bv_len)
			return nvme_split_and_submit(bio, nvmeq, i, split_len,
							split_len - length);
		length += bvec->bv_len;
		bvprv = bvec;
	}
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
		return -ENOMEM;

	BUG_ON(length != bio->bi_size);
	return length;
}

/*
 * We reuse the small pool to allocate the 16-byte range here as it is not
 * worth having a special pool for these or additional cases to handle freeing
 * the iod.
 */
static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct bio *bio, struct nvme_iod *iod, int cmdid)
{
	struct nvme_dsm_range *range;
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
							&iod->first_dma);
	if (!range)
		return -ENOMEM;

	iod_list(iod)[0] = (__le64 *)range;
	iod->npages = 0;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.command_id = cmdid;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	result = -ENOMEM;
	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if (bio->bi_rw & REQ_DISCARD) {
		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
		if (result)
			goto free_cmdid;
		return result;
	}
	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
	if (result <= 0)
		goto free_cmdid;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	nvme_start_io_acct(bio);
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_cmdid:
	free_cmdid(nvmeq, cmdid, NULL);
 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}

static int nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

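	/*
	 * Every completion entry carries a phase tag that the controller
	 * inverts on each pass through the queue.  An entry whose phase
	 * bit doesn't match the expected value hasn't been written yet,
	 * which is how this loop detects the end of the new completions.
	 */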
	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return 0;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	nvmeq->cqe_seen = 1;
	return 1;
}

static void nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
						u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

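	/*
	 * Mark the task killable before submitting: if the completion
	 * fires before schedule_timeout() runs, the wake_up_process() in
	 * sync_completion() puts us back on the runqueue and the schedule
	 * returns without sleeping.
	 */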
	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule_timeout(timeout);

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

/**
 * nvme_cancel_ios - Cancel outstanding I/Os
 * @queue: The queue to cancel I/Os on
 * @timeout: True to only cancel I/Os which have timed out
 */
static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
		};

		if (timeout && !time_after(now, info[cmdid].timeout))
			continue;
		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
			continue;
		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_cancel_ios(nvmeq, false);
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		bio_endio(bio, -EIO);
	}
	spin_unlock_irq(&nvmeq->q_lock);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	nvme_free_queue_mem(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
						sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
					    int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}

static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
{
	unsigned long timeout;
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
	u32 cc = readl(&dev->bar->cc);

	if (cc & NVME_CC_ENABLE)
		writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
	return nvme_wait_ready(dev, cap, false);
}

static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
{
	return nvme_wait_ready(dev, cap, true);
}

static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = readq(&dev->bar->cap);
	struct nvme_queue *nvmeq;

	result = nvme_disable_ctrl(dev, cap);
	if (result < 0)
		return result;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

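	/*
	 * AQA holds the admin submission queue size in its low 16 bits and
	 * the admin completion queue size in its high 16 bits, both
	 * zero-based; the two queues share a depth here.
	 */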
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	result = nvme_enable_ctrl(dev, cap);
	if (result)
		goto free_q;

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	if (result)
		goto free_q;

	dev->queues[0] = nvmeq;
	return result;

 free_q:
	nvme_free_queue_mem(nvmeq);
	return result;
}

struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

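	/*
	 * PRP entries reserve the low two offset bits, so user buffers fed
	 * into them must be dword-aligned.
	 */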
	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length || length > INT_MAX - PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
				min_t(unsigned, length, PAGE_SIZE - offset),
				offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}

void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	int status, i;
	struct nvme_iod *iod, *meta_iod = NULL;
	dma_addr_t meta_dma_addr;
	void *meta, *uninitialized_var(meta_mem);

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;

	if (meta_len && ((io.metadata & 3) || !io.metadata))
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	if (meta_len) {
		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
								meta_len);
		if (IS_ERR(meta_iod)) {
			status = PTR_ERR(meta_iod);
			meta_iod = NULL;
			goto unmap;
		}

		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
						&meta_dma_addr, GFP_KERNEL);
		if (!meta_mem) {
			status = -ENOMEM;
			goto unmap;
		}

1361 if (io.opcode & 1) {
1362 int meta_offset = 0;
1363
1364 for (i = 0; i < meta_iod->nents; i++) {
1365 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1366 meta_iod->sg[i].offset;
1367 memcpy(meta_mem + meta_offset, meta,
1368 meta_iod->sg[i].length);
1369 kunmap_atomic(meta);
1370 meta_offset += meta_iod->sg[i].length;
1371 }
1372 }
1373
1374 c.rw.metadata = cpu_to_le64(meta_dma_addr);
1375 }
1376
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001377 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
Shane Michael Matthewse025344c2011-02-10 08:51:24 -05001378
Matthew Wilcox040a93b2011-12-20 11:04:12 -05001379 nvmeq = get_nvmeq(dev);
Matthew Wilcoxfa922822011-03-16 16:29:00 -04001380 /*
1381 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
Matthew Wilcoxb1ad37e2011-02-04 16:14:30 -05001382 * disabled. We may be preempted at any point, and be rescheduled
1383 * to a different CPU. That will cause cacheline bouncing, but no
1384 * additional races since q_lock already protects against other CPUs.
1385 */
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001386 put_nvmeq(nvmeq);
Matthew Wilcoxb77954c2011-05-12 13:51:41 -04001387 if (length != (io.nblocks + 1) << ns->lba_shift)
1388 status = -ENOMEM;
1389 else
Matthew Wilcoxff976d72011-12-20 13:53:01 -05001390 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001391
Keith Buschf410c682013-04-23 17:23:59 -06001392 if (meta_len) {
1393 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
1394 int meta_offset = 0;
1395
1396 for (i = 0; i < meta_iod->nents; i++) {
1397 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
1398 meta_iod->sg[i].offset;
1399 memcpy(meta, meta_mem + meta_offset,
1400 meta_iod->sg[i].length);
1401 kunmap_atomic(meta);
1402 meta_offset += meta_iod->sg[i].length;
1403 }
1404 }
1405
1406 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
1407 meta_dma_addr);
1408 }
1409
1410 unmap:
Matthew Wilcox1c2ad9f2012-01-06 13:52:56 -07001411 nvme_unmap_user_pages(dev, io.opcode & 1, iod);
Matthew Wilcoxeca18b22011-12-20 13:34:52 -05001412 nvme_free_iod(dev, iod);
Keith Buschf410c682013-04-23 17:23:59 -06001413
1414 if (meta_iod) {
1415 nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
1416 nvme_free_iod(dev, meta_iod);
1417 }
1418
Matthew Wilcoxa53295b2011-02-01 16:13:29 -05001419 return status;
1420}
1421
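/*
 * Handler for NVME_IOCTL_ADMIN_CMD: pass an arbitrary admin command from
 * userspace through to the admin queue.  Restricted to CAP_SYS_ADMIN since
 * an arbitrary admin command can reformat or destroy the device.
 */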
static int nvme_user_admin_cmd(struct nvme_dev *dev,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *uninitialized_var(iod);
	unsigned timeout;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, &c.common, iod, length,
								GFP_KERNEL);
	}

	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
								ADMIN_TIMEOUT;
	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
								timeout);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
		nvme_free_iod(dev, iod);
	}

	if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
							sizeof(cmd.result)))
		status = -EFAULT;

	return status;
}

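/*
 * Block device ioctl dispatch for the namespace nodes (/dev/nvme0n1 etc.).
 * A minimal userspace sketch (error handling omitted) might look like:
 *
 *	int fd = open("/dev/nvme0n1", O_RDONLY);
 *	int nsid = ioctl(fd, NVME_IOCTL_ID);
 *
 * NVME_IOCTL_ID returns the namespace id as a positive return value, hence
 * the force_successful_syscall_return() below.
 */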
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

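/*
 * Retry bios that were queued on sq_cong because the submission queue was
 * full.  Stop at the first bio that still fails to submit and push it back
 * to the head of the list so ordering is preserved.
 */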
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;

		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			if (bio_list_empty(&nvmeq->sq_cong))
				add_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
	}
}

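/*
 * One kernel thread polls every registered device roughly once a second:
 * it reaps completions as a safety net alongside the interrupt handler,
 * cancels timed-out I/Os and resubmits congested bios.
 */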
static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				nvme_process_cq(nvmeq);
				nvme_cancel_ios(nvmeq, true);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		schedule_timeout(round_jiffies_relative(HZ));
	}
	return 0;
}

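/*
 * Namespace indices come from an IDA so disk minor numbers are handed out
 * densely; dev_list_lock doubles as the lock protecting the IDA.
 */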
static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

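/*
 * Advertise discard support to the block layer; called only when the
 * controller reports Dataset Management support (ONCS DSM bit).
 */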
static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);
	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	ns->queue->limits.max_discard_sectors = 0xffffffff;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

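/*
 * Create the gendisk and request queue for one namespace.  Returns NULL on
 * any failure (including hidden LBA ranges), which the caller treats as
 * "skip this namespace".
 */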
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (dev->max_hw_sectors)
		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (dev->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	int index = ns->disk->first_minor / NVME_MINORS;
	put_disk(ns->disk);
	nvme_put_ns_idx(index);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

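/*
 * Ask the controller for @count I/O submission and completion queues via
 * Set Features (Number of Queues).  Both fields are 0's based, so e.g.
 * count = 4 is encoded as q_count = 0x00030003.  The controller may grant
 * fewer queues than requested; return the usable minimum.
 */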
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return status < 0 ? -EIO : -EBUSY;
	return min(result & 0xffff, result >> 16) + 1;
}

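/*
 * Negotiate the I/O queue count, grow the doorbell BAR mapping if needed,
 * then allocate interrupt vectors, falling back progressively from MSI-X
 * to MSI to a single vector.  Any remaining possible CPUs share the
 * created queues via a modulo mapping.
 */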
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct pci_dev *pdev = dev->pci_dev;
	int result, cpu, i, vecs, nr_io_queues, db_bar_size, q_depth;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
	if (db_bar_size > 8192) {
		iounmap(dev->bar);
		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		dev->queues[0]->q_db = dev->dbs;
	}

	vecs = nr_io_queues;
	for (i = 0; i < vecs; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(pdev, dev->entry, vecs);
		if (result <= 0)
			break;
		vecs = result;
	}

	if (result < 0) {
		vecs = nr_io_queues;
		if (vecs > 32)
			vecs = 32;
		for (;;) {
			result = pci_enable_msi_block(pdev, vecs);
			if (result == 0) {
				for (i = 0; i < vecs; i++)
					dev->entry[i].vector = i + pdev->irq;
				break;
			} else if (result < 0) {
				vecs = 1;
				break;
			}
			vecs = result;
		}
	}

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */
	nr_io_queues = vecs;

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
								NVME_Q_DEPTH);
	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}

	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

/*
 * Return: error value if an error occurred setting up the queues or calling
 * Identify Device.  0 if these succeeded, even if adding some of the
 * namespaces failed.  At the moment, these failures are silent.  TBD which
 * failures should be reported.
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	int res;
	unsigned nn, i;
	struct nvme_ns *ns;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;
	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	dev->oncs = le16_to_cpup(&ctrl->oncs);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
	if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
			(dev->pci_dev->device == 0x0953) && ctrl->vs[3])
		dev->stripe_size = 1 << (ctrl->vs[3] + shift);

	id_ns = mem;
	for (i = 1; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096, NULL);
		if (res)
			memset(mem + 4096, 0, 4096);

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);
	res = 0;

 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}

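/*
 * Enable the PCI device, set the DMA mask (64-bit preferred, 32-bit
 * fallback) and map the first 8k of BAR 0, which covers the controller
 * registers and the initial doorbell area.
 */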
static int nvme_dev_map(struct nvme_dev *dev)
{
	int bars, result = -ENOMEM;
	struct pci_dev *pdev = dev->pci_dev;

	if (pci_enable_device_mem(pdev))
		return result;

	dev->entry[0].vector = pdev->irq;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable_pci;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	else
		goto disable_pci;

	pci_set_drvdata(pdev, dev);
	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar)
		goto disable;

	dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	return 0;

 disable:
	pci_release_regions(pdev);
 disable_pci:
	pci_disable_device(pdev);
	return result;
}

static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->pci_dev->msi_enabled)
		pci_disable_msi(dev->pci_dev);
	else if (dev->pci_dev->msix_enabled)
		pci_disable_msix(dev->pci_dev);

	if (dev->bar) {
		iounmap(dev->bar);
		dev->bar = NULL;
	}

	pci_release_regions(dev->pci_dev);
	if (pci_is_enabled(dev->pci_dev))
		pci_disable_device(dev->pci_dev);
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

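/*
 * PRP lists come from two DMA pools: full pages for large transfers and
 * 256-byte chunks for small ones, so a small I/O does not burn a whole
 * page on its PRP list.
 */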
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_dev *dev)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	dev->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, dev->instance);
	spin_unlock(&dev_list_lock);
}

static void nvme_free_dev(struct kref *kref)
{
	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
	nvme_dev_remove(dev);
	nvme_dev_unmap(dev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

static int nvme_dev_open(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
								miscdev);
	kref_get(&dev->kref);
	f->private_data = dev;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = f->private_data;
	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct nvme_dev *dev = f->private_data;
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(dev, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

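/*
 * Device initialisation: allocate the per-CPU vector and queue arrays,
 * assign an instance number, create the PRP pools, map the BAR, bring up
 * the admin queue, scan namespaces, and finally register the per-device
 * misc node (/dev/nvmeN) used for admin passthrough.
 */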
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	result = nvme_set_instance(dev);
	if (result)
		goto free;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto release;

	result = nvme_dev_map(dev);
	if (result)
		goto release_pools;

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result && result != -EBUSY)
		goto delete;

	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
	dev->miscdev.parent = &pdev->dev;
	dev->miscdev.name = dev->name;
	dev->miscdev.fops = &nvme_dev_fops;
	result = misc_register(&dev->miscdev);
	if (result)
		goto remove;

	kref_init(&dev->kref);
	return 0;

 remove:
	nvme_dev_remove(dev);
 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	nvme_free_queues(dev);
 unmap:
	nvme_dev_unmap(dev);
 release_pools:
	nvme_release_prp_pools(dev);
 release:
	nvme_release_instance(dev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	misc_deregister(&dev->miscdev);
	kref_put(&dev->kref, nvme_free_dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

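/*
 * Module init: start the polling kthread before registering the PCI
 * driver so it is available as soon as the first device probes, then
 * register the block major and the driver itself.
 */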
static int __init nvme_init(void)
{
	int result;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		goto kill_kthread;
	else if (result > 0)
		nvme_major = result;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.8");
module_init(nvme_init);
module_exit(nvme_exit);