/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define IO_TIMEOUT (5 * HZ)
#define ADMIN_TIMEOUT (60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device. Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

struct nvme_cmd_info {
	unsigned long ctx;
	unsigned long timeout;
};

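/*
 * The command ID bitmap and the per-command info share the flexible array
 * at the end of struct nvme_queue: cmdid_data starts with
 * BITS_TO_LONGS(q_depth) longs of bitmap, and an array of nvme_cmd_info
 * (one entry per command ID) follows immediately after it.
 */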
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The ID of the handler to call
 * @timeout: The number of jiffies until this command should time out
 *
 * Allocate a Command ID for a queue. The data passed in will
 * be passed to the completion handler. This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
							unsigned timeout)
{
	int depth = nvmeq->q_depth;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].ctx = (unsigned long)ctx | handler;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
						int handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work. Consider using a special
 * CMD_CTX value instead, if that works for your situation.
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

#define CMD_CTX_BASE		(POISON_POINTER_DELTA + sync_completion_id)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)

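/*
 * free_cmdid() returns the ctx word exactly as alloc_cmdid() stored it:
 * the handler ID sits in the bottom two bits (every real ctx pointer is at
 * least 4-byte aligned), and nvme_process_cq() unpacks it with (data & 3)
 * and (data & ~3UL). The poison-based CMD_CTX_* values above can never
 * collide with a real ctx pointer.
 */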
static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth)
		return CMD_CTX_INVALID;
	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

static void cancel_cmdid_data(struct nvme_queue *nvmeq, int cmdid)
{
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	info[cmdid].ctx = CMD_CTX_CANCELLED;
}

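/*
 * get_cpu() disables preemption, so the mapping below stays stable while
 * the queue is in use: CPU n normally submits on I/O queue n + 1 (queue 0
 * is the admin queue), and when there are more CPUs than queues the CPU
 * number is folded onto the queues with a power-of-two modulus.
 */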
static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	int qid, cpu = get_cpu();
	if (cpu < ns->dev->queue_count)
		qid = cpu + 1;
	else
		qid = (cpu % rounddown_pow_of_two(ns->dev->queue_count)) + 1;
	return ns->dev->queues[qid];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	/* XXX: Need to check tail isn't going to overrun head */
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	/* The SQ tail doorbell must be written with the *new* tail */
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

struct nvme_prps {
	int npages;
	dma_addr_t first_dma;
	__le64 *list[0];
};

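/*
 * A PRP list that does not fit in a single page is chained: the last
 * 64-bit entry of each page holds the DMA address of the next page, which
 * is why the loop below reads list[last_prp] before freeing each page.
 * Lists small enough for the 256-byte pool are flagged with npages == 0.
 */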
static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	dma_addr_t prp_dma;

	if (!prps)
		return;

	prp_dma = prps->first_dma;

	if (prps->npages == 0)
		dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
	for (i = 0; i < prps->npages; i++) {
		__le64 *prp_list = prps->list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(prps);
}

struct nvme_bio {
	struct bio *bio;
	int nents;
	struct nvme_prps *prps;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
	return kzalloc(sizeof(struct nvme_bio) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
	nvme_free_prps(nvmeq->dev, nbio->prps);
	kfree(nbio);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_bio *nbio = ctx;
	struct bio *bio = nbio->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_nbio(nvmeq, nbio);
	bio_endio(bio, status ? -EIO : 0);
}

/* length is in bytes */
static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
					struct nvme_common_command *cmd,
					struct scatterlist *sg, int length)
{
	struct dma_pool *pool;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, npages, i, prp_page;
	struct nvme_prps *prps = NULL;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return prps;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return prps;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, GFP_ATOMIC);
	prp_page = 0;
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		prps->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		prps->npages = npages;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	prps->list[prp_page++] = prp_list;
	prps->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8 - 1) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			prps->list[prp_page++] = prp_list;
			old_prp_list[i] = cpu_to_le64(prp_dma);
			i = 0;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return prps;
}

static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, nsegs = 0;

	sg_init_table(nbio->sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			/* Check bvprv && offset == 0 */
			sg = sg ? sg + 1 : nbio->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	}
	nbio->nents = nsegs;
	sg_mark_end(sg);
	return dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir);
}

static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_bio *nbio;
	enum dma_data_direction dma_dir;
	int cmdid, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	nbio = alloc_nbio(psegs, GFP_ATOMIC);
	if (!nbio)
		goto nomem;
	nbio->bio = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_nbio;

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = -ENOMEM;
	if (nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs) == 0)
		goto free_nbio;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
								bio->bi_size);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	/* ring the SQ tail doorbell with the new tail value */
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_nbio:
	free_nbio(nvmeq, nbio);
 nomem:
	return result;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	if ((unsigned long)cmdinfo == CMD_CTX_CANCELLED)
		return;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
		dev_warn(nvmeq->q_dmadev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
		dev_warn(nvmeq->q_dmadev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

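/*
 * Bit 0 of the status field is the phase tag. A queue starts out
 * expecting phase 1; the controller writes its current phase into every
 * entry it posts, so an entry whose phase bit does not match cq_phase has
 * not been written yet. The expected phase inverts each time the head
 * wraps around the queue.
 */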
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	static const completion_fn completions[4] = {
		[sync_completion_id] = sync_completion,
		[bio_completion_id]  = bio_completion,
	};

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE. Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid_data(nvmeq, cmdid);
	spin_unlock_irq(&nvmeq->q_lock);
}

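/*
 * Synchronous commands put the caller to sleep: the task is marked
 * TASK_KILLABLE before the command is submitted, and sync_completion()
 * copies the result and status out of the CQE and wakes it. If the sleep
 * is cut short by a fatal signal, the command ID is cancelled so that a
 * late completion is quietly dropped.
 */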
/*
 * Returns 0 on success. If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];

	free_irq(dev->entry[nvmeq->cq_vector].vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return NULL;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return NULL;
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

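/*
 * Pin the user buffer with get_user_pages_fast() and build a scatterlist
 * over it: the first entry may start at an offset within its page, the
 * rest are page-aligned. nvme_unmap_user_pages() undoes both the DMA
 * mapping and the page references.
 */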
static int nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length,
				struct scatterlist **sgp)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;

	if (addr & 3)
		return -EINVAL;
	if (!length)
		return -EINVAL;

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
	sg_init_table(sg, count);
	sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
	length -= (PAGE_SIZE - offset);
	for (i = 1; i < count; i++) {
		sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
		length -= PAGE_SIZE;
	}

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto put_pages;

	kfree(pages);
	*sgp = sg;
	return nents;

 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return err;
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, int length,
				struct scatterlist *sg, int nents)
{
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	/* the unmap direction must match the direction used when mapping */
	dma_unmap_sg(&dev->pci_dev->dev, sg, nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));
}

static int nvme_submit_user_admin_command(struct nvme_dev *dev,
					unsigned long addr, unsigned length,
					struct nvme_command *cmd)
{
	int err, nents;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
	if (nents < 0)
		return nents;
	prps = nvme_setup_prps(dev, &cmd->common, sg, length);
	err = nvme_submit_admin_cmd(dev, cmd, NULL);
	nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	return err ? -EIO : 0;
}

static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(ns->ns_id);
	c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	u32 result;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = io.nblocks << io.block_shift;
	nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(io.nsid);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks - 1);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);	/* XXX: endian? */
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);
	/* XXX: metadata */
	prps = nvme_setup_prps(dev, &c.common, sg, length);

	nvmeq = get_nvmeq(ns);
	/* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled. We may be preempted at any point, and be rescheduled
	 * to a different CPU. That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	put_user(result, &uio->result);
	return status;
}

static int nvme_download_firmware(struct nvme_ns *ns,
						struct nvme_dlfw __user *udlfw)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_dlfw dlfw;
	struct nvme_command c;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&dlfw, udlfw, sizeof(dlfw)))
		return -EFAULT;
	if (dlfw.length >= (1 << 30))
		return -EINVAL;

	nents = nvme_map_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, &sg);
	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.dlfw.opcode = nvme_admin_download_fw;
	c.dlfw.numd = cpu_to_le32(dlfw.length);
	c.dlfw.offset = cpu_to_le32(dlfw.offset);
	prps = nvme_setup_prps(dev, &c.common, sg, dlfw.length * 4);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	nvme_unmap_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, sg, nents);
Matthew Wilcox | d567760 | 2011-02-10 10:47:55 -0500 | [diff] [blame] | 1048 | nvme_free_prps(dev, prps); |
Matthew Wilcox | 6ee44cd | 2011-02-03 10:58:26 -0500 | [diff] [blame] | 1049 | return status; |
| 1050 | } |
| 1051 | |
| 1052 | static int nvme_activate_firmware(struct nvme_ns *ns, unsigned long arg) |
| 1053 | { |
| 1054 | struct nvme_dev *dev = ns->dev; |
| 1055 | struct nvme_command c; |
| 1056 | |
| 1057 | memset(&c, 0, sizeof(c)); |
| 1058 | c.common.opcode = nvme_admin_activate_fw; |
| 1059 | c.common.rsvd10[0] = cpu_to_le32(arg); |
| 1060 | |
| 1061 | return nvme_submit_admin_cmd(dev, &c, NULL); |
| 1062 | } |

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_IDENTIFY_NS:
		return nvme_identify(ns, arg, 0);
	case NVME_IOCTL_IDENTIFY_CTRL:
		return nvme_identify(ns, arg, 1);
	case NVME_IOCTL_GET_RANGE_TYPE:
		return nvme_get_range_type(ns, arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case NVME_IOCTL_DOWNLOAD_FW:
		return nvme_download_firmware(ns, (void __user *)arg);
	case NVME_IOCTL_ACTIVATE_FW:
		return nvme_activate_firmware(ns, arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
};

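/*
 * Illustrative sketch (not driver code): fetching the 4KB Identify
 * Controller structure through the block device node.  Passing the
 * buffer address as the raw ioctl argument matches the dispatch above;
 * the 4096-byte buffer size is an assumption based on the Identify
 * page size.
 *
 *	char id[4096];
 *
 *	int fd = open("/dev/nvme0n1", O_RDONLY);
 *	int err = ioctl(fd, NVME_IOCTL_IDENTIFY_CTRL, (unsigned long)id);
 */
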
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
	}
}

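/*
 * One kernel thread polls every queue of every registered device about
 * once a second: it reaps any completions the interrupt handler missed
 * and retries bios that were parked on sq_cong while the submission
 * queue was full.
 */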
static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk(KERN_DEBUG "nvme: process_cq did something\n");
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	/* QUEUE_FLAG_* are bit numbers, not masks; only QUEUE_FLAG_DEFAULT
	 * is already a mask, so shift the individual flags into place. */
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT |
				(1 << QUEUE_FLAG_NOMERGES) |
				(1 << QUEUE_FLAG_NONROT) |
				(1 << QUEUE_FLAG_DISCARD);
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = index;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * index;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	put_disk(ns->disk);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	struct nvme_command c;
	/* Number of Queues feature: both halves of dword11 are 0-based */
	u32 q_count = (count - 1) | ((count - 1) << 16);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
	c.features.dword11 = cpu_to_le32(q_count);

	status = nvme_submit_admin_cmd(dev, &c, &result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}
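
/*
 * Worked example of the encoding above (for illustration): asking for
 * 4 queues sends q_count = 0x00030003, i.e. 4 - 1 in each 16-bit half.
 * If the controller answers result = 0x000f0007 (submission queues in
 * the low half, completion queues in the high half per the NVMe spec),
 * it granted 8 submission and 16 completion queues, and
 * min(0x0007, 0x000f) + 1 = 8 usable I/O queue pairs.
 */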

static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (!dev->queues[i + 1])
			return -ENOMEM;
		dev->queue_count++;
	}

	return 0;
}
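
/*
 * Note on the retry loop above: pci_enable_msix() returns 0 on success,
 * a negative errno on failure, and a positive count when fewer vectors
 * are available than requested.  In the positive case the loop retries
 * with the count the hardware can actually provide; on hard failure the
 * driver falls back to a single I/O queue.
 */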

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = id;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	/* namespace IDs are 1-based */
	for (i = 1; i <= nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	/* TODO: wait for all outstanding I/O to finish, or cancel it */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}
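
/*
 * Sizing arithmetic for the small pool above (assuming 4KB pages and
 * 8-byte PRP entries): a 256-byte list holds 256 / 8 = 32 entries, each
 * addressing one 4KB page, so it covers transfers up to 32 * 4KB = 128KB,
 * which is where the "between 4k and 128k" figure comes from.
 */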

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	return 0;

 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802
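
/*
 * Class code 0x010802 decodes as base class 01h (mass storage),
 * sub-class 08h (non-volatile memory), programming interface 02h
 * (NVM Express); matching on it with a 0xffffff mask binds the driver
 * to any NVMe controller regardless of vendor or device ID.
 */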

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result = -EBUSY;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		goto kill_kthread;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.3");
module_init(nvme_init);
module_exit(nvme_exit);