/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static unsigned long default_ps_max_latency_us = 25000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

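/*
 * For reference, and assuming the usual nvme_core module name: the writable
 * parameters above can be set at load time, e.g.
 * "modprobe nvme_core admin_timeout=90", or changed at runtime through
 * /sys/module/nvme_core/parameters/.  The values in this example are purely
 * illustrative.
 */
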
static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

static int nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;

	/*
	 * XXX: these errors are a nasty side-band protocol to
	 * drivers/md/dm-mpath.c:noretry_error() that isn't documented
	 * anywhere.
	 */
	case NVME_SC_CMD_SEQ_ERROR:
		return -EILSEQ;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
		return -ENODATA;
	}
}

static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (jiffies - req->start_time >= req->timeout)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

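/*
 * Generic completion handler called by the transports: requeue the request
 * if the failure is transient and the retry budget above allows it,
 * otherwise translate the NVMe status code into a Linux errno and finish
 * the request.
 */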
void nvme_complete_rq(struct request *req)
{
	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
		nvme_req(req)->retries++;
		blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
		return;
	}

	blk_mq_end_request(req, nvme_error_status(req));
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	nvme_req(req)->status = status;
	blk_mq_complete_request(req);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

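/*
 * Controller state machine, summarized for reference (derived directly from
 * the switch below).  Only these old -> new transitions are accepted;
 * everything else is rejected:
 *
 *	NEW		-> LIVE, RESETTING
 *	LIVE		-> RESETTING, RECONNECTING, DELETING
 *	RESETTING	-> LIVE, DELETING
 *	RECONNECTING	-> LIVE, RESETTING, DELETING
 *	DELETING	-> DEAD
 */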
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irq(&ctrl->lock);

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	if (ns->disk) {
		spin_lock(&dev_list_lock);
		ns->disk->private_data = NULL;
		spin_unlock(&dev_list_lock);
	}

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

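/*
 * Illustrative use of nvme_alloc_request(), modelled on
 * __nvme_submit_sync_cmd() below: the caller builds an nvme_command, lets
 * the helper attach it to a freshly allocated request, then executes and
 * frees the request:
 *
 *	struct nvme_command c = { };
 *	struct request *req;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	req = nvme_alloc_request(q, &c, 0, NVME_QID_ANY);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->timeout = ADMIN_TIMEOUT;
 *	blk_execute_rq(req->q, NULL, req, 0);
 *	blk_mq_free_request(req);
 */
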
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_MQ_RQ_QUEUE_BUSY;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_MQ_RQ_QUEUE_OK;
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	int ret = BLK_MQ_RQ_QUEUE_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	cmd->common.command_id = req->tag;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
submit:
	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
out_free_meta:
	kfree(meta);
out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}

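/*
 * Keep Alive support, used primarily by the fabrics transports: as long as
 * the controller advertises a non-zero Keep Alive Timeout (kato, in
 * seconds), a Keep Alive command is sent every kato seconds.  The timer is
 * re-armed from the completion handler, so a stalled admin queue stops the
 * stream and lets the controller time the host out.
 */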
static void nvme_keep_alive_end_io(struct request *rq, int error)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (error) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n", error);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		ctrl->ops->reset_ctrl(ctrl);
		return;
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

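/*
 * Set Features/Number of Queues uses 0's based values in both directions:
 * dword11 carries the requested (count - 1) submission and completion queue
 * counts in its low and high 16 bits, and the completion result reports the
 * allocated counts in the same format, which is why the helper below adds
 * one back and clamps to the smaller of the two.
 */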
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
#ifdef CONFIG_NVM
		if (ns->ndev)
			return nvme_nvm_ioctl(ns, cmd, arg);
#endif
		if (is_sed_ioctl(cmd))
			return sed_ioctl(ns->ctrl->opal_dev, cmd,
					(void __user *) arg);
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

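/*
 * Register a blk-integrity profile matching the namespace's T10 Protection
 * Information type, so the block layer generates and verifies the guard
 * (and, for Type 1/2, reference) tags on behalf of the driver.
 */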
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

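/*
 * Advertise DSM deallocate ("discard") support.  The BUILD_BUG_ON below
 * guards the assumption in nvme_setup_discard() that the range descriptors
 * for a maximally segmented discard still fit in a single page, so they can
 * be mapped as one special-payload bio_vec.
 */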
static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
}

static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
{
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
		return -ENODEV;
	}

	if ((*id)->ncap == 0) {
		kfree(*id);
		return -ENODEV;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));

	return 0;
}

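/*
 * Propagate the Identify Namespace data to the block device: logical block
 * size from the currently formatted LBA format, metadata size and layout
 * (separate buffer vs. extended LBAs), protection information type,
 * capacity, and the discard configuration.
 */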
static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/*
	 * If identify namespace failed, use a default 512 byte block size so
	 * the block layer can still be used before read/write starts failing
	 * for the zero-capacity device.
	 */
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	/* XXX: PI requires the metadata size to equal the T10 PI tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
			id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id = NULL;
	int ret;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	ret = nvme_revalidate_ns(ns, &id);
	if (ret)
		return ret;

	__nvme_revalidate_disk(disk, id);
	kfree(id);

	return 0;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

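/*
 * For the reservation commands, cdw10 is laid out roughly as follows (see
 * the NVMe specification for the authoritative definition): bits 02:00
 * select the action, bit 3 is "ignore existing key" (IEKEY), and bits 15:08
 * carry the reservation type from nvme_pr_type() above.
 */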
| 1080 | static int nvme_pr_command(struct block_device *bdev, u32 cdw10, |
| 1081 | u64 key, u64 sa_key, u8 op) |
| 1082 | { |
| 1083 | struct nvme_ns *ns = bdev->bd_disk->private_data; |
| 1084 | struct nvme_command c; |
| 1085 | u8 data[16] = { 0, }; |
| 1086 | |
| 1087 | put_unaligned_le64(key, &data[0]); |
| 1088 | put_unaligned_le64(sa_key, &data[8]); |
| 1089 | |
| 1090 | memset(&c, 0, sizeof(c)); |
| 1091 | c.common.opcode = op; |
| 1092 | c.common.nsid = cpu_to_le32(ns->ns_id); |
| 1093 | c.common.cdw10[0] = cpu_to_le32(cdw10); |
| 1094 | |
| 1095 | return nvme_submit_sync_cmd(ns->queue, &c, data, 16); |
| 1096 | } |
| 1097 | |
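Editor's note: for reference, a sketch of the cdw10 layouts the reservation helpers below rely on, as defined by the NVMe specification (field names come from the spec, not from this driver):

	/*
	 * Reservation Register: bits 2:0 RREGA (0=register, 1=unregister,
	 *                       2=replace), bit 3 IEKEY, bits 31:30 CPTPL.
	 * Reservation Acquire:  bits 2:0 RACQA (0=acquire, 1=preempt,
	 *                       2=preempt-and-abort), bit 3 IEKEY,
	 *                       bits 15:8 RTYPE.
	 * Reservation Release:  bits 2:0 RRELA (0=release, 1=clear),
	 *                       bit 3 IEKEY, bits 15:8 RTYPE.
	 * A "clear" is therefore a Reservation Release action, which is why
	 * nvme_pr_clear() below issues nvme_cmd_resv_release with RRELA=1.
	 */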
| 1098 | static int nvme_pr_register(struct block_device *bdev, u64 old, |
| 1099 | u64 new, unsigned flags) |
| 1100 | { |
| 1101 | u32 cdw10; |
| 1102 | |
| 1103 | if (flags & ~PR_FL_IGNORE_KEY) |
| 1104 | return -EOPNOTSUPP; |
| 1105 | |
| 1106 | cdw10 = old ? 2 : 0; |
| 1107 | cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; |
| 1108 | cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ |
| 1109 | return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); |
| 1110 | } |
| 1111 | |
| 1112 | static int nvme_pr_reserve(struct block_device *bdev, u64 key, |
| 1113 | enum pr_type type, unsigned flags) |
| 1114 | { |
| 1115 | u32 cdw10; |
| 1116 | |
| 1117 | if (flags & ~PR_FL_IGNORE_KEY) |
| 1118 | return -EOPNOTSUPP; |
| 1119 | |
| 1120 | cdw10 = nvme_pr_type(type) << 8; |
| 1121 | cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); |
| 1122 | return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); |
| 1123 | } |
| 1124 | |
| 1125 | static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, |
| 1126 | enum pr_type type, bool abort) |
| 1127 | { |
|      | 1128 | 	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); |
| 1129 | return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); |
| 1130 | } |
| 1131 | |
| 1132 | static int nvme_pr_clear(struct block_device *bdev, u64 key) |
| 1133 | { |
Dan Carpenter | 8c0b391 | 2015-12-09 13:24:06 +0300 | [diff] [blame] | 1134 | u32 cdw10 = 1 | (key ? 1 << 3 : 0); |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1135 | 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); |
| 1136 | } |
| 1137 | |
| 1138 | static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) |
| 1139 | { |
|      | 1140 | 	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); |
| 1141 | return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); |
| 1142 | } |
| 1143 | |
| 1144 | static const struct pr_ops nvme_pr_ops = { |
| 1145 | .pr_register = nvme_pr_register, |
| 1146 | .pr_reserve = nvme_pr_reserve, |
| 1147 | .pr_release = nvme_pr_release, |
| 1148 | .pr_preempt = nvme_pr_preempt, |
| 1149 | .pr_clear = nvme_pr_clear, |
| 1150 | }; |
| 1151 | |
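Editor's note: these pr_ops back the generic block-device persistent-reservation ioctls from <linux/pr.h>. A hypothetical userspace sketch (the device path and key values are made up):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/pr.h>

	int main(void)
	{
		struct pr_registration reg = { .old_key = 0, .new_key = 0xABCD };
		struct pr_reservation rsv = { .key = 0xABCD, .type = PR_WRITE_EXCLUSIVE };
		int fd = open("/dev/nvme0n1", O_RDWR);	/* hypothetical device */

		if (fd < 0)
			return 1;
		if (ioctl(fd, IOC_PR_REGISTER, &reg) < 0)	/* -> nvme_pr_register() */
			perror("IOC_PR_REGISTER");
		else if (ioctl(fd, IOC_PR_RESERVE, &rsv) < 0)	/* -> nvme_pr_reserve() */
			perror("IOC_PR_RESERVE");
		close(fd);
		return 0;
	}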
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1152 | #ifdef CONFIG_BLK_SED_OPAL |
Christoph Hellwig | 4f1244c | 2017-02-17 13:59:39 +0100 | [diff] [blame] | 1153 | int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, |
| 1154 | bool send) |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1155 | { |
Christoph Hellwig | 4f1244c | 2017-02-17 13:59:39 +0100 | [diff] [blame] | 1156 | struct nvme_ctrl *ctrl = data; |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1157 | struct nvme_command cmd; |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1158 | |
| 1159 | memset(&cmd, 0, sizeof(cmd)); |
| 1160 | if (send) |
| 1161 | cmd.common.opcode = nvme_admin_security_send; |
| 1162 | else |
| 1163 | cmd.common.opcode = nvme_admin_security_recv; |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1164 | cmd.common.nsid = 0; |
| 1165 | cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); |
| 1166 | cmd.common.cdw10[1] = cpu_to_le32(len); |
| 1167 | |
| 1168 | return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, |
| 1169 | ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); |
| 1170 | } |
| 1171 | EXPORT_SYMBOL_GPL(nvme_sec_submit); |
| 1172 | #endif /* CONFIG_BLK_SED_OPAL */ |
| 1173 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1174 | static const struct block_device_operations nvme_fops = { |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1175 | .owner = THIS_MODULE, |
| 1176 | .ioctl = nvme_ioctl, |
| 1177 | .compat_ioctl = nvme_compat_ioctl, |
| 1178 | .open = nvme_open, |
| 1179 | .release = nvme_release, |
| 1180 | .getgeo = nvme_getgeo, |
|      | 1181 | 	.revalidate_disk = nvme_revalidate_disk, |
| 1182 | .pr_ops = &nvme_pr_ops, |
| 1183 | }; |
| 1184 | |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1185 | static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) |
| 1186 | { |
| 1187 | unsigned long timeout = |
| 1188 | ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; |
| 1189 | u32 csts, bit = enabled ? NVME_CSTS_RDY : 0; |
| 1190 | int ret; |
| 1191 | |
| 1192 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { |
Keith Busch | 0df1e4f | 2016-10-11 13:31:58 -0400 | [diff] [blame] | 1193 | if (csts == ~0) |
| 1194 | return -ENODEV; |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1195 | if ((csts & NVME_CSTS_RDY) == bit) |
| 1196 | break; |
| 1197 | |
| 1198 | msleep(100); |
| 1199 | if (fatal_signal_pending(current)) |
| 1200 | return -EINTR; |
| 1201 | if (time_after(jiffies, timeout)) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1202 | dev_err(ctrl->device, |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1203 | "Device not ready; aborting %s\n", enabled ? |
| 1204 | "initialisation" : "reset"); |
| 1205 | return -ENODEV; |
| 1206 | } |
| 1207 | } |
| 1208 | |
| 1209 | return ret; |
| 1210 | } |
| 1211 | |
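Editor's note on the timeout above: CAP.TO is expressed in 500 ms units, so the expression converts it to jiffies. A worked example with a hypothetical value:

	/* CAP.TO = 30  =>  (30 + 1) * HZ / 2 jiffies  =  15.5 seconds */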
| 1212 | /* |
| 1213 | * If the device has been passed off to us in an enabled state, just clear |
| 1214 | * the enabled bit. The spec says we should set the 'shutdown notification |
| 1215 | * bits', but doing so may cause the device to complete commands to the |
| 1216 | * admin queue ... and we don't know what memory that might be pointing at! |
| 1217 | */ |
| 1218 | int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap) |
| 1219 | { |
| 1220 | int ret; |
| 1221 | |
| 1222 | ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; |
| 1223 | ctrl->ctrl_config &= ~NVME_CC_ENABLE; |
| 1224 | |
| 1225 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
| 1226 | if (ret) |
| 1227 | return ret; |
Guilherme G. Piccoli | 54adc01 | 2016-06-14 18:22:41 -0300 | [diff] [blame] | 1228 | |
Guilherme G. Piccoli | b5a10c5 | 2016-12-28 22:13:15 -0200 | [diff] [blame] | 1229 | if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) |
Guilherme G. Piccoli | 54adc01 | 2016-06-14 18:22:41 -0300 | [diff] [blame] | 1230 | msleep(NVME_QUIRK_DELAY_AMOUNT); |
| 1231 | |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1232 | return nvme_wait_ready(ctrl, cap, false); |
| 1233 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 1234 | EXPORT_SYMBOL_GPL(nvme_disable_ctrl); |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1235 | |
| 1236 | int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap) |
| 1237 | { |
| 1238 | /* |
| 1239 | * Default to a 4K page size, with the intention to update this |
|      | 1240 | 	 * path in the future to accommodate architectures with differing |
| 1241 | * kernel and IO page sizes. |
| 1242 | */ |
| 1243 | unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12; |
| 1244 | int ret; |
| 1245 | |
| 1246 | if (page_shift < dev_page_min) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1247 | dev_err(ctrl->device, |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1248 | "Minimum device page size %u too large for host (%u)\n", |
| 1249 | 1 << dev_page_min, 1 << page_shift); |
| 1250 | return -ENODEV; |
| 1251 | } |
| 1252 | |
| 1253 | ctrl->page_size = 1 << page_shift; |
| 1254 | |
| 1255 | ctrl->ctrl_config = NVME_CC_CSS_NVM; |
| 1256 | ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; |
| 1257 | ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; |
| 1258 | ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; |
| 1259 | ctrl->ctrl_config |= NVME_CC_ENABLE; |
| 1260 | |
| 1261 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
| 1262 | if (ret) |
| 1263 | return ret; |
| 1264 | return nvme_wait_ready(ctrl, cap, true); |
| 1265 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 1266 | EXPORT_SYMBOL_GPL(nvme_enable_ctrl); |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1267 | |
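Editor's note: a reference sketch of how the CC value assembled in nvme_enable_ctrl() decodes, based on the definitions in <linux/nvme.h>:

	/*
	 * NVME_CC_CSS_NVM    (0 << 4)  - NVM command set
	 * NVME_CC_MPS_SHIFT  7         - (page_shift - 12) << 7, 0 for 4K pages
	 * NVME_CC_ARB_RR     (0 << 11) - round-robin arbitration
	 * NVME_CC_SHN_NONE   (0 << 14) - no shutdown notification
	 * NVME_CC_IOSQES     (6 << 16) - 2^6 = 64-byte submission queue entries
	 * NVME_CC_IOCQES     (4 << 20) - 2^4 = 16-byte completion queue entries
	 * NVME_CC_ENABLE     (1 << 0)  - EN is set last, then CSTS.RDY is polled
	 */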
| 1268 | int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) |
| 1269 | { |
| 1270 | unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies; |
| 1271 | u32 csts; |
| 1272 | int ret; |
| 1273 | |
| 1274 | ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; |
| 1275 | ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; |
| 1276 | |
| 1277 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
| 1278 | if (ret) |
| 1279 | return ret; |
| 1280 | |
| 1281 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { |
| 1282 | if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) |
| 1283 | break; |
| 1284 | |
| 1285 | msleep(100); |
| 1286 | if (fatal_signal_pending(current)) |
| 1287 | return -EINTR; |
| 1288 | if (time_after(jiffies, timeout)) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1289 | dev_err(ctrl->device, |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1290 | "Device shutdown incomplete; abort shutdown\n"); |
| 1291 | return -ENODEV; |
| 1292 | } |
| 1293 | } |
| 1294 | |
| 1295 | return ret; |
| 1296 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 1297 | EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1298 | |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1299 | static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, |
| 1300 | struct request_queue *q) |
| 1301 | { |
Jens Axboe | 7c88cb0 | 2016-04-12 15:43:09 -0600 | [diff] [blame] | 1302 | bool vwc = false; |
| 1303 | |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1304 | if (ctrl->max_hw_sectors) { |
Christoph Hellwig | 45686b6 | 2016-03-02 18:07:12 +0100 | [diff] [blame] | 1305 | u32 max_segments = |
| 1306 | (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; |
| 1307 | |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1308 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); |
Christoph Hellwig | 45686b6 | 2016-03-02 18:07:12 +0100 | [diff] [blame] | 1309 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1310 | } |
Keith Busch | e6282ae | 2016-12-19 11:37:50 -0500 | [diff] [blame] | 1311 | if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) |
| 1312 | blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1313 | blk_queue_virt_boundary(q, ctrl->page_size - 1); |
Jens Axboe | 7c88cb0 | 2016-04-12 15:43:09 -0600 | [diff] [blame] | 1314 | if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) |
| 1315 | vwc = true; |
| 1316 | blk_queue_write_cache(q, vwc, vwc); |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1317 | } |
| 1318 | |
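Editor's note: a worked example of the segment limit above, with hypothetical values:

	/* e.g. ctrl->page_size = 4096, max_hw_sectors = 1024:
	 *	max_segments = 1024 / (4096 >> 9) + 1 = 129
	 */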
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1319 | static void nvme_configure_apst(struct nvme_ctrl *ctrl) |
| 1320 | { |
| 1321 | /* |
| 1322 | * APST (Autonomous Power State Transition) lets us program a |
| 1323 | * table of power state transitions that the controller will |
| 1324 | * perform automatically. We configure it with a simple |
| 1325 | * heuristic: we are willing to spend at most 2% of the time |
| 1326 | * transitioning between power states. Therefore, when running |
| 1327 | * in any given state, we will enter the next lower-power |
Andy Lutomirski | 76e4ad0 | 2017-04-21 16:19:22 -0700 | [diff] [blame] | 1328 | * non-operational state after waiting 50 * (enlat + exlat) |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1329 | * microseconds, as long as that state's total latency is under |
| 1330 | * the requested maximum latency. |
| 1331 | * |
| 1332 | * We will not autonomously enter any non-operational state for |
| 1333 | * which the total latency exceeds ps_max_latency_us. Users |
| 1334 | * can set ps_max_latency_us to zero to turn off APST. |
| 1335 | */ |
| 1336 | |
| 1337 | unsigned apste; |
| 1338 | struct nvme_feat_auto_pst *table; |
Andy Lutomirski | fb0dc39 | 2017-04-21 16:19:23 -0700 | [diff] [blame^] | 1339 | u64 max_lat_us = 0; |
| 1340 | int max_ps = -1; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1341 | int ret; |
| 1342 | |
| 1343 | /* |
| 1344 | * If APST isn't supported or if we haven't been initialized yet, |
| 1345 | * then don't do anything. |
| 1346 | */ |
| 1347 | if (!ctrl->apsta) |
| 1348 | return; |
| 1349 | |
| 1350 | if (ctrl->npss > 31) { |
| 1351 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); |
| 1352 | return; |
| 1353 | } |
| 1354 | |
| 1355 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
| 1356 | if (!table) |
| 1357 | return; |
| 1358 | |
| 1359 | if (ctrl->ps_max_latency_us == 0) { |
| 1360 | /* Turn off APST. */ |
| 1361 | apste = 0; |
Andy Lutomirski | fb0dc39 | 2017-04-21 16:19:23 -0700 | [diff] [blame^] | 1362 | dev_dbg(ctrl->device, "APST disabled\n"); |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1363 | } else { |
| 1364 | __le64 target = cpu_to_le64(0); |
| 1365 | int state; |
| 1366 | |
| 1367 | /* |
| 1368 | * Walk through all states from lowest- to highest-power. |
| 1369 | * According to the spec, lower-numbered states use more |
| 1370 | * power. NPSS, despite the name, is the index of the |
| 1371 | * lowest-power state, not the number of states. |
| 1372 | */ |
| 1373 | for (state = (int)ctrl->npss; state >= 0; state--) { |
| 1374 | u64 total_latency_us, transition_ms; |
| 1375 | |
| 1376 | if (target) |
| 1377 | table->entries[state] = target; |
| 1378 | |
| 1379 | /* |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 1380 | * Don't allow transitions to the deepest state |
| 1381 | * if it's quirked off. |
| 1382 | */ |
| 1383 | if (state == ctrl->npss && |
| 1384 | (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) |
| 1385 | continue; |
| 1386 | |
| 1387 | /* |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1388 | * Is this state a useful non-operational state for |
| 1389 | * higher-power states to autonomously transition to? |
| 1390 | */ |
| 1391 | if (!(ctrl->psd[state].flags & |
| 1392 | NVME_PS_FLAGS_NON_OP_STATE)) |
| 1393 | continue; |
| 1394 | |
| 1395 | total_latency_us = |
| 1396 | (u64)le32_to_cpu(ctrl->psd[state].entry_lat) + |
|      | 1397 | 				le32_to_cpu(ctrl->psd[state].exit_lat); |
| 1398 | if (total_latency_us > ctrl->ps_max_latency_us) |
| 1399 | continue; |
| 1400 | |
| 1401 | /* |
| 1402 | * This state is good. Use it as the APST idle |
| 1403 | * target for higher power states. |
| 1404 | */ |
| 1405 | transition_ms = total_latency_us + 19; |
| 1406 | do_div(transition_ms, 20); |
| 1407 | if (transition_ms > (1 << 24) - 1) |
| 1408 | transition_ms = (1 << 24) - 1; |
| 1409 | |
| 1410 | target = cpu_to_le64((state << 3) | |
| 1411 | (transition_ms << 8)); |
Andy Lutomirski | fb0dc39 | 2017-04-21 16:19:23 -0700 | [diff] [blame^] | 1412 | |
| 1413 | if (max_ps == -1) |
| 1414 | max_ps = state; |
| 1415 | |
| 1416 | if (total_latency_us > max_lat_us) |
| 1417 | max_lat_us = total_latency_us; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1418 | } |
| 1419 | |
| 1420 | apste = 1; |
Andy Lutomirski | fb0dc39 | 2017-04-21 16:19:23 -0700 | [diff] [blame^] | 1421 | |
| 1422 | if (max_ps == -1) { |
| 1423 | dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); |
| 1424 | } else { |
| 1425 | dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", |
| 1426 | max_ps, max_lat_us, (int)sizeof(*table), table); |
| 1427 | } |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1428 | } |
| 1429 | |
| 1430 | ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, |
| 1431 | table, sizeof(*table), NULL); |
| 1432 | if (ret) |
| 1433 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); |
| 1434 | |
| 1435 | kfree(table); |
| 1436 | } |
| 1437 | |
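Editor's note: a worked example of the APST entry encoding above, with hypothetical latencies. Per the spec, each 64-bit table entry packs the Idle Transition Power State in bits 07:03 and the Idle Time Prior to Transition, in milliseconds, in bits 31:08:

	/*
	 * state 4, enlat = 4000 us, exlat = 6000 us:
	 *	total_latency_us = 10000
	 *	transition_ms    = (10000 + 19) / 20 = 500   (i.e. 50 * total / 1000)
	 *	entry            = (4 << 3) | (500 << 8)
	 * meaning: idle for 500 ms, then autonomously enter power state 4.
	 */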
| 1438 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) |
| 1439 | { |
| 1440 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1441 | u64 latency; |
| 1442 | |
| 1443 | switch (val) { |
| 1444 | case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: |
| 1445 | case PM_QOS_LATENCY_ANY: |
| 1446 | latency = U64_MAX; |
| 1447 | break; |
| 1448 | |
| 1449 | default: |
| 1450 | latency = val; |
| 1451 | } |
| 1452 | |
| 1453 | if (ctrl->ps_max_latency_us != latency) { |
| 1454 | ctrl->ps_max_latency_us = latency; |
| 1455 | nvme_configure_apst(ctrl); |
| 1456 | } |
| 1457 | } |
| 1458 | |
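Editor's note: this callback is driven by the PM QoS latency-tolerance attribute that nvme_init_identify() exposes on the controller device. A hypothetical shell session (the controller name nvme0 is assumed):

	/*
	 *   # allow up to 5.5 ms of APST round-trip latency
	 *   echo 5500 > /sys/class/nvme/nvme0/power/pm_qos_latency_tolerance_us
	 *   # disable APST entirely
	 *   echo 0 > /sys/class/nvme/nvme0/power/pm_qos_latency_tolerance_us
	 */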
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1459 | struct nvme_core_quirk_entry { |
| 1460 | /* |
| 1461 | * NVMe model and firmware strings are padded with spaces. For |
| 1462 | * simplicity, strings in the quirk table are padded with NULLs |
| 1463 | * instead. |
| 1464 | */ |
| 1465 | u16 vid; |
| 1466 | const char *mn; |
| 1467 | const char *fr; |
| 1468 | unsigned long quirks; |
| 1469 | }; |
| 1470 | |
| 1471 | static const struct nvme_core_quirk_entry core_quirks[] = { |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1472 | { |
Andy Lutomirski | be56945 | 2017-04-20 13:37:56 -0700 | [diff] [blame] | 1473 | /* |
| 1474 | * This Toshiba device seems to die using any APST states. See: |
| 1475 | * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 |
| 1476 | */ |
| 1477 | .vid = 0x1179, |
| 1478 | .mn = "THNSF5256GPUK TOSHIBA", |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1479 | .quirks = NVME_QUIRK_NO_APST, |
Andy Lutomirski | be56945 | 2017-04-20 13:37:56 -0700 | [diff] [blame] | 1480 | } |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1481 | }; |
| 1482 | |
| 1483 | /* match is null-terminated but idstr is space-padded. */ |
| 1484 | static bool string_matches(const char *idstr, const char *match, size_t len) |
| 1485 | { |
| 1486 | size_t matchlen; |
| 1487 | |
| 1488 | if (!match) |
| 1489 | return true; |
| 1490 | |
| 1491 | matchlen = strlen(match); |
| 1492 | WARN_ON_ONCE(matchlen > len); |
| 1493 | |
| 1494 | if (memcmp(idstr, match, matchlen)) |
| 1495 | return false; |
| 1496 | |
| 1497 | for (; matchlen < len; matchlen++) |
| 1498 | if (idstr[matchlen] != ' ') |
| 1499 | return false; |
| 1500 | |
| 1501 | return true; |
| 1502 | } |
| 1503 | |
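Editor's note: a quick illustration of string_matches() with hypothetical strings:

	/*
	 * idstr (space-padded, len 8): "ABC     "
	 *	string_matches(idstr, "ABC", 8)  -> true
	 *	string_matches(idstr, "ABCD", 8) -> false (memcmp fails)
	 *	string_matches(idstr, NULL, 8)   -> true  (NULL is a wildcard)
	 * idstr "ABCX    " with match "ABC"  -> false ('X' is not a space)
	 */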
| 1504 | static bool quirk_matches(const struct nvme_id_ctrl *id, |
| 1505 | const struct nvme_core_quirk_entry *q) |
| 1506 | { |
| 1507 | return q->vid == le16_to_cpu(id->vid) && |
| 1508 | string_matches(id->mn, q->mn, sizeof(id->mn)) && |
| 1509 | string_matches(id->fr, q->fr, sizeof(id->fr)); |
| 1510 | } |
| 1511 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1512 | /* |
| 1513 | * Initialize the cached copies of the Identify data and various controller |
| 1514 | * register in our nvme_ctrl structure. This should be called as soon as |
| 1515 | * the admin queue is fully up and running. |
| 1516 | */ |
| 1517 | int nvme_init_identify(struct nvme_ctrl *ctrl) |
| 1518 | { |
| 1519 | struct nvme_id_ctrl *id; |
| 1520 | u64 cap; |
| 1521 | int ret, page_shift; |
Christoph Hellwig | a229dbf | 2016-06-06 23:20:48 +0200 | [diff] [blame] | 1522 | u32 max_hw_sectors; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1523 | u8 prev_apsta; |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1524 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1525 | ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); |
| 1526 | if (ret) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1527 | dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1528 | return ret; |
| 1529 | } |
| 1530 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1531 | ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap); |
| 1532 | if (ret) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1533 | dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1534 | return ret; |
| 1535 | } |
| 1536 | page_shift = NVME_CAP_MPSMIN(cap) + 12; |
| 1537 | |
Gabriel Krisman Bertazi | 8ef2074 | 2016-10-19 09:51:05 -0600 | [diff] [blame] | 1538 | if (ctrl->vs >= NVME_VS(1, 1, 0)) |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1539 | ctrl->subsystem = NVME_CAP_NSSRC(cap); |
| 1540 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1541 | ret = nvme_identify_ctrl(ctrl, &id); |
| 1542 | if (ret) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1543 | dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1544 | return -EIO; |
| 1545 | } |
| 1546 | |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1547 | if (!ctrl->identified) { |
| 1548 | /* |
| 1549 | * Check for quirks. Quirk can depend on firmware version, |
| 1550 | * so, in principle, the set of quirks present can change |
| 1551 | * across a reset. As a possible future enhancement, we |
| 1552 | * could re-scan for quirks every time we reinitialize |
| 1553 | * the device, but we'd have to make sure that the driver |
| 1554 | * behaves intelligently if the quirks change. |
| 1555 | */ |
| 1556 | |
| 1557 | int i; |
| 1558 | |
| 1559 | for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { |
| 1560 | if (quirk_matches(id, &core_quirks[i])) |
| 1561 | ctrl->quirks |= core_quirks[i].quirks; |
| 1562 | } |
| 1563 | } |
| 1564 | |
Scott Bauer | 8a9ae52 | 2017-02-17 13:59:40 +0100 | [diff] [blame] | 1565 | ctrl->oacs = le16_to_cpu(id->oacs); |
Keith Busch | 118472a | 2016-02-18 09:57:48 -0700 | [diff] [blame] | 1566 | ctrl->vid = le16_to_cpu(id->vid); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1567 | ctrl->oncs = le16_to_cpup(&id->oncs); |
Christoph Hellwig | 6bf25d1 | 2015-11-20 09:36:44 +0100 | [diff] [blame] | 1568 | atomic_set(&ctrl->abort_limit, id->acl + 1); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1569 | ctrl->vwc = id->vwc; |
Ming Lin | 931e1c2 | 2016-02-26 13:24:19 -0800 | [diff] [blame] | 1570 | ctrl->cntlid = le16_to_cpup(&id->cntlid); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1571 | memcpy(ctrl->serial, id->sn, sizeof(id->sn)); |
| 1572 | memcpy(ctrl->model, id->mn, sizeof(id->mn)); |
| 1573 | memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr)); |
| 1574 | if (id->mdts) |
Christoph Hellwig | a229dbf | 2016-06-06 23:20:48 +0200 | [diff] [blame] | 1575 | max_hw_sectors = 1 << (id->mdts + page_shift - 9); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1576 | else |
Christoph Hellwig | a229dbf | 2016-06-06 23:20:48 +0200 | [diff] [blame] | 1577 | max_hw_sectors = UINT_MAX; |
| 1578 | ctrl->max_hw_sectors = |
| 1579 | min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1580 | |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1581 | nvme_set_queue_limits(ctrl, ctrl->admin_q); |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1582 | ctrl->sgls = le32_to_cpu(id->sgls); |
Sagi Grimberg | 038bd4c | 2016-06-13 16:45:28 +0200 | [diff] [blame] | 1583 | ctrl->kas = le16_to_cpu(id->kas); |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1584 | |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1585 | ctrl->npss = id->npss; |
| 1586 | prev_apsta = ctrl->apsta; |
| 1587 | ctrl->apsta = (ctrl->quirks & NVME_QUIRK_NO_APST) ? 0 : id->apsta; |
| 1588 | memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); |
| 1589 | |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1590 | if (ctrl->ops->is_fabrics) { |
| 1591 | ctrl->icdoff = le16_to_cpu(id->icdoff); |
| 1592 | ctrl->ioccsz = le32_to_cpu(id->ioccsz); |
| 1593 | ctrl->iorcsz = le32_to_cpu(id->iorcsz); |
| 1594 | ctrl->maxcmd = le16_to_cpu(id->maxcmd); |
| 1595 | |
| 1596 | /* |
|      | 1597 | 		 * In fabrics we need to verify that the cntlid matches the one |
|      | 1598 | 		 * returned by the admin Connect command. |
| 1599 | */ |
| 1600 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) |
| 1601 | ret = -EINVAL; |
Sagi Grimberg | 038bd4c | 2016-06-13 16:45:28 +0200 | [diff] [blame] | 1602 | |
| 1603 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { |
| 1604 | dev_err(ctrl->dev, |
| 1605 | "keep-alive support is mandatory for fabrics\n"); |
| 1606 | ret = -EINVAL; |
| 1607 | } |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1608 | } else { |
| 1609 | ctrl->cntlid = le16_to_cpu(id->cntlid); |
| 1610 | } |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1611 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1612 | kfree(id); |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1613 | |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1614 | if (ctrl->apsta && !prev_apsta) |
| 1615 | dev_pm_qos_expose_latency_tolerance(ctrl->device); |
| 1616 | else if (!ctrl->apsta && prev_apsta) |
| 1617 | dev_pm_qos_hide_latency_tolerance(ctrl->device); |
| 1618 | |
| 1619 | nvme_configure_apst(ctrl); |
| 1620 | |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1621 | ctrl->identified = true; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1622 | |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1623 | return ret; |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1624 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 1625 | EXPORT_SYMBOL_GPL(nvme_init_identify); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1626 | |
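Editor's note: a worked example of the MDTS clamp in nvme_init_identify(), with hypothetical values:

	/*
	 * CAP.MPSMIN = 0 (4K pages)  =>  page_shift = 12
	 * id->mdts   = 5             =>  max transfer = 2^5 * 4K = 128K
	 *	max_hw_sectors = 1 << (5 + 12 - 9) = 256 (512-byte sectors)
	 * id->mdts   = 0             =>  no limit reported, UINT_MAX is used
	 */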
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1627 | static int nvme_dev_open(struct inode *inode, struct file *file) |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1628 | { |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1629 | struct nvme_ctrl *ctrl; |
| 1630 | int instance = iminor(inode); |
| 1631 | int ret = -ENODEV; |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1632 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1633 | spin_lock(&dev_list_lock); |
| 1634 | list_for_each_entry(ctrl, &nvme_ctrl_list, node) { |
| 1635 | if (ctrl->instance != instance) |
| 1636 | continue; |
| 1637 | |
| 1638 | if (!ctrl->admin_q) { |
| 1639 | ret = -EWOULDBLOCK; |
| 1640 | break; |
| 1641 | } |
| 1642 | if (!kref_get_unless_zero(&ctrl->kref)) |
| 1643 | break; |
| 1644 | file->private_data = ctrl; |
| 1645 | ret = 0; |
| 1646 | break; |
| 1647 | } |
| 1648 | spin_unlock(&dev_list_lock); |
| 1649 | |
| 1650 | return ret; |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1651 | } |
| 1652 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1653 | static int nvme_dev_release(struct inode *inode, struct file *file) |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1654 | { |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1655 | nvme_put_ctrl(file->private_data); |
| 1656 | return 0; |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1657 | } |
| 1658 | |
Christoph Hellwig | bfd8947 | 2015-12-24 15:27:01 +0100 | [diff] [blame] | 1659 | static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) |
| 1660 | { |
| 1661 | struct nvme_ns *ns; |
| 1662 | int ret; |
| 1663 | |
| 1664 | mutex_lock(&ctrl->namespaces_mutex); |
| 1665 | if (list_empty(&ctrl->namespaces)) { |
| 1666 | ret = -ENOTTY; |
| 1667 | goto out_unlock; |
| 1668 | } |
| 1669 | |
| 1670 | ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); |
| 1671 | if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1672 | dev_warn(ctrl->device, |
Christoph Hellwig | bfd8947 | 2015-12-24 15:27:01 +0100 | [diff] [blame] | 1673 | "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); |
| 1674 | ret = -EINVAL; |
| 1675 | goto out_unlock; |
| 1676 | } |
| 1677 | |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1678 | dev_warn(ctrl->device, |
Christoph Hellwig | bfd8947 | 2015-12-24 15:27:01 +0100 | [diff] [blame] | 1679 | "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); |
| 1680 | kref_get(&ns->kref); |
| 1681 | mutex_unlock(&ctrl->namespaces_mutex); |
| 1682 | |
| 1683 | ret = nvme_user_cmd(ctrl, ns, argp); |
| 1684 | nvme_put_ns(ns); |
| 1685 | return ret; |
| 1686 | |
| 1687 | out_unlock: |
| 1688 | mutex_unlock(&ctrl->namespaces_mutex); |
| 1689 | return ret; |
| 1690 | } |
| 1691 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1692 | static long nvme_dev_ioctl(struct file *file, unsigned int cmd, |
| 1693 | unsigned long arg) |
| 1694 | { |
| 1695 | struct nvme_ctrl *ctrl = file->private_data; |
| 1696 | void __user *argp = (void __user *)arg; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1697 | |
| 1698 | switch (cmd) { |
| 1699 | case NVME_IOCTL_ADMIN_CMD: |
| 1700 | return nvme_user_cmd(ctrl, NULL, argp); |
| 1701 | case NVME_IOCTL_IO_CMD: |
Christoph Hellwig | bfd8947 | 2015-12-24 15:27:01 +0100 | [diff] [blame] | 1702 | return nvme_dev_user_cmd(ctrl, argp); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1703 | case NVME_IOCTL_RESET: |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1704 | dev_warn(ctrl->device, "resetting controller\n"); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1705 | return ctrl->ops->reset_ctrl(ctrl); |
| 1706 | case NVME_IOCTL_SUBSYS_RESET: |
| 1707 | return nvme_reset_subsystem(ctrl); |
Keith Busch | 9ec3bb2 | 2016-04-29 15:45:18 -0600 | [diff] [blame] | 1708 | case NVME_IOCTL_RESCAN: |
| 1709 | nvme_queue_scan(ctrl); |
| 1710 | return 0; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1711 | default: |
| 1712 | return -ENOTTY; |
| 1713 | } |
| 1714 | } |
| 1715 | |
| 1716 | static const struct file_operations nvme_dev_fops = { |
| 1717 | .owner = THIS_MODULE, |
| 1718 | .open = nvme_dev_open, |
| 1719 | .release = nvme_dev_release, |
| 1720 | .unlocked_ioctl = nvme_dev_ioctl, |
| 1721 | .compat_ioctl = nvme_dev_ioctl, |
| 1722 | }; |
| 1723 | |
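Editor's note: a hypothetical userspace sketch of the char-device ioctl path handled above, issuing Identify Controller (CNS=1) through NVME_IOCTL_ADMIN_CMD; the device path is made up:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		void *buf = calloc(1, 4096);
		struct nvme_admin_cmd cmd = {
			.opcode   = 0x06,		/* Identify */
			.addr     = (unsigned long)buf,
			.data_len = 4096,
			.cdw10    = 1,			/* CNS = 1: controller */
		};
		int fd = open("/dev/nvme0", O_RDWR);	/* hypothetical */

		if (fd < 0 || ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0) {
			perror("identify");
			return 1;
		}
		printf("model: %.40s\n", (char *)buf + 24);	/* MN at offset 24 */
		close(fd);
		return 0;
	}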
| 1724 | static ssize_t nvme_sysfs_reset(struct device *dev, |
| 1725 | struct device_attribute *attr, const char *buf, |
| 1726 | size_t count) |
| 1727 | { |
| 1728 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1729 | int ret; |
| 1730 | |
| 1731 | ret = ctrl->ops->reset_ctrl(ctrl); |
| 1732 | if (ret < 0) |
| 1733 | return ret; |
| 1734 | return count; |
| 1735 | } |
| 1736 | static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); |
| 1737 | |
Keith Busch | 9ec3bb2 | 2016-04-29 15:45:18 -0600 | [diff] [blame] | 1738 | static ssize_t nvme_sysfs_rescan(struct device *dev, |
| 1739 | struct device_attribute *attr, const char *buf, |
| 1740 | size_t count) |
| 1741 | { |
| 1742 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1743 | |
| 1744 | nvme_queue_scan(ctrl); |
| 1745 | return count; |
| 1746 | } |
| 1747 | static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); |
| 1748 | |
Keith Busch | 118472a | 2016-02-18 09:57:48 -0700 | [diff] [blame] | 1749 | static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, |
| 1750 | char *buf) |
| 1751 | { |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1752 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 118472a | 2016-02-18 09:57:48 -0700 | [diff] [blame] | 1753 | struct nvme_ctrl *ctrl = ns->ctrl; |
| 1754 | int serial_len = sizeof(ctrl->serial); |
| 1755 | int model_len = sizeof(ctrl->model); |
| 1756 | |
| 1757 | if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid))) |
| 1758 | return sprintf(buf, "eui.%16phN\n", ns->uuid); |
| 1759 | |
| 1760 | if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) |
| 1761 | return sprintf(buf, "eui.%8phN\n", ns->eui); |
| 1762 | |
| 1763 | while (ctrl->serial[serial_len - 1] == ' ') |
| 1764 | serial_len--; |
| 1765 | while (ctrl->model[model_len - 1] == ' ') |
| 1766 | model_len--; |
| 1767 | |
| 1768 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, |
| 1769 | serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id); |
| 1770 | } |
| 1771 | static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL); |
| 1772 | |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1773 | static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, |
| 1774 | char *buf) |
| 1775 | { |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1776 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1777 | return sprintf(buf, "%pU\n", ns->uuid); |
| 1778 | } |
| 1779 | static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL); |
| 1780 | |
| 1781 | static ssize_t eui_show(struct device *dev, struct device_attribute *attr, |
| 1782 | char *buf) |
| 1783 | { |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1784 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1785 | return sprintf(buf, "%8phd\n", ns->eui); |
| 1786 | } |
| 1787 | static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL); |
| 1788 | |
| 1789 | static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, |
| 1790 | char *buf) |
| 1791 | { |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1792 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1793 | return sprintf(buf, "%d\n", ns->ns_id); |
| 1794 | } |
| 1795 | static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL); |
| 1796 | |
| 1797 | static struct attribute *nvme_ns_attrs[] = { |
Keith Busch | 118472a | 2016-02-18 09:57:48 -0700 | [diff] [blame] | 1798 | &dev_attr_wwid.attr, |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1799 | &dev_attr_uuid.attr, |
| 1800 | &dev_attr_eui.attr, |
| 1801 | &dev_attr_nsid.attr, |
| 1802 | NULL, |
| 1803 | }; |
| 1804 | |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1805 | static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1806 | struct attribute *a, int n) |
| 1807 | { |
| 1808 | struct device *dev = container_of(kobj, struct device, kobj); |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1809 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1810 | |
| 1811 | if (a == &dev_attr_uuid.attr) { |
| 1812 | if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid))) |
| 1813 | return 0; |
| 1814 | } |
| 1815 | if (a == &dev_attr_eui.attr) { |
| 1816 | if (!memchr_inv(ns->eui, 0, sizeof(ns->eui))) |
| 1817 | return 0; |
| 1818 | } |
| 1819 | return a->mode; |
| 1820 | } |
| 1821 | |
| 1822 | static const struct attribute_group nvme_ns_attr_group = { |
| 1823 | .attrs = nvme_ns_attrs, |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1824 | .is_visible = nvme_ns_attrs_are_visible, |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1825 | }; |
| 1826 | |
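Editor's note: the namespace attribute group above is attached to the gendisk device, so the attributes appear under the block node. A hypothetical session (identifier values are made up):

	/*
	 *   $ cat /sys/block/nvme0n1/wwid
	 *   eui.0025385971b0bd6b
	 *   $ cat /sys/block/nvme0n1/nsid
	 *   1
	 * uuid and eui are hidden when the namespace reports all-zero
	 * identifiers; see nvme_ns_attrs_are_visible() above.
	 */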
Ming Lin | 931e1c2 | 2016-02-26 13:24:19 -0800 | [diff] [blame] | 1827 | #define nvme_show_str_function(field) \ |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1828 | static ssize_t field##_show(struct device *dev, \ |
| 1829 | struct device_attribute *attr, char *buf) \ |
| 1830 | { \ |
| 1831 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ |
| 1832 | return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \ |
| 1833 | } \ |
| 1834 | static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); |
| 1835 | |
Ming Lin | 931e1c2 | 2016-02-26 13:24:19 -0800 | [diff] [blame] | 1836 | #define nvme_show_int_function(field) \ |
| 1837 | static ssize_t field##_show(struct device *dev, \ |
| 1838 | struct device_attribute *attr, char *buf) \ |
| 1839 | { \ |
| 1840 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ |
| 1841 | return sprintf(buf, "%d\n", ctrl->field); \ |
| 1842 | } \ |
| 1843 | static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); |
| 1844 | |
| 1845 | nvme_show_str_function(model); |
| 1846 | nvme_show_str_function(serial); |
| 1847 | nvme_show_str_function(firmware_rev); |
| 1848 | nvme_show_int_function(cntlid); |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1849 | |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1850 | static ssize_t nvme_sysfs_delete(struct device *dev, |
| 1851 | struct device_attribute *attr, const char *buf, |
| 1852 | size_t count) |
| 1853 | { |
| 1854 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1855 | |
| 1856 | if (device_remove_file_self(dev, attr)) |
| 1857 | ctrl->ops->delete_ctrl(ctrl); |
| 1858 | return count; |
| 1859 | } |
| 1860 | static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); |
| 1861 | |
| 1862 | static ssize_t nvme_sysfs_show_transport(struct device *dev, |
| 1863 | struct device_attribute *attr, |
| 1864 | char *buf) |
| 1865 | { |
| 1866 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1867 | |
| 1868 | return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); |
| 1869 | } |
| 1870 | static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); |
| 1871 | |
Sagi Grimberg | 8432bdb2 | 2016-11-28 01:47:40 +0200 | [diff] [blame] | 1872 | static ssize_t nvme_sysfs_show_state(struct device *dev, |
| 1873 | struct device_attribute *attr, |
| 1874 | char *buf) |
| 1875 | { |
| 1876 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1877 | static const char *const state_name[] = { |
| 1878 | [NVME_CTRL_NEW] = "new", |
| 1879 | [NVME_CTRL_LIVE] = "live", |
| 1880 | [NVME_CTRL_RESETTING] = "resetting", |
|      | 1881 | 		[NVME_CTRL_RECONNECTING] = "reconnecting", |
| 1882 | [NVME_CTRL_DELETING] = "deleting", |
| 1883 | [NVME_CTRL_DEAD] = "dead", |
| 1884 | }; |
| 1885 | |
| 1886 | if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && |
| 1887 | state_name[ctrl->state]) |
| 1888 | return sprintf(buf, "%s\n", state_name[ctrl->state]); |
| 1889 | |
| 1890 | return sprintf(buf, "unknown state\n"); |
| 1891 | } |
| 1892 | |
| 1893 | static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); |
| 1894 | |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1895 | static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, |
| 1896 | struct device_attribute *attr, |
| 1897 | char *buf) |
| 1898 | { |
| 1899 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1900 | |
| 1901 | return snprintf(buf, PAGE_SIZE, "%s\n", |
| 1902 | ctrl->ops->get_subsysnqn(ctrl)); |
| 1903 | } |
| 1904 | static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); |
| 1905 | |
| 1906 | static ssize_t nvme_sysfs_show_address(struct device *dev, |
| 1907 | struct device_attribute *attr, |
| 1908 | char *buf) |
| 1909 | { |
| 1910 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1911 | |
| 1912 | return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); |
| 1913 | } |
| 1914 | static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); |
| 1915 | |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1916 | static struct attribute *nvme_dev_attrs[] = { |
| 1917 | &dev_attr_reset_controller.attr, |
Keith Busch | 9ec3bb2 | 2016-04-29 15:45:18 -0600 | [diff] [blame] | 1918 | &dev_attr_rescan_controller.attr, |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1919 | &dev_attr_model.attr, |
| 1920 | &dev_attr_serial.attr, |
| 1921 | &dev_attr_firmware_rev.attr, |
Ming Lin | 931e1c2 | 2016-02-26 13:24:19 -0800 | [diff] [blame] | 1922 | &dev_attr_cntlid.attr, |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1923 | &dev_attr_delete_controller.attr, |
| 1924 | &dev_attr_transport.attr, |
| 1925 | &dev_attr_subsysnqn.attr, |
| 1926 | &dev_attr_address.attr, |
Sagi Grimberg | 8432bdb2 | 2016-11-28 01:47:40 +0200 | [diff] [blame] | 1927 | &dev_attr_state.attr, |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1928 | NULL |
| 1929 | }; |
| 1930 | |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1931 | #define CHECK_ATTR(ctrl, a, name) \ |
| 1932 | if ((a) == &dev_attr_##name.attr && \ |
| 1933 | !(ctrl)->ops->get_##name) \ |
| 1934 | return 0 |
| 1935 | |
| 1936 | static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, |
| 1937 | struct attribute *a, int n) |
| 1938 | { |
| 1939 | struct device *dev = container_of(kobj, struct device, kobj); |
| 1940 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1941 | |
| 1942 | if (a == &dev_attr_delete_controller.attr) { |
| 1943 | if (!ctrl->ops->delete_ctrl) |
| 1944 | return 0; |
| 1945 | } |
| 1946 | |
| 1947 | CHECK_ATTR(ctrl, a, subsysnqn); |
| 1948 | CHECK_ATTR(ctrl, a, address); |
| 1949 | |
| 1950 | return a->mode; |
| 1951 | } |
| 1952 | |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1953 | static struct attribute_group nvme_dev_attrs_group = { |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1954 | .attrs = nvme_dev_attrs, |
| 1955 | .is_visible = nvme_dev_attrs_are_visible, |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1956 | }; |
| 1957 | |
| 1958 | static const struct attribute_group *nvme_dev_attr_groups[] = { |
| 1959 | &nvme_dev_attrs_group, |
| 1960 | NULL, |
| 1961 | }; |
| 1962 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1963 | static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) |
| 1964 | { |
| 1965 | struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); |
| 1966 | struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); |
| 1967 | |
| 1968 | return nsa->ns_id - nsb->ns_id; |
| 1969 | } |
| 1970 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 1971 | static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1972 | { |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 1973 | struct nvme_ns *ns, *ret = NULL; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1974 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 1975 | mutex_lock(&ctrl->namespaces_mutex); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1976 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 1977 | if (ns->ns_id == nsid) { |
| 1978 | kref_get(&ns->kref); |
| 1979 | ret = ns; |
| 1980 | break; |
| 1981 | } |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1982 | if (ns->ns_id > nsid) |
| 1983 | break; |
| 1984 | } |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 1985 | mutex_unlock(&ctrl->namespaces_mutex); |
| 1986 | return ret; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1987 | } |
| 1988 | |
| 1989 | static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
| 1990 | { |
| 1991 | struct nvme_ns *ns; |
| 1992 | struct gendisk *disk; |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 1993 | struct nvme_id_ns *id; |
| 1994 | char disk_name[DISK_NAME_LEN]; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1995 | int node = dev_to_node(ctrl->dev); |
| 1996 | |
| 1997 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); |
| 1998 | if (!ns) |
| 1999 | return; |
| 2000 | |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2001 | ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL); |
| 2002 | if (ns->instance < 0) |
| 2003 | goto out_free_ns; |
| 2004 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2005 | ns->queue = blk_mq_init_queue(ctrl->tagset); |
| 2006 | if (IS_ERR(ns->queue)) |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2007 | goto out_release_instance; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2008 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); |
| 2009 | ns->queue->queuedata = ns; |
| 2010 | ns->ctrl = ctrl; |
| 2011 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2012 | kref_init(&ns->kref); |
| 2013 | ns->ns_id = nsid; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2014 | ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2015 | |
| 2016 | blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 2017 | nvme_set_queue_limits(ctrl, ns->queue); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2018 | |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2019 | sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2020 | |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2021 | if (nvme_revalidate_ns(ns, &id)) |
| 2022 | goto out_free_queue; |
| 2023 | |
Matias Bjørling | 3dc87dd | 2016-11-28 22:38:53 +0100 | [diff] [blame] | 2024 | if (nvme_nvm_ns_supported(ns, id) && |
| 2025 | nvme_nvm_register(ns, disk_name, node)) { |
| 2026 | dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__); |
| 2027 | goto out_free_id; |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2028 | } |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2029 | |
Matias Bjørling | 3dc87dd | 2016-11-28 22:38:53 +0100 | [diff] [blame] | 2030 | disk = alloc_disk_node(0, node); |
| 2031 | if (!disk) |
| 2032 | goto out_free_id; |
| 2033 | |
| 2034 | disk->fops = &nvme_fops; |
| 2035 | disk->private_data = ns; |
| 2036 | disk->queue = ns->queue; |
| 2037 | disk->flags = GENHD_FL_EXT_DEVT; |
| 2038 | memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); |
| 2039 | ns->disk = disk; |
| 2040 | |
| 2041 | __nvme_revalidate_disk(disk, id); |
| 2042 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2043 | mutex_lock(&ctrl->namespaces_mutex); |
| 2044 | list_add_tail(&ns->list, &ctrl->namespaces); |
| 2045 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2046 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2047 | kref_get(&ctrl->kref); |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2048 | |
| 2049 | kfree(id); |
| 2050 | |
Dan Williams | 0d52c756 | 2016-06-15 19:44:20 -0700 | [diff] [blame] | 2051 | device_add_disk(ctrl->device, ns->disk); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 2052 | if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj, |
| 2053 | &nvme_ns_attr_group)) |
| 2054 | pr_warn("%s: failed to create sysfs group for identification\n", |
| 2055 | ns->disk->disk_name); |
Matias Bjørling | 3dc87dd | 2016-11-28 22:38:53 +0100 | [diff] [blame] | 2056 | if (ns->ndev && nvme_nvm_register_sysfs(ns)) |
| 2057 | pr_warn("%s: failed to register lightnvm sysfs group for identification\n", |
| 2058 | ns->disk->disk_name); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2059 | return; |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2060 | out_free_id: |
| 2061 | kfree(id); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2062 | out_free_queue: |
| 2063 | blk_cleanup_queue(ns->queue); |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2064 | out_release_instance: |
| 2065 | ida_simple_remove(&ctrl->ns_ida, ns->instance); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2066 | out_free_ns: |
| 2067 | kfree(ns); |
| 2068 | } |
| 2069 | |
| 2070 | static void nvme_ns_remove(struct nvme_ns *ns) |
| 2071 | { |
Keith Busch | 646017a | 2016-02-24 09:15:54 -0700 | [diff] [blame] | 2072 | if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) |
| 2073 | return; |
| 2074 | |
Matias Bjørling | b0b4e09 | 2016-09-16 14:25:07 +0200 | [diff] [blame] | 2075 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2076 | if (blk_get_integrity(ns->disk)) |
| 2077 | blk_integrity_unregister(ns->disk); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 2078 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, |
| 2079 | &nvme_ns_attr_group); |
Matias Bjørling | 3dc87dd | 2016-11-28 22:38:53 +0100 | [diff] [blame] | 2080 | if (ns->ndev) |
| 2081 | nvme_nvm_unregister_sysfs(ns); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2082 | del_gendisk(ns->disk); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2083 | blk_mq_abort_requeue_list(ns->queue); |
| 2084 | blk_cleanup_queue(ns->queue); |
| 2085 | } |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2086 | |
| 2087 | mutex_lock(&ns->ctrl->namespaces_mutex); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2088 | list_del_init(&ns->list); |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2089 | mutex_unlock(&ns->ctrl->namespaces_mutex); |
| 2090 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2091 | nvme_put_ns(ns); |
| 2092 | } |
| 2093 | |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2094 | static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
| 2095 | { |
| 2096 | struct nvme_ns *ns; |
| 2097 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2098 | ns = nvme_find_get_ns(ctrl, nsid); |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2099 | if (ns) { |
Matias Bjørling | b0b4e09 | 2016-09-16 14:25:07 +0200 | [diff] [blame] | 2100 | if (ns->disk && revalidate_disk(ns->disk)) |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2101 | nvme_ns_remove(ns); |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2102 | nvme_put_ns(ns); |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2103 | } else |
| 2104 | nvme_alloc_ns(ctrl, nsid); |
| 2105 | } |
| 2106 | |
Sunad Bhandary | 47b0e50 | 2016-05-27 15:59:43 +0530 | [diff] [blame] | 2107 | static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, |
| 2108 | unsigned nsid) |
| 2109 | { |
| 2110 | struct nvme_ns *ns, *next; |
| 2111 | |
| 2112 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { |
| 2113 | if (ns->ns_id > nsid) |
| 2114 | nvme_ns_remove(ns); |
| 2115 | } |
| 2116 | } |
| 2117 | |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2118 | static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) |
| 2119 | { |
| 2120 | struct nvme_ns *ns; |
| 2121 | __le32 *ns_list; |
| 2122 | unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); |
| 2123 | int ret = 0; |
| 2124 | |
| 2125 | ns_list = kzalloc(0x1000, GFP_KERNEL); |
| 2126 | if (!ns_list) |
| 2127 | return -ENOMEM; |
| 2128 | |
| 2129 | for (i = 0; i < num_lists; i++) { |
| 2130 | ret = nvme_identify_ns_list(ctrl, prev, ns_list); |
| 2131 | if (ret) |
Sunad Bhandary | 47b0e50 | 2016-05-27 15:59:43 +0530 | [diff] [blame] | 2132 | goto free; |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2133 | |
| 2134 | for (j = 0; j < min(nn, 1024U); j++) { |
| 2135 | nsid = le32_to_cpu(ns_list[j]); |
| 2136 | if (!nsid) |
| 2137 | goto out; |
| 2138 | |
| 2139 | nvme_validate_ns(ctrl, nsid); |
| 2140 | |
| 2141 | while (++prev < nsid) { |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2142 | ns = nvme_find_get_ns(ctrl, prev); |
| 2143 | if (ns) { |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2144 | nvme_ns_remove(ns); |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2145 | nvme_put_ns(ns); |
| 2146 | } |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2147 | } |
| 2148 | } |
| 2149 | nn -= j; |
| 2150 | } |
| 2151 | out: |
Sunad Bhandary | 47b0e50 | 2016-05-27 15:59:43 +0530 | [diff] [blame] | 2152 | nvme_remove_invalid_namespaces(ctrl, prev); |
| 2153 | free: |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2154 | kfree(ns_list); |
| 2155 | return ret; |
| 2156 | } |
| 2157 | |
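Editor's note: the geometry of the Identify Namespace-List scan above, as a reference sketch. Each 4 KiB page returned by nvme_identify_ns_list() holds 0x1000 / sizeof(__le32) = 1024 namespace IDs in ascending order, a zero entry terminates the list, and gaps between prev and the next active nsid are reaped via nvme_ns_remove(). With hypothetical numbers:

	/* e.g. nn = 2500  =>  num_lists = DIV_ROUND_UP(2500, 1024) = 3 pages;
	 * a returned list of 5, 7, 0, ... validates nsids 5 and 7 and
	 * removes any stale namespaces in between (here, nsid 6). */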
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2158 | static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2159 | { |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2160 | unsigned i; |
| 2161 | |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2162 | for (i = 1; i <= nn; i++) |
| 2163 | nvme_validate_ns(ctrl, i); |
| 2164 | |
Sunad Bhandary | 47b0e50 | 2016-05-27 15:59:43 +0530 | [diff] [blame] | 2165 | nvme_remove_invalid_namespaces(ctrl, nn); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2166 | } |
| 2167 | |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2168 | static void nvme_scan_work(struct work_struct *work) |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2169 | { |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2170 | struct nvme_ctrl *ctrl = |
| 2171 | container_of(work, struct nvme_ctrl, scan_work); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2172 | struct nvme_id_ctrl *id; |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2173 | unsigned nn; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2174 | |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2175 | if (ctrl->state != NVME_CTRL_LIVE) |
| 2176 | return; |
| 2177 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2178 | if (nvme_identify_ctrl(ctrl, &id)) |
| 2179 | return; |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2180 | |
| 2181 | nn = le32_to_cpu(id->nn); |
Gabriel Krisman Bertazi | 8ef2074 | 2016-10-19 09:51:05 -0600 | [diff] [blame] | 2182 | if (ctrl->vs >= NVME_VS(1, 1, 0) && |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2183 | !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { |
| 2184 | if (!nvme_scan_ns_list(ctrl, nn)) |
| 2185 | goto done; |
| 2186 | } |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2187 | nvme_scan_ns_sequential(ctrl, nn); |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2188 | done: |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2189 | mutex_lock(&ctrl->namespaces_mutex); |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2190 | list_sort(NULL, &ctrl->namespaces, ns_cmp); |
Christoph Hellwig | 69d3b8a | 2015-12-24 15:27:00 +0100 | [diff] [blame] | 2191 | mutex_unlock(&ctrl->namespaces_mutex); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2192 | kfree(id); |
| 2193 | } |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2194 | |
| 2195 | void nvme_queue_scan(struct nvme_ctrl *ctrl) |
| 2196 | { |
| 2197 | /* |
| 2198 | * Do not queue new scan work when a controller is reset during |
| 2199 | * removal. |
| 2200 | */ |
| 2201 | if (ctrl->state == NVME_CTRL_LIVE) |
| 2202 | schedule_work(&ctrl->scan_work); |
| 2203 | } |
| 2204 | EXPORT_SYMBOL_GPL(nvme_queue_scan); |
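/*
 * A minimal sketch of a caller (example_post_reset() is hypothetical,
 * not part of this driver): a transport requeues a scan once its reset
 * handler has moved the controller back to NVME_CTRL_LIVE, so namespace
 * changes that happened while the controller was down get picked up.
 */
static void __maybe_unused example_post_reset(struct nvme_ctrl *ctrl)
{
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
		nvme_queue_scan(ctrl);	/* no-op unless state is LIVE */
}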
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2205 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2206 | /* |
| 2207 | * This function iterates the namespace list unlocked to allow recovery from |
| 2208 | * controller failure. It is up to the caller to ensure the namespace list is |
| 2209 | * not modified by scan work while this function is executing. |
| 2210 | */ |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2211 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl) |
| 2212 | { |
| 2213 | struct nvme_ns *ns, *next; |
| 2214 | |
Keith Busch | 0ff9d4e | 2016-05-12 08:37:14 -0600 | [diff] [blame] | 2215 | /* |
| 2216 | * The dead state indicates the controller was not gracefully |
| 2217 | * disconnected. In that case, we won't be able to flush any data while |
| 2218 | * removing the namespaces' disks; fail all the queues now to avoid |
| 2219 | * potentially having to clean up the failed sync later. |
| 2220 | */ |
| 2221 | if (ctrl->state == NVME_CTRL_DEAD) |
| 2222 | nvme_kill_queues(ctrl); |
| 2223 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2224 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) |
| 2225 | nvme_ns_remove(ns); |
| 2226 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2227 | EXPORT_SYMBOL_GPL(nvme_remove_namespaces); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2228 | |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2229 | static void nvme_async_event_work(struct work_struct *work) |
| 2230 | { |
| 2231 | struct nvme_ctrl *ctrl = |
| 2232 | container_of(work, struct nvme_ctrl, async_event_work); |
| 2233 | |
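	/*
	 * ctrl->lock serializes event_limit here; it is dropped around
	 * the ->submit_async_event() callout, which may block in some
	 * transports.
	 */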
| 2234 | spin_lock_irq(&ctrl->lock); |
| 2235 | while (ctrl->event_limit > 0) { |
| 2236 | int aer_idx = --ctrl->event_limit; |
| 2237 | |
| 2238 | spin_unlock_irq(&ctrl->lock); |
| 2239 | ctrl->ops->submit_async_event(ctrl, aer_idx); |
| 2240 | spin_lock_irq(&ctrl->lock); |
| 2241 | } |
| 2242 | spin_unlock_irq(&ctrl->lock); |
| 2243 | } |
| 2244 | |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2245 | void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, |
| 2246 | union nvme_result *res) |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2247 | { |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2248 | u32 result = le32_to_cpu(res->u32); |
| 2249 | bool done = true; |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2250 | |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2251 | switch (le16_to_cpu(status) >> 1) { |
| 2252 | case NVME_SC_SUCCESS: |
| 2253 | done = false; |
| 2254 | /* fall through */ |
| 2255 | case NVME_SC_ABORT_REQ: |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2256 | ++ctrl->event_limit; |
| 2257 | schedule_work(&ctrl->async_event_work); |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2258 | break; |
| 2259 | default: |
| 2260 | break; |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2261 | } |
| 2262 | |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2263 | if (done) |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2264 | return; |
| 2265 | |
| 2266 | switch (result & 0xff07) { |
| 2267 | case NVME_AER_NOTICE_NS_CHANGED: |
| 2268 | dev_info(ctrl->device, "rescanning\n"); |
| 2269 | nvme_queue_scan(ctrl); |
| 2270 | break; |
| 2271 | default: |
| 2272 | dev_warn(ctrl->device, "async event result %08x\n", result); |
| 2273 | } |
| 2274 | } |
| 2275 | EXPORT_SYMBOL_GPL(nvme_complete_async_event); |
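/*
 * A hedged sketch of the completion side (example_handle_cqe() is
 * illustrative, not this driver's actual PCIe or fabrics code): the
 * transport recognizes an AER completion on the admin queue, e.g. by a
 * reserved command id, and forwards the raw status and result here
 * instead of completing a block request.
 */
static void __maybe_unused example_handle_cqe(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe, bool is_aer)
{
	if (is_aer) {
		nvme_complete_async_event(ctrl, cqe->status, &cqe->result);
		return;
	}
	/* ... normal blk-mq request completion ... */
}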
| 2276 | |
| 2277 | void nvme_queue_async_events(struct nvme_ctrl *ctrl) |
| 2278 | { |
| 2279 | ctrl->event_limit = NVME_NR_AERS; |
| 2280 | schedule_work(&ctrl->async_event_work); |
| 2281 | } |
| 2282 | EXPORT_SYMBOL_GPL(nvme_queue_async_events); |
| 2283 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2284 | static DEFINE_IDA(nvme_instance_ida); |
| 2285 | |
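/*
 * Legacy two-step IDA allocation: ida_pre_get() preallocates memory
 * without holding the lock, then ida_get_new() allocates the id under
 * dev_list_lock.  -EAGAIN means the preallocated node was consumed
 * (e.g. by a concurrent caller), so the loop below simply retries.
 */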
| 2286 | static int nvme_set_instance(struct nvme_ctrl *ctrl) |
| 2287 | { |
| 2288 | int instance, error; |
| 2289 | |
| 2290 | do { |
| 2291 | if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) |
| 2292 | return -ENODEV; |
| 2293 | |
| 2294 | spin_lock(&dev_list_lock); |
| 2295 | error = ida_get_new(&nvme_instance_ida, &instance); |
| 2296 | spin_unlock(&dev_list_lock); |
| 2297 | } while (error == -EAGAIN); |
| 2298 | |
| 2299 | if (error) |
| 2300 | return -ENODEV; |
| 2301 | |
| 2302 | ctrl->instance = instance; |
| 2303 | return 0; |
| 2304 | } |
| 2305 | |
| 2306 | static void nvme_release_instance(struct nvme_ctrl *ctrl) |
| 2307 | { |
| 2308 | spin_lock(&dev_list_lock); |
| 2309 | ida_remove(&nvme_instance_ida, ctrl->instance); |
| 2310 | spin_unlock(&dev_list_lock); |
| 2311 | } |
| 2312 | |
Keith Busch | 53029b0 | 2015-11-28 15:41:02 +0100 | [diff] [blame] | 2313 | void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2314 | { |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2315 | flush_work(&ctrl->async_event_work); |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2316 | flush_work(&ctrl->scan_work); |
| 2317 | nvme_remove_namespaces(ctrl); |
| 2318 | |
Keith Busch | 53029b0 | 2015-11-28 15:41:02 +0100 | [diff] [blame] | 2319 | device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2320 | |
| 2321 | spin_lock(&dev_list_lock); |
| 2322 | list_del(&ctrl->node); |
| 2323 | spin_unlock(&dev_list_lock); |
Keith Busch | 53029b0 | 2015-11-28 15:41:02 +0100 | [diff] [blame] | 2324 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2325 | EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); |
Keith Busch | 53029b0 | 2015-11-28 15:41:02 +0100 | [diff] [blame] | 2326 | |
| 2327 | static void nvme_free_ctrl(struct kref *kref) |
| 2328 | { |
| 2329 | struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2330 | |
| 2331 | put_device(ctrl->device); |
| 2332 | nvme_release_instance(ctrl); |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2333 | ida_destroy(&ctrl->ns_ida); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2334 | |
| 2335 | ctrl->ops->free_ctrl(ctrl); |
| 2336 | } |
| 2337 | |
| 2338 | void nvme_put_ctrl(struct nvme_ctrl *ctrl) |
| 2339 | { |
| 2340 | kref_put(&ctrl->kref, nvme_free_ctrl); |
| 2341 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2342 | EXPORT_SYMBOL_GPL(nvme_put_ctrl); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2343 | |
| 2344 | /* |
| 2345 | * Initialize an NVMe controller structure. This needs to be called during |
| 2346 | * the earliest initialization so that we have the initialized structure around |
| 2347 | * during probing. |
| 2348 | */ |
| 2349 | int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, |
| 2350 | const struct nvme_ctrl_ops *ops, unsigned long quirks) |
| 2351 | { |
| 2352 | int ret; |
| 2353 | |
Christoph Hellwig | bb8d261 | 2016-04-26 13:51:57 +0200 | [diff] [blame] | 2354 | ctrl->state = NVME_CTRL_NEW; |
| 2355 | spin_lock_init(&ctrl->lock); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2356 | INIT_LIST_HEAD(&ctrl->namespaces); |
Christoph Hellwig | 69d3b8a | 2015-12-24 15:27:00 +0100 | [diff] [blame] | 2357 | mutex_init(&ctrl->namespaces_mutex); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2358 | kref_init(&ctrl->kref); |
| 2359 | ctrl->dev = dev; |
| 2360 | ctrl->ops = ops; |
| 2361 | ctrl->quirks = quirks; |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2362 | INIT_WORK(&ctrl->scan_work, nvme_scan_work); |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2363 | INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2364 | |
| 2365 | ret = nvme_set_instance(ctrl); |
| 2366 | if (ret) |
| 2367 | goto out; |
| 2368 | |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 2369 | ctrl->device = device_create_with_groups(nvme_class, ctrl->dev, |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2370 | MKDEV(nvme_char_major, ctrl->instance), |
Christoph Hellwig | f4f0f63 | 2016-02-09 12:44:03 -0700 | [diff] [blame] | 2371 | ctrl, nvme_dev_attr_groups, |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 2372 | "nvme%d", ctrl->instance); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2373 | if (IS_ERR(ctrl->device)) { |
| 2374 | ret = PTR_ERR(ctrl->device); |
| 2375 | goto out_release_instance; |
| 2376 | } |
| 2377 | get_device(ctrl->device); |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2378 | ida_init(&ctrl->ns_ida); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2379 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2380 | spin_lock(&dev_list_lock); |
| 2381 | list_add_tail(&ctrl->node, &nvme_ctrl_list); |
| 2382 | spin_unlock(&dev_list_lock); |
| 2383 | |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 2384 | /* |
| 2385 | * Initialize latency tolerance controls. The sysfs files won't |
| 2386 | * be visible to userspace unless the device actually supports APST. |
| 2387 | */ |
| 2388 | ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; |
| 2389 | dev_pm_qos_update_user_latency_tolerance(ctrl->device, |
| 2390 | min(default_ps_max_latency_us, (unsigned long)S32_MAX)); |
| 2391 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2392 | return 0; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2393 | out_release_instance: |
| 2394 | nvme_release_instance(ctrl); |
| 2395 | out: |
| 2396 | return ret; |
| 2397 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2398 | EXPORT_SYMBOL_GPL(nvme_init_ctrl); |
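/*
 * A hypothetical probe-time sketch (example_probe() and
 * example_ctrl_ops are assumptions, not part of this driver):
 * transports embed a struct nvme_ctrl in their own device structure
 * and call nvme_init_ctrl() before any other core entry point.
 */
static const struct nvme_ctrl_ops example_ctrl_ops;	/* assumed populated */

static int __maybe_unused example_probe(struct device *dev,
		struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_init_ctrl(ctrl, dev, &example_ctrl_ops, 0 /* quirks */);
	if (ret)
		return ret;

	/*
	 * ... transport setup and nvme_init_identify() would follow;
	 * once the controller reaches NVME_CTRL_LIVE, nvme_queue_scan()
	 * starts namespace discovery ...
	 */
	return 0;
}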
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2399 | |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2400 | /** |
| 2401 | * nvme_kill_queues(): Ends all namespace queues |
| 2402 | * @ctrl: the dead controller whose namespace queues need to be ended |
| 2403 | * |
| 2404 | * Call this function when the driver determines it is unable to get the |
| 2405 | * controller in a state capable of servicing IO. |
| 2406 | */ |
| 2407 | void nvme_kill_queues(struct nvme_ctrl *ctrl) |
| 2408 | { |
| 2409 | struct nvme_ns *ns; |
| 2410 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2411 | mutex_lock(&ctrl->namespaces_mutex); |
| 2412 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2413 | /* |
| 2414 | * Revalidating a dead namespace sets capacity to 0. This will |
| 2415 | * end buffered writers dirtying pages that can't be synced. |
| 2416 | */ |
Keith Busch | f33447b | 2017-02-10 18:15:51 -0500 | [diff] [blame] | 2417 | if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) |
| 2418 | continue; |
| 2419 | revalidate_disk(ns->disk); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2420 | blk_set_queue_dying(ns->queue); |
| 2421 | blk_mq_abort_requeue_list(ns->queue); |
| 2422 | blk_mq_start_stopped_hw_queues(ns->queue, true); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2423 | } |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2424 | mutex_unlock(&ctrl->namespaces_mutex); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2425 | } |
Linus Torvalds | 237045f | 2016-03-18 17:13:31 -0700 | [diff] [blame] | 2426 | EXPORT_SYMBOL_GPL(nvme_kill_queues); |
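/*
 * Typical call site: a transport's removal or failed-reset path once
 * the controller is known to be unreachable; see also the
 * NVME_CTRL_DEAD handling in nvme_remove_namespaces() above.  Ending
 * the queues lets blocked submitters and dirty-page writeback fail
 * fast instead of hanging on a dead device.
 */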
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2427 | |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2428 | void nvme_unfreeze(struct nvme_ctrl *ctrl) |
| 2429 | { |
| 2430 | struct nvme_ns *ns; |
| 2431 | |
| 2432 | mutex_lock(&ctrl->namespaces_mutex); |
| 2433 | list_for_each_entry(ns, &ctrl->namespaces, list) |
| 2434 | blk_mq_unfreeze_queue(ns->queue); |
| 2435 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2436 | } |
| 2437 | EXPORT_SYMBOL_GPL(nvme_unfreeze); |
| 2438 | |
| 2439 | void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) |
| 2440 | { |
| 2441 | struct nvme_ns *ns; |
| 2442 | |
| 2443 | mutex_lock(&ctrl->namespaces_mutex); |
| 2444 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
| 2445 | timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); |
| 2446 | if (timeout <= 0) |
| 2447 | break; |
| 2448 | } |
| 2449 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2450 | } |
| 2451 | EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); |
| 2452 | |
| 2453 | void nvme_wait_freeze(struct nvme_ctrl *ctrl) |
| 2454 | { |
| 2455 | struct nvme_ns *ns; |
| 2456 | |
| 2457 | mutex_lock(&ctrl->namespaces_mutex); |
| 2458 | list_for_each_entry(ns, &ctrl->namespaces, list) |
| 2459 | blk_mq_freeze_queue_wait(ns->queue); |
| 2460 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2461 | } |
| 2462 | EXPORT_SYMBOL_GPL(nvme_wait_freeze); |
| 2463 | |
| 2464 | void nvme_start_freeze(struct nvme_ctrl *ctrl) |
| 2465 | { |
| 2466 | struct nvme_ns *ns; |
| 2467 | |
| 2468 | mutex_lock(&ctrl->namespaces_mutex); |
| 2469 | list_for_each_entry(ns, &ctrl->namespaces, list) |
Ming Lei | 1671d52 | 2017-03-27 20:06:57 +0800 | [diff] [blame] | 2470 | blk_freeze_queue_start(ns->queue); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2471 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2472 | } |
| 2473 | EXPORT_SYMBOL_GPL(nvme_start_freeze); |
| 2474 | |
Keith Busch | 2564626 | 2016-01-04 09:10:57 -0700 | [diff] [blame] | 2475 | void nvme_stop_queues(struct nvme_ctrl *ctrl) |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2476 | { |
| 2477 | struct nvme_ns *ns; |
| 2478 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2479 | mutex_lock(&ctrl->namespaces_mutex); |
Bart Van Assche | a6eaa88 | 2016-10-28 17:23:40 -0700 | [diff] [blame] | 2480 | list_for_each_entry(ns, &ctrl->namespaces, list) |
Bart Van Assche | 3174dd3 | 2016-10-28 17:23:19 -0700 | [diff] [blame] | 2481 | blk_mq_quiesce_queue(ns->queue); |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2482 | mutex_unlock(&ctrl->namespaces_mutex); |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2483 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2484 | EXPORT_SYMBOL_GPL(nvme_stop_queues); |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2485 | |
Keith Busch | 2564626 | 2016-01-04 09:10:57 -0700 | [diff] [blame] | 2486 | void nvme_start_queues(struct nvme_ctrl *ctrl) |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2487 | { |
| 2488 | struct nvme_ns *ns; |
| 2489 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2490 | mutex_lock(&ctrl->namespaces_mutex); |
| 2491 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2492 | blk_mq_start_stopped_hw_queues(ns->queue, true); |
| 2493 | blk_mq_kick_requeue_list(ns->queue); |
| 2494 | } |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2495 | mutex_unlock(&ctrl->namespaces_mutex); |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2496 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2497 | EXPORT_SYMBOL_GPL(nvme_start_queues); |
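/*
 * Sketch of how the freeze and quiesce helpers above are meant to nest
 * across a controller reset, loosely modeled on the PCIe driver's
 * usage (example_reset() is illustrative, not a verbatim excerpt):
 */
static void __maybe_unused example_reset(struct nvme_ctrl *ctrl)
{
	nvme_start_freeze(ctrl);	/* new I/O now waits at queue entry */
	nvme_stop_queues(ctrl);		/* quiesce hw queues for teardown */

	/* ... disable, reinitialize and re-enable the controller ... */

	nvme_start_queues(ctrl);	/* restart hw queues */
	nvme_wait_freeze(ctrl);		/* wait for outstanding I/O to drain */
	nvme_unfreeze(ctrl);		/* let new I/O flow again */
}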
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2498 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2499 | int __init nvme_core_init(void) |
| 2500 | { |
| 2501 | int result; |
| 2502 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2503 | result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme", |
| 2504 | &nvme_dev_fops); |
| 2505 | if (result < 0) |
NeilBrown | b09dcf5 | 2016-07-13 11:03:58 -0700 | [diff] [blame] | 2506 | return result; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2507 | else if (result > 0) |
| 2508 | nvme_char_major = result; |
| 2509 | |
| 2510 | nvme_class = class_create(THIS_MODULE, "nvme"); |
| 2511 | if (IS_ERR(nvme_class)) { |
| 2512 | result = PTR_ERR(nvme_class); |
| 2513 | goto unregister_chrdev; |
| 2514 | } |
| 2515 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2516 | return 0; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2517 | |
| 2518 | unregister_chrdev: |
| 2519 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2520 | return result; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2521 | } |
| 2522 | |
| 2523 | void nvme_core_exit(void) |
| 2524 | { |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2525 | class_destroy(nvme_class); |
| 2526 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2527 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2528 | |
| 2529 | MODULE_LICENSE("GPL"); |
| 2530 | MODULE_VERSION("1.0"); |
| 2531 | module_init(nvme_core_init); |
| 2532 | module_exit(nvme_core_exit); |