/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}

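/*
 * nvme_req(req)->status above holds the completion status field; the
 * 0x7ff mask keeps only the Status Code and Status Code Type and drops
 * the DNR/More bits, which matter for the retry decision rather than
 * for the generic block-layer error translation.
 */
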
static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (jiffies - req->start_time >= req->timeout)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

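/*
 * A command is only retried when the controller did not set the Do Not
 * Retry (DNR) bit in its status, the request still has timeout budget
 * left, and fewer than max_retries attempts have been made.
 */
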
void nvme_complete_rq(struct request *req)
{
	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
		nvme_req(req)->retries++;
		blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
		return;
	}

	blk_mq_end_request(req, nvme_error_status(req));
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	nvme_req(req)->status = status;
	blk_mq_complete_request(req);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

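/*
 * nvme_cancel_request() is meant to be fed to blk_mq_tagset_busy_iter()
 * during controller teardown, e.g. (as the PCIe transport does):
 *
 *	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
 *
 * Every started request is completed with ABORT_REQ status, plus DNR if
 * the queue is already dying so the command is not retried.
 */
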
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irq(&ctrl->lock);

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

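/*
 * Summary of the transitions the state machine above accepts (anything
 * else leaves ctrl->state untouched and returns false):
 *
 *	NEW          -> LIVE, RESETTING
 *	LIVE         -> RESETTING, RECONNECTING, DELETING
 *	RESETTING    -> LIVE, DELETING
 *	RECONNECTING -> LIVE, DELETING
 *	DELETING     -> DEAD
 */
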
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	if (ns->disk) {
		spin_lock(&dev_list_lock);
		ns->disk->private_data = NULL;
		spin_unlock(&dev_list_lock);
	}

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

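/*
 * Passing NVME_QID_ANY lets blk-mq place the request on any hardware
 * context, which is what admin and most passthrough commands want.  A
 * specific qid pins the request to one hardware queue; the qid - 1
 * adjustment maps NVMe I/O queue numbering, which starts at 1 because
 * queue 0 is the admin queue, onto the zero-based blk-mq hctx index.
 */
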
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

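/*
 * The Dataset Management payload built above is one nvme_dsm_range per
 * discard segment, with dsm.nr holding the zero-based range count
 * (hence segments - 1).  Instead of mapping a data buffer, the range
 * array is attached through req->special_vec with RQF_SPECIAL_PAYLOAD;
 * it is expected to be freed again by the command cleanup path once the
 * request completes.
 */
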
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

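/*
 * For namespaces formatted with metadata (ns->ms != 0) the PRINFO check
 * bits ask the controller to verify the T10 PI guard and reference
 * tags.  If the request carries no integrity payload, PRACT is set
 * instead, so the controller generates the protection information on
 * writes and strips it on reads.
 */
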
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

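/*
 * The RQF_DONTPREP guard above matters for retries: a requeued request
 * re-enters nvme_setup_cmd() with RQF_DONTPREP already set, so its
 * retry count and flags survive instead of being reset to zero.
 */
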
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

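/*
 * A minimal usage sketch: build a command on the stack and issue it
 * synchronously on the admin queue, exactly the pattern
 * nvme_identify_ctrl() below follows:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */
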
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		ctrl->ops->reset_ctrl(ctrl);
		return;
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

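/*
 * Keep-alive only runs when ctrl->kato is non-zero, which is normally
 * negotiated by the fabrics transports; with kato == 0 both helpers
 * above are no-ops.  Each successful completion re-arms ka_work, so a
 * keep-alive command is sent roughly once per kato seconds, and a
 * failure to even allocate the command escalates to a controller reset.
 */
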
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

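/*
 * cdw10 for Get Log Page packs the zero-based dword count (NUMD) into
 * bits 31:16 and the log page identifier into bits 7:0.  For the
 * 512-byte SMART log above that is ((512 / 4) - 1) << 16 | 0x02, i.e.
 * 0x007f0002.
 */
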
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

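/*
 * Worked example: asking for *count = 8 queues sends dword11 =
 * 0x00070007 (zero-based submission and completion queue counts).  If
 * the controller answers with result = 0x000f0003, it granted 4
 * submission and 16 completion queues; min(3, 15) + 1 = 4 usable queue
 * pairs, so *count becomes min(8, 4) = 4.
 */
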
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
#ifdef CONFIG_NVM
		if (ns->ndev)
			return nvme_nvm_ioctl(ns, cmd, arg);
#endif
		if (is_sed_ioctl(cmd))
			return sed_ioctl(ns->ctrl->opal_dev, cmd,
					(void __user *) arg);
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
		u16 bs)
{
	struct nvme_ns *ns = disk->private_data;
	u16 old_ms = ns->ms;
	u8 pi_type = 0;

	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/* PI is only possible when the metadata size equals the T10 PI tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		pi_type = id->dps & NVME_NS_DPS_PI_MASK;

	if (blk_get_integrity(disk) &&
	    (ns->pi_type != pi_type || ns->ms != old_ms ||
	     bs != queue_logical_block_size(disk->queue) ||
	     (ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
}

static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
		u16 bs)
{
}
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
}

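/*
 * Each nvme_dsm_range is 16 bytes, so the BUILD_BUG_ON above checks
 * that the largest possible range array (NVME_DSM_MAX_RANGES entries,
 * the most a single DSM command can carry) still fits in one page and
 * can therefore be described by the single special_vec bio_vec set up
 * in nvme_setup_discard().
 */
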
static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
{
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
		return -ENODEV;
	}

	if ((*id)->ncap == 0) {
		kfree(*id);
		return -ENODEV;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));

	return 0;
}

static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;
	u16 bs;

	/*
	 * If the LBA format is unset, fall back to a default 512 byte block
	 * size so the block layer can still be used before reads and writes
	 * fail against the zero capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;

	blk_mq_freeze_queue(disk->queue);

	if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
		nvme_prep_integrity(disk, id, bs);
	blk_queue_logical_block_size(ns->queue, bs);
	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id = NULL;
	int ret;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	ret = nvme_revalidate_ns(ns, &id);
	if (ret)
		return ret;

	__nvme_revalidate_disk(disk, id);
	kfree(id);

	return 0;
}

| 1071 | static char nvme_pr_type(enum pr_type type) |
| 1072 | { |
| 1073 | switch (type) { |
| 1074 | case PR_WRITE_EXCLUSIVE: |
| 1075 | return 1; |
| 1076 | case PR_EXCLUSIVE_ACCESS: |
| 1077 | return 2; |
| 1078 | case PR_WRITE_EXCLUSIVE_REG_ONLY: |
| 1079 | return 3; |
| 1080 | case PR_EXCLUSIVE_ACCESS_REG_ONLY: |
| 1081 | return 4; |
| 1082 | case PR_WRITE_EXCLUSIVE_ALL_REGS: |
| 1083 | return 5; |
| 1084 | case PR_EXCLUSIVE_ACCESS_ALL_REGS: |
| 1085 | return 6; |
| 1086 | default: |
| 1087 | return 0; |
| 1088 | } |
| 1089 | } |
| 1090 | |
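| | /* |
| |  * Build and issue an NVMe reservation command: the current and new |
| |  * reservation keys travel in a 16 byte payload as two little-endian |
| |  * 64-bit values, while the action and type are encoded in cdw10. |
| |  */ |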
| 1091 | static int nvme_pr_command(struct block_device *bdev, u32 cdw10, |
| 1092 | u64 key, u64 sa_key, u8 op) |
| 1093 | { |
| 1094 | struct nvme_ns *ns = bdev->bd_disk->private_data; |
| 1095 | struct nvme_command c; |
| 1096 | u8 data[16] = { 0, }; |
| 1097 | |
| 1098 | put_unaligned_le64(key, &data[0]); |
| 1099 | put_unaligned_le64(sa_key, &data[8]); |
| 1100 | |
| 1101 | memset(&c, 0, sizeof(c)); |
| 1102 | c.common.opcode = op; |
| 1103 | c.common.nsid = cpu_to_le32(ns->ns_id); |
| 1104 | c.common.cdw10[0] = cpu_to_le32(cdw10); |
| 1105 | |
| 1106 | return nvme_submit_sync_cmd(ns->queue, &c, data, 16); |
| 1107 | } |
| 1108 | |
| 1109 | static int nvme_pr_register(struct block_device *bdev, u64 old, |
| 1110 | u64 new, unsigned flags) |
| 1111 | { |
| 1112 | u32 cdw10; |
| 1113 | |
| 1114 | if (flags & ~PR_FL_IGNORE_KEY) |
| 1115 | return -EOPNOTSUPP; |
| 1116 | |
| 1117 | cdw10 = old ? 2 : 0; |
| 1118 | cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; |
| 1119 | cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ |
| 1120 | return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); |
| 1121 | } |
| 1122 | |
| 1123 | static int nvme_pr_reserve(struct block_device *bdev, u64 key, |
| 1124 | enum pr_type type, unsigned flags) |
| 1125 | { |
| 1126 | u32 cdw10; |
| 1127 | |
| 1128 | if (flags & ~PR_FL_IGNORE_KEY) |
| 1129 | return -EOPNOTSUPP; |
| 1130 | |
| 1131 | cdw10 = nvme_pr_type(type) << 8; |
| 1132 | cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); |
| 1133 | return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); |
| 1134 | } |
| 1135 | |
| 1136 | static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, |
| 1137 | enum pr_type type, bool abort) |
| 1138 | { |
| 1139 | u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); |
| 1140 | return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); |
| 1141 | } |
| 1142 | |
| 1143 | static int nvme_pr_clear(struct block_device *bdev, u64 key) |
| 1144 | { |
Dan Carpenter | 8c0b391 | 2015-12-09 13:24:06 +0300 | [diff] [blame] | 1145 | u32 cdw10 = 1 | (key ? 1 << 3 : 0); |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1146 | return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); |
| 1147 | } |
| 1148 | |
| 1149 | static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) |
| 1150 | { |
| 1151 | u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); |
| 1152 | return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); |
| 1153 | } |
| 1154 | |
| 1155 | static const struct pr_ops nvme_pr_ops = { |
| 1156 | .pr_register = nvme_pr_register, |
| 1157 | .pr_reserve = nvme_pr_reserve, |
| 1158 | .pr_release = nvme_pr_release, |
| 1159 | .pr_preempt = nvme_pr_preempt, |
| 1160 | .pr_clear = nvme_pr_clear, |
| 1161 | }; |
| 1162 | |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1163 | #ifdef CONFIG_BLK_SED_OPAL |
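| | /* |
| |  * Tunnel a SED-Opal payload through NVMe Security Send/Receive for the |
| |  * block layer Opal code.  SECP goes into bits 31:24 and SPSP into bits |
| |  * 23:08 of cdw10, with the transfer length in the following dword. |
| |  */ |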
Christoph Hellwig | 4f1244c | 2017-02-17 13:59:39 +0100 | [diff] [blame] | 1164 | int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, |
| 1165 | bool send) |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1166 | { |
Christoph Hellwig | 4f1244c | 2017-02-17 13:59:39 +0100 | [diff] [blame] | 1167 | struct nvme_ctrl *ctrl = data; |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1168 | struct nvme_command cmd; |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1169 | |
| 1170 | memset(&cmd, 0, sizeof(cmd)); |
| 1171 | if (send) |
| 1172 | cmd.common.opcode = nvme_admin_security_send; |
| 1173 | else |
| 1174 | cmd.common.opcode = nvme_admin_security_recv; |
Scott Bauer | a98e58e5 | 2017-02-03 12:50:32 -0700 | [diff] [blame] | 1175 | cmd.common.nsid = 0; |
| 1176 | cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); |
| 1177 | cmd.common.cdw10[1] = cpu_to_le32(len); |
| 1178 | |
| 1179 | return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, |
| 1180 | ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); |
| 1181 | } |
| 1182 | EXPORT_SYMBOL_GPL(nvme_sec_submit); |
| 1183 | #endif /* CONFIG_BLK_SED_OPAL */ |
| 1184 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1185 | static const struct block_device_operations nvme_fops = { |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1186 | .owner = THIS_MODULE, |
| 1187 | .ioctl = nvme_ioctl, |
| 1188 | .compat_ioctl = nvme_compat_ioctl, |
| 1189 | .open = nvme_open, |
| 1190 | .release = nvme_release, |
| 1191 | .getgeo = nvme_getgeo, |
| 1192 | .revalidate_disk= nvme_revalidate_disk, |
| 1193 | .pr_ops = &nvme_pr_ops, |
| 1194 | }; |
| 1195 | |
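| | /* |
| |  * Poll CSTS until CSTS.RDY matches the expected state.  The timeout is |
| |  * derived from CAP.TO, which is specified in units of 500 milliseconds. |
| |  */ |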
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1196 | static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) |
| 1197 | { |
| 1198 | unsigned long timeout = |
| 1199 | ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; |
| 1200 | u32 csts, bit = enabled ? NVME_CSTS_RDY : 0; |
| 1201 | int ret; |
| 1202 | |
| 1203 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { |
Keith Busch | 0df1e4f | 2016-10-11 13:31:58 -0400 | [diff] [blame] | 1204 | if (csts == ~0) |
| 1205 | return -ENODEV; |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1206 | if ((csts & NVME_CSTS_RDY) == bit) |
| 1207 | break; |
| 1208 | |
| 1209 | msleep(100); |
| 1210 | if (fatal_signal_pending(current)) |
| 1211 | return -EINTR; |
| 1212 | if (time_after(jiffies, timeout)) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1213 | dev_err(ctrl->device, |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1214 | "Device not ready; aborting %s\n", enabled ? |
| 1215 | "initialisation" : "reset"); |
| 1216 | return -ENODEV; |
| 1217 | } |
| 1218 | } |
| 1219 | |
| 1220 | return ret; |
| 1221 | } |
| 1222 | |
| 1223 | /* |
| 1224 | * If the device has been passed off to us in an enabled state, just clear |
| 1225 | * the enabled bit. The spec says we should set the 'shutdown notification |
| 1226 | * bits', but doing so may cause the device to complete commands to the |
| 1227 | * admin queue ... and we don't know what memory that might be pointing at! |
| 1228 | */ |
| 1229 | int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap) |
| 1230 | { |
| 1231 | int ret; |
| 1232 | |
| 1233 | ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; |
| 1234 | ctrl->ctrl_config &= ~NVME_CC_ENABLE; |
| 1235 | |
| 1236 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
| 1237 | if (ret) |
| 1238 | return ret; |
Guilherme G. Piccoli | 54adc01 | 2016-06-14 18:22:41 -0300 | [diff] [blame] | 1239 | |
Guilherme G. Piccoli | b5a10c5 | 2016-12-28 22:13:15 -0200 | [diff] [blame] | 1240 | if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) |
Guilherme G. Piccoli | 54adc01 | 2016-06-14 18:22:41 -0300 | [diff] [blame] | 1241 | msleep(NVME_QUIRK_DELAY_AMOUNT); |
| 1242 | |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1243 | return nvme_wait_ready(ctrl, cap, false); |
| 1244 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 1245 | EXPORT_SYMBOL_GPL(nvme_disable_ctrl); |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1246 | |
| 1247 | int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap) |
| 1248 | { |
| 1249 | /* |
| 1250 | * Default to a 4K page size, with the intention to update this |
| 1251 | * path in the future to accommodate architectures with differing |
| 1252 | * kernel and IO page sizes. |
| 1253 | */ |
| 1254 | unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12; |
| 1255 | int ret; |
| 1256 | |
| 1257 | if (page_shift < dev_page_min) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1258 | dev_err(ctrl->device, |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1259 | "Minimum device page size %u too large for host (%u)\n", |
| 1260 | 1 << dev_page_min, 1 << page_shift); |
| 1261 | return -ENODEV; |
| 1262 | } |
| 1263 | |
| 1264 | ctrl->page_size = 1 << page_shift; |
| 1265 | |
| 1266 | ctrl->ctrl_config = NVME_CC_CSS_NVM; |
| 1267 | ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; |
| 1268 | ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; |
| 1269 | ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; |
| 1270 | ctrl->ctrl_config |= NVME_CC_ENABLE; |
| 1271 | |
| 1272 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
| 1273 | if (ret) |
| 1274 | return ret; |
| 1275 | return nvme_wait_ready(ctrl, cap, true); |
| 1276 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 1277 | EXPORT_SYMBOL_GPL(nvme_enable_ctrl); |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1278 | |
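| | /* |
| |  * Request a normal shutdown via CC.SHN and wait for CSTS.SHST to |
| |  * report that shutdown processing is complete. |
| |  */ |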
| 1279 | int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) |
| 1280 | { |
| 1281 | unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies; |
| 1282 | u32 csts; |
| 1283 | int ret; |
| 1284 | |
| 1285 | ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; |
| 1286 | ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; |
| 1287 | |
| 1288 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); |
| 1289 | if (ret) |
| 1290 | return ret; |
| 1291 | |
| 1292 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { |
| 1293 | if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) |
| 1294 | break; |
| 1295 | |
| 1296 | msleep(100); |
| 1297 | if (fatal_signal_pending(current)) |
| 1298 | return -EINTR; |
| 1299 | if (time_after(jiffies, timeout)) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1300 | dev_err(ctrl->device, |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1301 | "Device shutdown incomplete; abort shutdown\n"); |
| 1302 | return -ENODEV; |
| 1303 | } |
| 1304 | } |
| 1305 | |
| 1306 | return ret; |
| 1307 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 1308 | EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); |
Christoph Hellwig | 5fd4ce1 | 2015-11-28 15:03:49 +0100 | [diff] [blame] | 1309 | |
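| | /* |
| |  * Apply controller-wide limits to a request queue: cap the transfer |
| |  * size and segment count based on max_hw_sectors, honour the |
| |  * stripe-size quirk via the chunk sectors, and enable the write cache |
| |  * if the controller reports a volatile write cache (VWC). |
| |  */ |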
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1310 | static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, |
| 1311 | struct request_queue *q) |
| 1312 | { |
Jens Axboe | 7c88cb0 | 2016-04-12 15:43:09 -0600 | [diff] [blame] | 1313 | bool vwc = false; |
| 1314 | |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1315 | if (ctrl->max_hw_sectors) { |
Christoph Hellwig | 45686b6 | 2016-03-02 18:07:12 +0100 | [diff] [blame] | 1316 | u32 max_segments = |
| 1317 | (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; |
| 1318 | |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1319 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); |
Christoph Hellwig | 45686b6 | 2016-03-02 18:07:12 +0100 | [diff] [blame] | 1320 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1321 | } |
Keith Busch | e6282ae | 2016-12-19 11:37:50 -0500 | [diff] [blame] | 1322 | if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) |
| 1323 | blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1324 | blk_queue_virt_boundary(q, ctrl->page_size - 1); |
Jens Axboe | 7c88cb0 | 2016-04-12 15:43:09 -0600 | [diff] [blame] | 1325 | if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) |
| 1326 | vwc = true; |
| 1327 | blk_queue_write_cache(q, vwc, vwc); |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1328 | } |
| 1329 | |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1330 | static void nvme_configure_apst(struct nvme_ctrl *ctrl) |
| 1331 | { |
| 1332 | /* |
| 1333 | * APST (Autonomous Power State Transition) lets us program a |
| 1334 | * table of power state transitions that the controller will |
| 1335 | * perform automatically. We configure it with a simple |
| 1336 | * heuristic: we are willing to spend at most 2% of the time |
| 1337 | * transitioning between power states. Therefore, when running |
| 1338 | * in any given state, we will enter the next lower-power |
Andy Lutomirski | 76e4ad0 | 2017-04-21 16:19:22 -0700 | [diff] [blame] | 1339 | * non-operational state after waiting 50 * (enlat + exlat) |
Kai-Heng Feng | da87591 | 2017-06-07 15:25:42 +0800 | [diff] [blame] | 1340 | * microseconds, as long as that state's exit latency is under |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1341 | * the requested maximum latency. |
| 1342 | * |
| 1343 | * We will not autonomously enter any non-operational state for |
| 1344 | * which the total latency exceeds ps_max_latency_us. Users |
| 1345 | * can set ps_max_latency_us to zero to turn off APST. |
| 1346 | */ |
| 1347 | |
| 1348 | unsigned apste; |
| 1349 | struct nvme_feat_auto_pst *table; |
Andy Lutomirski | fb0dc39 | 2017-04-21 16:19:23 -0700 | [diff] [blame] | 1350 | u64 max_lat_us = 0; |
| 1351 | int max_ps = -1; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1352 | int ret; |
| 1353 | |
| 1354 | /* |
| 1355 | * If APST isn't supported or if we haven't been initialized yet, |
| 1356 | * then don't do anything. |
| 1357 | */ |
| 1358 | if (!ctrl->apsta) |
| 1359 | return; |
| 1360 | |
| 1361 | if (ctrl->npss > 31) { |
| 1362 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); |
| 1363 | return; |
| 1364 | } |
| 1365 | |
| 1366 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
| 1367 | if (!table) |
| 1368 | return; |
| 1369 | |
| 1370 | if (ctrl->ps_max_latency_us == 0) { |
| 1371 | /* Turn off APST. */ |
| 1372 | apste = 0; |
Andy Lutomirski | fb0dc39 | 2017-04-21 16:19:23 -0700 | [diff] [blame] | 1373 | dev_dbg(ctrl->device, "APST disabled\n"); |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1374 | } else { |
| 1375 | __le64 target = cpu_to_le64(0); |
| 1376 | int state; |
| 1377 | |
| 1378 | /* |
| 1379 | * Walk through all states from lowest- to highest-power. |
| 1380 | * According to the spec, lower-numbered states use more |
| 1381 | * power. NPSS, despite the name, is the index of the |
| 1382 | * lowest-power state, not the number of states. |
| 1383 | */ |
| 1384 | for (state = (int)ctrl->npss; state >= 0; state--) { |
Kai-Heng Feng | da87591 | 2017-06-07 15:25:42 +0800 | [diff] [blame] | 1385 | u64 total_latency_us, exit_latency_us, transition_ms; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1386 | |
| 1387 | if (target) |
| 1388 | table->entries[state] = target; |
| 1389 | |
| 1390 | /* |
Andy Lutomirski | ff5350a | 2017-04-20 13:37:55 -0700 | [diff] [blame] | 1391 | * Don't allow transitions to the deepest state |
| 1392 | * if it's quirked off. |
| 1393 | */ |
| 1394 | if (state == ctrl->npss && |
| 1395 | (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) |
| 1396 | continue; |
| 1397 | |
| 1398 | /* |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1399 | * Is this state a useful non-operational state for |
| 1400 | * higher-power states to autonomously transition to? |
| 1401 | */ |
| 1402 | if (!(ctrl->psd[state].flags & |
| 1403 | NVME_PS_FLAGS_NON_OP_STATE)) |
| 1404 | continue; |
| 1405 | |
Kai-Heng Feng | da87591 | 2017-06-07 15:25:42 +0800 | [diff] [blame] | 1406 | exit_latency_us = |
| 1407 | (u64)le32_to_cpu(ctrl->psd[state].exit_lat); |
| 1408 | if (exit_latency_us > ctrl->ps_max_latency_us) |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1409 | continue; |
| 1410 | |
Kai-Heng Feng | da87591 | 2017-06-07 15:25:42 +0800 | [diff] [blame] | 1411 | total_latency_us = |
| 1412 | exit_latency_us + |
| 1413 | le32_to_cpu(ctrl->psd[state].entry_lat); |
| 1414 | |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1415 | /* |
| 1416 | * This state is good. Use it as the APST idle |
| 1417 | * target for higher power states. |
| 1418 | */ |
| 1419 | transition_ms = total_latency_us + 19; |
| 1420 | do_div(transition_ms, 20); |
| 1421 | if (transition_ms > (1 << 24) - 1) |
| 1422 | transition_ms = (1 << 24) - 1; |
| 1423 | |
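| | /* |
| |  * Worked example with made-up latencies: for enlat + exlat = |
| |  * 10000us we are willing to idle after 50 * 10000us = 500ms, |
| |  * so transition_ms = (10000 + 19) / 20 = 500 and the entry |
| |  * becomes (state << 3) | (500 << 8). |
| |  */ |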
| 1424 | target = cpu_to_le64((state << 3) | |
| 1425 | (transition_ms << 8)); |
Andy Lutomirski | fb0dc39 | 2017-04-21 16:19:23 -0700 | [diff] [blame] | 1426 | |
| 1427 | if (max_ps == -1) |
| 1428 | max_ps = state; |
| 1429 | |
| 1430 | if (total_latency_us > max_lat_us) |
| 1431 | max_lat_us = total_latency_us; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1432 | } |
| 1433 | |
| 1434 | apste = 1; |
Andy Lutomirski | fb0dc39 | 2017-04-21 16:19:23 -0700 | [diff] [blame] | 1435 | |
| 1436 | if (max_ps == -1) { |
| 1437 | dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); |
| 1438 | } else { |
| 1439 | dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", |
| 1440 | max_ps, max_lat_us, (int)sizeof(*table), table); |
| 1441 | } |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1442 | } |
| 1443 | |
| 1444 | ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, |
| 1445 | table, sizeof(*table), NULL); |
| 1446 | if (ret) |
| 1447 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); |
| 1448 | |
| 1449 | kfree(table); |
| 1450 | } |
| 1451 | |
| 1452 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) |
| 1453 | { |
| 1454 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1455 | u64 latency; |
| 1456 | |
| 1457 | switch (val) { |
| 1458 | case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: |
| 1459 | case PM_QOS_LATENCY_ANY: |
| 1460 | latency = U64_MAX; |
| 1461 | break; |
| 1462 | |
| 1463 | default: |
| 1464 | latency = val; |
| 1465 | } |
| 1466 | |
| 1467 | if (ctrl->ps_max_latency_us != latency) { |
| 1468 | ctrl->ps_max_latency_us = latency; |
| 1469 | nvme_configure_apst(ctrl); |
| 1470 | } |
| 1471 | } |
| 1472 | |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1473 | struct nvme_core_quirk_entry { |
| 1474 | /* |
| 1475 | * NVMe model and firmware strings are padded with spaces. For |
| 1476 | * simplicity, strings in the quirk table are padded with NULLs |
| 1477 | * instead. |
| 1478 | */ |
| 1479 | u16 vid; |
| 1480 | const char *mn; |
| 1481 | const char *fr; |
| 1482 | unsigned long quirks; |
| 1483 | }; |
| 1484 | |
| 1485 | static const struct nvme_core_quirk_entry core_quirks[] = { |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1486 | { |
Andy Lutomirski | be56945 | 2017-04-20 13:37:56 -0700 | [diff] [blame] | 1487 | /* |
| 1488 | * This Toshiba device seems to die using any APST states. See: |
| 1489 | * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 |
| 1490 | */ |
| 1491 | .vid = 0x1179, |
| 1492 | .mn = "THNSF5256GPUK TOSHIBA", |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1493 | .quirks = NVME_QUIRK_NO_APST, |
Andy Lutomirski | be56945 | 2017-04-20 13:37:56 -0700 | [diff] [blame] | 1494 | } |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1495 | }; |
| 1496 | |
| 1497 | /* match is null-terminated but idstr is space-padded. */ |
| 1498 | static bool string_matches(const char *idstr, const char *match, size_t len) |
| 1499 | { |
| 1500 | size_t matchlen; |
| 1501 | |
| 1502 | if (!match) |
| 1503 | return true; |
| 1504 | |
| 1505 | matchlen = strlen(match); |
| 1506 | WARN_ON_ONCE(matchlen > len); |
| 1507 | |
| 1508 | if (memcmp(idstr, match, matchlen)) |
| 1509 | return false; |
| 1510 | |
| 1511 | for (; matchlen < len; matchlen++) |
| 1512 | if (idstr[matchlen] != ' ') |
| 1513 | return false; |
| 1514 | |
| 1515 | return true; |
| 1516 | } |
| 1517 | |
| 1518 | static bool quirk_matches(const struct nvme_id_ctrl *id, |
| 1519 | const struct nvme_core_quirk_entry *q) |
| 1520 | { |
| 1521 | return q->vid == le16_to_cpu(id->vid) && |
| 1522 | string_matches(id->mn, q->mn, sizeof(id->mn)) && |
| 1523 | string_matches(id->fr, q->fr, sizeof(id->fr)); |
| 1524 | } |
| 1525 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1526 | /* |
| 1527 | * Initialize the cached copies of the Identify data and various controller |
| 1528 | * registers in our nvme_ctrl structure. This should be called as soon as |
| 1529 | * the admin queue is fully up and running. |
| 1530 | */ |
| 1531 | int nvme_init_identify(struct nvme_ctrl *ctrl) |
| 1532 | { |
| 1533 | struct nvme_id_ctrl *id; |
| 1534 | u64 cap; |
| 1535 | int ret, page_shift; |
Christoph Hellwig | a229dbf | 2016-06-06 23:20:48 +0200 | [diff] [blame] | 1536 | u32 max_hw_sectors; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1537 | u8 prev_apsta; |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1538 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1539 | ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); |
| 1540 | if (ret) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1541 | dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1542 | return ret; |
| 1543 | } |
| 1544 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1545 | ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap); |
| 1546 | if (ret) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1547 | dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1548 | return ret; |
| 1549 | } |
| 1550 | page_shift = NVME_CAP_MPSMIN(cap) + 12; |
| 1551 | |
Gabriel Krisman Bertazi | 8ef2074 | 2016-10-19 09:51:05 -0600 | [diff] [blame] | 1552 | if (ctrl->vs >= NVME_VS(1, 1, 0)) |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1553 | ctrl->subsystem = NVME_CAP_NSSRC(cap); |
| 1554 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1555 | ret = nvme_identify_ctrl(ctrl, &id); |
| 1556 | if (ret) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1557 | dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1558 | return -EIO; |
| 1559 | } |
| 1560 | |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1561 | if (!ctrl->identified) { |
| 1562 | /* |
| 1563 | * Check for quirks. A quirk can depend on the firmware version, |
| 1564 | * so, in principle, the set of quirks present can change |
| 1565 | * across a reset. As a possible future enhancement, we |
| 1566 | * could re-scan for quirks every time we reinitialize |
| 1567 | * the device, but we'd have to make sure that the driver |
| 1568 | * behaves intelligently if the quirks change. |
| 1569 | */ |
| 1570 | |
| 1571 | int i; |
| 1572 | |
| 1573 | for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { |
| 1574 | if (quirk_matches(id, &core_quirks[i])) |
| 1575 | ctrl->quirks |= core_quirks[i].quirks; |
| 1576 | } |
| 1577 | } |
| 1578 | |
Andy Lutomirski | c35e30b | 2017-04-21 16:19:24 -0700 | [diff] [blame] | 1579 | if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { |
| 1580 | dev_warn(ctrl->dev, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); |
| 1581 | ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; |
| 1582 | } |
| 1583 | |
Scott Bauer | 8a9ae52 | 2017-02-17 13:59:40 +0100 | [diff] [blame] | 1584 | ctrl->oacs = le16_to_cpu(id->oacs); |
Keith Busch | 118472a | 2016-02-18 09:57:48 -0700 | [diff] [blame] | 1585 | ctrl->vid = le16_to_cpu(id->vid); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1586 | ctrl->oncs = le16_to_cpup(&id->oncs); |
Christoph Hellwig | 6bf25d1 | 2015-11-20 09:36:44 +0100 | [diff] [blame] | 1587 | atomic_set(&ctrl->abort_limit, id->acl + 1); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1588 | ctrl->vwc = id->vwc; |
Ming Lin | 931e1c2 | 2016-02-26 13:24:19 -0800 | [diff] [blame] | 1589 | ctrl->cntlid = le16_to_cpup(&id->cntlid); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1590 | memcpy(ctrl->serial, id->sn, sizeof(id->sn)); |
| 1591 | memcpy(ctrl->model, id->mn, sizeof(id->mn)); |
| 1592 | memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr)); |
| 1593 | if (id->mdts) |
Christoph Hellwig | a229dbf | 2016-06-06 23:20:48 +0200 | [diff] [blame] | 1594 | max_hw_sectors = 1 << (id->mdts + page_shift - 9); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1595 | else |
Christoph Hellwig | a229dbf | 2016-06-06 23:20:48 +0200 | [diff] [blame] | 1596 | max_hw_sectors = UINT_MAX; |
| 1597 | ctrl->max_hw_sectors = |
| 1598 | min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1599 | |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1600 | nvme_set_queue_limits(ctrl, ctrl->admin_q); |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1601 | ctrl->sgls = le32_to_cpu(id->sgls); |
Sagi Grimberg | 038bd4c | 2016-06-13 16:45:28 +0200 | [diff] [blame] | 1602 | ctrl->kas = le16_to_cpu(id->kas); |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1603 | |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1604 | ctrl->npss = id->npss; |
| 1605 | prev_apsta = ctrl->apsta; |
Andy Lutomirski | c35e30b | 2017-04-21 16:19:24 -0700 | [diff] [blame] | 1606 | if (ctrl->quirks & NVME_QUIRK_NO_APST) { |
| 1607 | if (force_apst && id->apsta) { |
| 1608 | dev_warn(ctrl->dev, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); |
| 1609 | ctrl->apsta = 1; |
| 1610 | } else { |
| 1611 | ctrl->apsta = 0; |
| 1612 | } |
| 1613 | } else { |
| 1614 | ctrl->apsta = id->apsta; |
| 1615 | } |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1616 | memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); |
| 1617 | |
Christoph Hellwig | d3d5b87 | 2017-05-20 15:14:44 +0200 | [diff] [blame] | 1618 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1619 | ctrl->icdoff = le16_to_cpu(id->icdoff); |
| 1620 | ctrl->ioccsz = le32_to_cpu(id->ioccsz); |
| 1621 | ctrl->iorcsz = le32_to_cpu(id->iorcsz); |
| 1622 | ctrl->maxcmd = le16_to_cpu(id->maxcmd); |
| 1623 | |
| 1624 | /* |
| 1625 | * In fabrics we need to verify that the cntlid matches the one |
| 1626 | * returned during the admin connect. |
| 1627 | */ |
| 1628 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) |
| 1629 | ret = -EINVAL; |
Sagi Grimberg | 038bd4c | 2016-06-13 16:45:28 +0200 | [diff] [blame] | 1630 | |
| 1631 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { |
| 1632 | dev_err(ctrl->dev, |
| 1633 | "keep-alive support is mandatory for fabrics\n"); |
| 1634 | ret = -EINVAL; |
| 1635 | } |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1636 | } else { |
| 1637 | ctrl->cntlid = le16_to_cpu(id->cntlid); |
Christoph Hellwig | fe6d53c | 2017-05-12 17:16:10 +0200 | [diff] [blame] | 1638 | ctrl->hmpre = le32_to_cpu(id->hmpre); |
| 1639 | ctrl->hmmin = le32_to_cpu(id->hmmin); |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1640 | } |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 1641 | |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1642 | kfree(id); |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1643 | |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1644 | if (ctrl->apsta && !prev_apsta) |
| 1645 | dev_pm_qos_expose_latency_tolerance(ctrl->device); |
| 1646 | else if (!ctrl->apsta && prev_apsta) |
| 1647 | dev_pm_qos_hide_latency_tolerance(ctrl->device); |
| 1648 | |
| 1649 | nvme_configure_apst(ctrl); |
| 1650 | |
Andy Lutomirski | bd4da3a | 2017-02-22 13:32:36 -0700 | [diff] [blame] | 1651 | ctrl->identified = true; |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 1652 | |
Christoph Hellwig | 07bfcd0 | 2016-06-13 16:45:26 +0200 | [diff] [blame] | 1653 | return ret; |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1654 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 1655 | EXPORT_SYMBOL_GPL(nvme_init_identify); |
Christoph Hellwig | 7fd8930 | 2015-11-28 15:37:52 +0100 | [diff] [blame] | 1656 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1657 | static int nvme_dev_open(struct inode *inode, struct file *file) |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1658 | { |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1659 | struct nvme_ctrl *ctrl; |
| 1660 | int instance = iminor(inode); |
| 1661 | int ret = -ENODEV; |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1662 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1663 | spin_lock(&dev_list_lock); |
| 1664 | list_for_each_entry(ctrl, &nvme_ctrl_list, node) { |
| 1665 | if (ctrl->instance != instance) |
| 1666 | continue; |
| 1667 | |
| 1668 | if (!ctrl->admin_q) { |
| 1669 | ret = -EWOULDBLOCK; |
| 1670 | break; |
| 1671 | } |
| 1672 | if (!kref_get_unless_zero(&ctrl->kref)) |
| 1673 | break; |
| 1674 | file->private_data = ctrl; |
| 1675 | ret = 0; |
| 1676 | break; |
| 1677 | } |
| 1678 | spin_unlock(&dev_list_lock); |
| 1679 | |
| 1680 | return ret; |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1681 | } |
| 1682 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1683 | static int nvme_dev_release(struct inode *inode, struct file *file) |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1684 | { |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1685 | nvme_put_ctrl(file->private_data); |
| 1686 | return 0; |
Christoph Hellwig | 1673f1f | 2015-11-26 10:54:19 +0100 | [diff] [blame] | 1687 | } |
| 1688 | |
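| | /* |
| |  * Handle NVME_IOCTL_IO_CMD on the controller character device.  This |
| |  * path is only unambiguous if the controller has a single namespace, |
| |  * so it is rejected when multiple namespaces are present. |
| |  */ |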
Christoph Hellwig | bfd8947 | 2015-12-24 15:27:01 +0100 | [diff] [blame] | 1689 | static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) |
| 1690 | { |
| 1691 | struct nvme_ns *ns; |
| 1692 | int ret; |
| 1693 | |
| 1694 | mutex_lock(&ctrl->namespaces_mutex); |
| 1695 | if (list_empty(&ctrl->namespaces)) { |
| 1696 | ret = -ENOTTY; |
| 1697 | goto out_unlock; |
| 1698 | } |
| 1699 | |
| 1700 | ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); |
| 1701 | if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1702 | dev_warn(ctrl->device, |
Christoph Hellwig | bfd8947 | 2015-12-24 15:27:01 +0100 | [diff] [blame] | 1703 | "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); |
| 1704 | ret = -EINVAL; |
| 1705 | goto out_unlock; |
| 1706 | } |
| 1707 | |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1708 | dev_warn(ctrl->device, |
Christoph Hellwig | bfd8947 | 2015-12-24 15:27:01 +0100 | [diff] [blame] | 1709 | "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); |
| 1710 | kref_get(&ns->kref); |
| 1711 | mutex_unlock(&ctrl->namespaces_mutex); |
| 1712 | |
| 1713 | ret = nvme_user_cmd(ctrl, ns, argp); |
| 1714 | nvme_put_ns(ns); |
| 1715 | return ret; |
| 1716 | |
| 1717 | out_unlock: |
| 1718 | mutex_unlock(&ctrl->namespaces_mutex); |
| 1719 | return ret; |
| 1720 | } |
| 1721 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1722 | static long nvme_dev_ioctl(struct file *file, unsigned int cmd, |
| 1723 | unsigned long arg) |
| 1724 | { |
| 1725 | struct nvme_ctrl *ctrl = file->private_data; |
| 1726 | void __user *argp = (void __user *)arg; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1727 | |
| 1728 | switch (cmd) { |
| 1729 | case NVME_IOCTL_ADMIN_CMD: |
| 1730 | return nvme_user_cmd(ctrl, NULL, argp); |
| 1731 | case NVME_IOCTL_IO_CMD: |
Christoph Hellwig | bfd8947 | 2015-12-24 15:27:01 +0100 | [diff] [blame] | 1732 | return nvme_dev_user_cmd(ctrl, argp); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1733 | case NVME_IOCTL_RESET: |
Sagi Grimberg | 1b3c47c | 2016-02-10 08:51:15 -0700 | [diff] [blame] | 1734 | dev_warn(ctrl->device, "resetting controller\n"); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1735 | return ctrl->ops->reset_ctrl(ctrl); |
| 1736 | case NVME_IOCTL_SUBSYS_RESET: |
| 1737 | return nvme_reset_subsystem(ctrl); |
Keith Busch | 9ec3bb2 | 2016-04-29 15:45:18 -0600 | [diff] [blame] | 1738 | case NVME_IOCTL_RESCAN: |
| 1739 | nvme_queue_scan(ctrl); |
| 1740 | return 0; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 1741 | default: |
| 1742 | return -ENOTTY; |
| 1743 | } |
| 1744 | } |
| 1745 | |
| 1746 | static const struct file_operations nvme_dev_fops = { |
| 1747 | .owner = THIS_MODULE, |
| 1748 | .open = nvme_dev_open, |
| 1749 | .release = nvme_dev_release, |
| 1750 | .unlocked_ioctl = nvme_dev_ioctl, |
| 1751 | .compat_ioctl = nvme_dev_ioctl, |
| 1752 | }; |
| 1753 | |
| 1754 | static ssize_t nvme_sysfs_reset(struct device *dev, |
| 1755 | struct device_attribute *attr, const char *buf, |
| 1756 | size_t count) |
| 1757 | { |
| 1758 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1759 | int ret; |
| 1760 | |
| 1761 | ret = ctrl->ops->reset_ctrl(ctrl); |
| 1762 | if (ret < 0) |
| 1763 | return ret; |
| 1764 | return count; |
| 1765 | } |
| 1766 | static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); |
| 1767 | |
Keith Busch | 9ec3bb2 | 2016-04-29 15:45:18 -0600 | [diff] [blame] | 1768 | static ssize_t nvme_sysfs_rescan(struct device *dev, |
| 1769 | struct device_attribute *attr, const char *buf, |
| 1770 | size_t count) |
| 1771 | { |
| 1772 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1773 | |
| 1774 | nvme_queue_scan(ctrl); |
| 1775 | return count; |
| 1776 | } |
| 1777 | static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); |
| 1778 | |
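| | /* |
| |  * Report a WWID for the namespace: prefer the NGUID, then the EUI-64, |
| |  * and fall back to an identifier synthesized from the vendor ID, |
| |  * serial number, model and namespace ID. |
| |  */ |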
Keith Busch | 118472a | 2016-02-18 09:57:48 -0700 | [diff] [blame] | 1779 | static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, |
| 1780 | char *buf) |
| 1781 | { |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1782 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 118472a | 2016-02-18 09:57:48 -0700 | [diff] [blame] | 1783 | struct nvme_ctrl *ctrl = ns->ctrl; |
| 1784 | int serial_len = sizeof(ctrl->serial); |
| 1785 | int model_len = sizeof(ctrl->model); |
| 1786 | |
| 1787 | if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid))) |
| 1788 | return sprintf(buf, "eui.%16phN\n", ns->uuid); |
| 1789 | |
| 1790 | if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) |
| 1791 | return sprintf(buf, "eui.%8phN\n", ns->eui); |
| 1792 | |
| 1793 | while (ctrl->serial[serial_len - 1] == ' ') |
| 1794 | serial_len--; |
| 1795 | while (ctrl->model[model_len - 1] == ' ') |
| 1796 | model_len--; |
| 1797 | |
| 1798 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, |
| 1799 | serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id); |
| 1800 | } |
| 1801 | static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL); |
| 1802 | |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1803 | static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, |
| 1804 | char *buf) |
| 1805 | { |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1806 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1807 | return sprintf(buf, "%pU\n", ns->uuid); |
| 1808 | } |
| 1809 | static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL); |
| 1810 | |
| 1811 | static ssize_t eui_show(struct device *dev, struct device_attribute *attr, |
| 1812 | char *buf) |
| 1813 | { |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1814 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1815 | return sprintf(buf, "%8phd\n", ns->eui); |
| 1816 | } |
| 1817 | static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL); |
| 1818 | |
| 1819 | static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, |
| 1820 | char *buf) |
| 1821 | { |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1822 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1823 | return sprintf(buf, "%d\n", ns->ns_id); |
| 1824 | } |
| 1825 | static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL); |
| 1826 | |
| 1827 | static struct attribute *nvme_ns_attrs[] = { |
Keith Busch | 118472a | 2016-02-18 09:57:48 -0700 | [diff] [blame] | 1828 | &dev_attr_wwid.attr, |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1829 | &dev_attr_uuid.attr, |
| 1830 | &dev_attr_eui.attr, |
| 1831 | &dev_attr_nsid.attr, |
| 1832 | NULL, |
| 1833 | }; |
| 1834 | |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1835 | static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1836 | struct attribute *a, int n) |
| 1837 | { |
| 1838 | struct device *dev = container_of(kobj, struct device, kobj); |
Simon A. F. Lund | 40267ef | 2016-09-16 14:25:08 +0200 | [diff] [blame] | 1839 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1840 | |
| 1841 | if (a == &dev_attr_uuid.attr) { |
| 1842 | if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid))) |
| 1843 | return 0; |
| 1844 | } |
| 1845 | if (a == &dev_attr_eui.attr) { |
| 1846 | if (!memchr_inv(ns->eui, 0, sizeof(ns->eui))) |
| 1847 | return 0; |
| 1848 | } |
| 1849 | return a->mode; |
| 1850 | } |
| 1851 | |
| 1852 | static const struct attribute_group nvme_ns_attr_group = { |
| 1853 | .attrs = nvme_ns_attrs, |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1854 | .is_visible = nvme_ns_attrs_are_visible, |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 1855 | }; |
| 1856 | |
Ming Lin | 931e1c2 | 2016-02-26 13:24:19 -0800 | [diff] [blame] | 1857 | #define nvme_show_str_function(field) \ |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1858 | static ssize_t field##_show(struct device *dev, \ |
| 1859 | struct device_attribute *attr, char *buf) \ |
| 1860 | { \ |
| 1861 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ |
| 1862 | return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \ |
| 1863 | } \ |
| 1864 | static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); |
| 1865 | |
Ming Lin | 931e1c2 | 2016-02-26 13:24:19 -0800 | [diff] [blame] | 1866 | #define nvme_show_int_function(field) \ |
| 1867 | static ssize_t field##_show(struct device *dev, \ |
| 1868 | struct device_attribute *attr, char *buf) \ |
| 1869 | { \ |
| 1870 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ |
| 1871 | return sprintf(buf, "%d\n", ctrl->field); \ |
| 1872 | } \ |
| 1873 | static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); |
| 1874 | |
| 1875 | nvme_show_str_function(model); |
| 1876 | nvme_show_str_function(serial); |
| 1877 | nvme_show_str_function(firmware_rev); |
| 1878 | nvme_show_int_function(cntlid); |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1879 | |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1880 | static ssize_t nvme_sysfs_delete(struct device *dev, |
| 1881 | struct device_attribute *attr, const char *buf, |
| 1882 | size_t count) |
| 1883 | { |
| 1884 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1885 | |
| 1886 | if (device_remove_file_self(dev, attr)) |
| 1887 | ctrl->ops->delete_ctrl(ctrl); |
| 1888 | return count; |
| 1889 | } |
| 1890 | static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); |
| 1891 | |
| 1892 | static ssize_t nvme_sysfs_show_transport(struct device *dev, |
| 1893 | struct device_attribute *attr, |
| 1894 | char *buf) |
| 1895 | { |
| 1896 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1897 | |
| 1898 | return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); |
| 1899 | } |
| 1900 | static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); |
| 1901 | |
Sagi Grimberg | 8432bdb2 | 2016-11-28 01:47:40 +0200 | [diff] [blame] | 1902 | static ssize_t nvme_sysfs_show_state(struct device *dev, |
| 1903 | struct device_attribute *attr, |
| 1904 | char *buf) |
| 1905 | { |
| 1906 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1907 | static const char *const state_name[] = { |
| 1908 | [NVME_CTRL_NEW] = "new", |
| 1909 | [NVME_CTRL_LIVE] = "live", |
| 1910 | [NVME_CTRL_RESETTING] = "resetting", |
| 1911 | [NVME_CTRL_RECONNECTING]= "reconnecting", |
| 1912 | [NVME_CTRL_DELETING] = "deleting", |
| 1913 | [NVME_CTRL_DEAD] = "dead", |
| 1914 | }; |
| 1915 | |
| 1916 | if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && |
| 1917 | state_name[ctrl->state]) |
| 1918 | return sprintf(buf, "%s\n", state_name[ctrl->state]); |
| 1919 | |
| 1920 | return sprintf(buf, "unknown state\n"); |
| 1921 | } |
| 1922 | |
| 1923 | static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); |
| 1924 | |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1925 | static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, |
| 1926 | struct device_attribute *attr, |
| 1927 | char *buf) |
| 1928 | { |
| 1929 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1930 | |
| 1931 | return snprintf(buf, PAGE_SIZE, "%s\n", |
| 1932 | ctrl->ops->get_subsysnqn(ctrl)); |
| 1933 | } |
| 1934 | static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); |
| 1935 | |
| 1936 | static ssize_t nvme_sysfs_show_address(struct device *dev, |
| 1937 | struct device_attribute *attr, |
| 1938 | char *buf) |
| 1939 | { |
| 1940 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1941 | |
| 1942 | return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); |
| 1943 | } |
| 1944 | static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); |
| 1945 | |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1946 | static struct attribute *nvme_dev_attrs[] = { |
| 1947 | &dev_attr_reset_controller.attr, |
Keith Busch | 9ec3bb2 | 2016-04-29 15:45:18 -0600 | [diff] [blame] | 1948 | &dev_attr_rescan_controller.attr, |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1949 | &dev_attr_model.attr, |
| 1950 | &dev_attr_serial.attr, |
| 1951 | &dev_attr_firmware_rev.attr, |
Ming Lin | 931e1c2 | 2016-02-26 13:24:19 -0800 | [diff] [blame] | 1952 | &dev_attr_cntlid.attr, |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1953 | &dev_attr_delete_controller.attr, |
| 1954 | &dev_attr_transport.attr, |
| 1955 | &dev_attr_subsysnqn.attr, |
| 1956 | &dev_attr_address.attr, |
Sagi Grimberg | 8432bdb2 | 2016-11-28 01:47:40 +0200 | [diff] [blame] | 1957 | &dev_attr_state.attr, |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1958 | NULL |
| 1959 | }; |
| 1960 | |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1961 | #define CHECK_ATTR(ctrl, a, name) \ |
| 1962 | if ((a) == &dev_attr_##name.attr && \ |
| 1963 | !(ctrl)->ops->get_##name) \ |
| 1964 | return 0 |
| 1965 | |
| 1966 | static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, |
| 1967 | struct attribute *a, int n) |
| 1968 | { |
| 1969 | struct device *dev = container_of(kobj, struct device, kobj); |
| 1970 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); |
| 1971 | |
| 1972 | if (a == &dev_attr_delete_controller.attr) { |
| 1973 | if (!ctrl->ops->delete_ctrl) |
| 1974 | return 0; |
| 1975 | } |
| 1976 | |
| 1977 | CHECK_ATTR(ctrl, a, subsysnqn); |
| 1978 | CHECK_ATTR(ctrl, a, address); |
| 1979 | |
| 1980 | return a->mode; |
| 1981 | } |
| 1982 | |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1983 | static struct attribute_group nvme_dev_attrs_group = { |
Ming Lin | 1a353d8 | 2016-06-13 16:45:24 +0200 | [diff] [blame] | 1984 | .attrs = nvme_dev_attrs, |
| 1985 | .is_visible = nvme_dev_attrs_are_visible, |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 1986 | }; |
| 1987 | |
| 1988 | static const struct attribute_group *nvme_dev_attr_groups[] = { |
| 1989 | &nvme_dev_attrs_group, |
| 1990 | NULL, |
| 1991 | }; |
| 1992 | |
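| | /* |
| |  * Comparator used to keep ctrl->namespaces sorted by namespace ID, |
| |  * e.g. via list_sort() after a scan. |
| |  */ |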
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 1993 | static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) |
| 1994 | { |
| 1995 | struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); |
| 1996 | struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); |
| 1997 | |
| 1998 | return nsa->ns_id - nsb->ns_id; |
| 1999 | } |
| 2000 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2001 | static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2002 | { |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2003 | struct nvme_ns *ns, *ret = NULL; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2004 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2005 | mutex_lock(&ctrl->namespaces_mutex); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2006 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2007 | if (ns->ns_id == nsid) { |
| 2008 | kref_get(&ns->kref); |
| 2009 | ret = ns; |
| 2010 | break; |
| 2011 | } |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2012 | if (ns->ns_id > nsid) |
| 2013 | break; |
| 2014 | } |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2015 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2016 | return ret; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2017 | } |
| 2018 | |
| 2019 | static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
| 2020 | { |
| 2021 | struct nvme_ns *ns; |
| 2022 | struct gendisk *disk; |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2023 | struct nvme_id_ns *id; |
| 2024 | char disk_name[DISK_NAME_LEN]; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2025 | int node = dev_to_node(ctrl->dev); |
| 2026 | |
| 2027 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); |
| 2028 | if (!ns) |
| 2029 | return; |
| 2030 | |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2031 | ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL); |
| 2032 | if (ns->instance < 0) |
| 2033 | goto out_free_ns; |
| 2034 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2035 | ns->queue = blk_mq_init_queue(ctrl->tagset); |
| 2036 | if (IS_ERR(ns->queue)) |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2037 | goto out_release_instance; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2038 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); |
| 2039 | ns->queue->queuedata = ns; |
| 2040 | ns->ctrl = ctrl; |
| 2041 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2042 | kref_init(&ns->kref); |
| 2043 | ns->ns_id = nsid; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2044 | ns->lba_shift = 9; /* default to 512 byte blocks until the disk is validated */ |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2045 | |
| 2046 | blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); |
Christoph Hellwig | da35825 | 2016-03-02 18:07:11 +0100 | [diff] [blame] | 2047 | nvme_set_queue_limits(ctrl, ns->queue); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2048 | |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2049 | sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2050 | |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2051 | if (nvme_revalidate_ns(ns, &id)) |
| 2052 | goto out_free_queue; |
| 2053 | |
Matias Bjørling | 3dc87dd | 2016-11-28 22:38:53 +0100 | [diff] [blame] | 2054 | if (nvme_nvm_ns_supported(ns, id) && |
| 2055 | nvme_nvm_register(ns, disk_name, node)) { |
| 2056 | dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__); |
| 2057 | goto out_free_id; |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2058 | } |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2059 | |
Matias Bjørling | 3dc87dd | 2016-11-28 22:38:53 +0100 | [diff] [blame] | 2060 | disk = alloc_disk_node(0, node); |
| 2061 | if (!disk) |
| 2062 | goto out_free_id; |
| 2063 | |
| 2064 | disk->fops = &nvme_fops; |
| 2065 | disk->private_data = ns; |
| 2066 | disk->queue = ns->queue; |
| 2067 | disk->flags = GENHD_FL_EXT_DEVT; |
| 2068 | memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); |
| 2069 | ns->disk = disk; |
| 2070 | |
| 2071 | __nvme_revalidate_disk(disk, id); |
| 2072 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2073 | mutex_lock(&ctrl->namespaces_mutex); |
| 2074 | list_add_tail(&ns->list, &ctrl->namespaces); |
| 2075 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2076 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2077 | kref_get(&ctrl->kref); |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2078 | |
| 2079 | kfree(id); |
| 2080 | |
Dan Williams | 0d52c756 | 2016-06-15 19:44:20 -0700 | [diff] [blame] | 2081 | device_add_disk(ctrl->device, ns->disk); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 2082 | if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj, |
| 2083 | &nvme_ns_attr_group)) |
| 2084 | pr_warn("%s: failed to create sysfs group for identification\n", |
| 2085 | ns->disk->disk_name); |
Matias Bjørling | 3dc87dd | 2016-11-28 22:38:53 +0100 | [diff] [blame] | 2086 | if (ns->ndev && nvme_nvm_register_sysfs(ns)) |
| 2087 | pr_warn("%s: failed to register lightnvm sysfs group for identification\n", |
| 2088 | ns->disk->disk_name); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2089 | return; |
Matias Bjørling | ac81bfa9 | 2016-09-16 14:25:04 +0200 | [diff] [blame] | 2090 | out_free_id: |
| 2091 | kfree(id); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2092 | out_free_queue: |
| 2093 | blk_cleanup_queue(ns->queue); |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2094 | out_release_instance: |
| 2095 | ida_simple_remove(&ctrl->ns_ida, ns->instance); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2096 | out_free_ns: |
| 2097 | kfree(ns); |
| 2098 | } |
| 2099 | |
| 2100 | static void nvme_ns_remove(struct nvme_ns *ns) |
| 2101 | { |
Keith Busch | 646017a | 2016-02-24 09:15:54 -0700 | [diff] [blame] | 2102 | if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) |
| 2103 | return; |
| 2104 | |
Matias Bjørling | b0b4e09 | 2016-09-16 14:25:07 +0200 | [diff] [blame] | 2105 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2106 | if (blk_get_integrity(ns->disk)) |
| 2107 | blk_integrity_unregister(ns->disk); |
Keith Busch | 2b9b6e8 | 2015-12-22 10:10:45 -0700 | [diff] [blame] | 2108 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, |
| 2109 | &nvme_ns_attr_group); |
Matias Bjørling | 3dc87dd | 2016-11-28 22:38:53 +0100 | [diff] [blame] | 2110 | if (ns->ndev) |
| 2111 | nvme_nvm_unregister_sysfs(ns); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2112 | del_gendisk(ns->disk); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2113 | blk_cleanup_queue(ns->queue); |
| 2114 | } |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2115 | |
| 2116 | mutex_lock(&ns->ctrl->namespaces_mutex); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2117 | list_del_init(&ns->list); |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2118 | mutex_unlock(&ns->ctrl->namespaces_mutex); |
| 2119 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2120 | nvme_put_ns(ns); |
| 2121 | } |
| 2122 | |
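| | /* |
| |  * If the namespace is already known, revalidate it and remove it when |
| |  * revalidation fails; otherwise allocate a new one. |
| |  */ |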
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2123 | static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
| 2124 | { |
| 2125 | struct nvme_ns *ns; |
| 2126 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2127 | ns = nvme_find_get_ns(ctrl, nsid); |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2128 | if (ns) { |
Matias Bjørling | b0b4e09 | 2016-09-16 14:25:07 +0200 | [diff] [blame] | 2129 | if (ns->disk && revalidate_disk(ns->disk)) |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2130 | nvme_ns_remove(ns); |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2131 | nvme_put_ns(ns); |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2132 | } else |
| 2133 | nvme_alloc_ns(ctrl, nsid); |
| 2134 | } |
| 2135 | |
Sunad Bhandary | 47b0e50 | 2016-05-27 15:59:43 +0530 | [diff] [blame] | 2136 | static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, |
| 2137 | unsigned nsid) |
| 2138 | { |
| 2139 | struct nvme_ns *ns, *next; |
| 2140 | |
| 2141 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { |
| 2142 | if (ns->ns_id > nsid) |
| 2143 | nvme_ns_remove(ns); |
| 2144 | } |
| 2145 | } |
| 2146 | |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2147 | static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) |
| 2148 | { |
| 2149 | struct nvme_ns *ns; |
| 2150 | __le32 *ns_list; |
| 2151 | unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); |
| 2152 | int ret = 0; |
| 2153 | |
| 2154 | ns_list = kzalloc(0x1000, GFP_KERNEL); |
| 2155 | if (!ns_list) |
| 2156 | return -ENOMEM; |
| 2157 | |
| 2158 | for (i = 0; i < num_lists; i++) { |
| 2159 | ret = nvme_identify_ns_list(ctrl, prev, ns_list); |
| 2160 | if (ret) |
Sunad Bhandary | 47b0e50 | 2016-05-27 15:59:43 +0530 | [diff] [blame] | 2161 | goto free; |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2162 | |
| 2163 | for (j = 0; j < min(nn, 1024U); j++) { |
| 2164 | nsid = le32_to_cpu(ns_list[j]); |
| 2165 | if (!nsid) |
| 2166 | goto out; |
| 2167 | |
| 2168 | nvme_validate_ns(ctrl, nsid); |
| 2169 | |
| 2170 | while (++prev < nsid) { |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2171 | ns = nvme_find_get_ns(ctrl, prev); |
| 2172 | if (ns) { |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2173 | nvme_ns_remove(ns); |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2174 | nvme_put_ns(ns); |
| 2175 | } |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2176 | } |
| 2177 | } |
| 2178 | nn -= j; |
| 2179 | } |
| 2180 | out: |
Sunad Bhandary | 47b0e50 | 2016-05-27 15:59:43 +0530 | [diff] [blame] | 2181 | nvme_remove_invalid_namespaces(ctrl, prev); |
| 2182 | free: |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2183 | kfree(ns_list); |
| 2184 | return ret; |
| 2185 | } |
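
/*
 * The paging arithmetic in nvme_scan_ns_list(), worked through: each
 * Identify Namespace List page is 0x1000 bytes, i.e. 1024 __le32 NSIDs,
 * so nn namespaces need DIV_ROUND_UP(nn, 1024) pages; nn == 2500 gives
 * three pages (1024 + 1024 + 452 entries, zero-terminated). 'prev'
 * tracks the highest NSID handled so far: it seeds the next Identify
 * (which returns only NSIDs greater than it) and lets the inner while
 * loop reap namespaces that vanished from the list. A hypothetical
 * helper spelling out the page count:
 */
static unsigned example_num_ns_lists(unsigned nn)
{
        return DIV_ROUND_UP(nn, 0x1000 / sizeof(__le32));      /* 1024/page */
}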
| 2186 | |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2187 | static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2188 | { |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2189 | unsigned i; |
| 2190 | |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2191 | for (i = 1; i <= nn; i++) |
| 2192 | nvme_validate_ns(ctrl, i); |
| 2193 | |
Sunad Bhandary | 47b0e50 | 2016-05-27 15:59:43 +0530 | [diff] [blame] | 2194 | nvme_remove_invalid_namespaces(ctrl, nn); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2195 | } |
| 2196 | |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2197 | static void nvme_scan_work(struct work_struct *work) |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2198 | { |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2199 | struct nvme_ctrl *ctrl = |
| 2200 | container_of(work, struct nvme_ctrl, scan_work); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2201 | struct nvme_id_ctrl *id; |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2202 | unsigned nn; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2203 | |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2204 | if (ctrl->state != NVME_CTRL_LIVE) |
| 2205 | return; |
| 2206 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2207 | if (nvme_identify_ctrl(ctrl, &id)) |
| 2208 | return; |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2209 | |
| 2210 | nn = le32_to_cpu(id->nn); |
Gabriel Krisman Bertazi | 8ef2074 | 2016-10-19 09:51:05 -0600 | [diff] [blame] | 2211 | if (ctrl->vs >= NVME_VS(1, 1, 0) && |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2212 | !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { |
| 2213 | if (!nvme_scan_ns_list(ctrl, nn)) |
| 2214 | goto done; |
| 2215 | } |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2216 | nvme_scan_ns_sequential(ctrl, nn); |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2217 | done: |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2218 | mutex_lock(&ctrl->namespaces_mutex); |
Keith Busch | 540c801 | 2015-10-22 15:45:06 -0600 | [diff] [blame] | 2219 | list_sort(NULL, &ctrl->namespaces, ns_cmp); |
Christoph Hellwig | 69d3b8a | 2015-12-24 15:27:00 +0100 | [diff] [blame] | 2220 | mutex_unlock(&ctrl->namespaces_mutex); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2221 | kfree(id); |
| 2222 | } |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2223 | |
| 2224 | void nvme_queue_scan(struct nvme_ctrl *ctrl) |
| 2225 | { |
| 2226 | /* |
| 2227 | * Do not queue new scan work when a controller is reset during |
| 2228 | * removal. |
| 2229 | */ |
| 2230 | if (ctrl->state == NVME_CTRL_LIVE) |
Sagi Grimberg | c669ccd | 2017-05-04 13:33:14 +0300 | [diff] [blame] | 2231 | queue_work(nvme_wq, &ctrl->scan_work); |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2232 | } |
| 2233 | EXPORT_SYMBOL_GPL(nvme_queue_scan); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2234 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2235 | /* |
| 2236 | * This function iterates the namespace list unlocked to allow recovery from |
| 2237 | * controller failure. It is up to the caller to ensure the namespace list is |
| 2238 | * not modified by scan work while this function is executing. |
| 2239 | */ |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2240 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl) |
| 2241 | { |
| 2242 | struct nvme_ns *ns, *next; |
| 2243 | |
Keith Busch | 0ff9d4e | 2016-05-12 08:37:14 -0600 | [diff] [blame] | 2244 | /* |
| 2245 | * The dead state indicates that the controller was not gracefully |
| 2246 | * disconnected. In that case, we won't be able to flush any data while |
| 2247 | * removing the namespaces' disks; fail all the queues now to avoid |
| 2248 | * potentially having to clean up the failed sync later. |
| 2249 | */ |
| 2250 | if (ctrl->state == NVME_CTRL_DEAD) |
| 2251 | nvme_kill_queues(ctrl); |
| 2252 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2253 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) |
| 2254 | nvme_ns_remove(ns); |
| 2255 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2256 | EXPORT_SYMBOL_GPL(nvme_remove_namespaces); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2257 | |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2258 | static void nvme_async_event_work(struct work_struct *work) |
| 2259 | { |
| 2260 | struct nvme_ctrl *ctrl = |
| 2261 | container_of(work, struct nvme_ctrl, async_event_work); |
| 2262 | |
| 2263 | spin_lock_irq(&ctrl->lock); |
| 2264 | while (ctrl->event_limit > 0) { |
| 2265 | int aer_idx = --ctrl->event_limit; |
| 2266 | |
| 2267 | spin_unlock_irq(&ctrl->lock); |
| 2268 | ctrl->ops->submit_async_event(ctrl, aer_idx); |
| 2269 | spin_lock_irq(&ctrl->lock); |
| 2270 | } |
| 2271 | spin_unlock_irq(&ctrl->lock); |
| 2272 | } |
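
/*
 * A sketch of the lock handling in nvme_async_event_work() above, with
 * hypothetical names: the spinlock only guards the budget counter, and
 * it is dropped across the submission callback so the transport code
 * never runs under the core's lock.
 */
static void example_drain_budget(spinlock_t *lock, int *budget,
                                 void (*submit)(int idx))
{
        spin_lock_irq(lock);
        while (*budget > 0) {
                int idx = --(*budget);

                spin_unlock_irq(lock);  /* do not call out with lock held */
                submit(idx);
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);
}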
| 2273 | |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2274 | void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, |
| 2275 | union nvme_result *res) |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2276 | { |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2277 | u32 result = le32_to_cpu(res->u32); |
| 2278 | bool done = true; |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2279 | |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2280 | switch (le16_to_cpu(status) >> 1) { |
| 2281 | case NVME_SC_SUCCESS: |
| 2282 | done = false; |
| 2283 | /*FALLTHRU*/ |
| 2284 | case NVME_SC_ABORT_REQ: |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2285 | ++ctrl->event_limit; |
Sagi Grimberg | c669ccd | 2017-05-04 13:33:14 +0300 | [diff] [blame] | 2286 | queue_work(nvme_wq, &ctrl->async_event_work); |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2287 | break; |
| 2288 | default: |
| 2289 | break; |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2290 | } |
| 2291 | |
Christoph Hellwig | 7bf5853 | 2016-11-10 07:32:34 -0800 | [diff] [blame] | 2292 | if (done) |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2293 | return; |
| 2294 | |
| 2295 | switch (result & 0xff07) { |
| 2296 | case NVME_AER_NOTICE_NS_CHANGED: |
| 2297 | dev_info(ctrl->device, "rescanning\n"); |
| 2298 | nvme_queue_scan(ctrl); |
| 2299 | break; |
| 2300 | default: |
| 2301 | dev_warn(ctrl->device, "async event result %08x\n", result); |
| 2302 | } |
| 2303 | } |
| 2304 | EXPORT_SYMBOL_GPL(nvme_complete_async_event); |
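
/*
 * How the 0xff07 mask above decodes the Asynchronous Event Request
 * completion dword, per the NVMe spec layout: bits 2:0 hold the event
 * type, bits 15:8 the event information, bits 23:16 the log page
 * identifier. Masking with 0xff07 therefore keeps type and information
 * while dropping the log page. Hypothetical decoding helpers:
 */
static inline u8 example_aer_type(u32 result)
{
        return result & 0x7;            /* bits 2:0 */
}

static inline u8 example_aer_info(u32 result)
{
        return (result >> 8) & 0xff;    /* bits 15:8 */
}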
| 2305 | |
| 2306 | void nvme_queue_async_events(struct nvme_ctrl *ctrl) |
| 2307 | { |
| 2308 | ctrl->event_limit = NVME_NR_AERS; |
Sagi Grimberg | c669ccd | 2017-05-04 13:33:14 +0300 | [diff] [blame] | 2309 | queue_work(nvme_wq, &ctrl->async_event_work); |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2310 | } |
| 2311 | EXPORT_SYMBOL_GPL(nvme_queue_async_events); |
| 2312 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2313 | static DEFINE_IDA(nvme_instance_ida); |
| 2314 | |
| 2315 | static int nvme_set_instance(struct nvme_ctrl *ctrl) |
| 2316 | { |
| 2317 | int instance, error; |
| 2318 | |
| 2319 | do { |
| 2320 | if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) |
| 2321 | return -ENODEV; |
| 2322 | |
| 2323 | spin_lock(&dev_list_lock); |
| 2324 | error = ida_get_new(&nvme_instance_ida, &instance); |
| 2325 | spin_unlock(&dev_list_lock); |
| 2326 | } while (error == -EAGAIN); |
| 2327 | |
| 2328 | if (error) |
| 2329 | return -ENODEV; |
| 2330 | |
| 2331 | ctrl->instance = instance; |
| 2332 | return 0; |
| 2333 | } |
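
/*
 * The loop in nvme_set_instance() is the two-step IDA idiom of this
 * kernel generation: ida_pre_get() preallocates outside the lock (it
 * may sleep), ida_get_new() then assigns an ID under the spinlock
 * without allocating, and -EAGAIN means a racing caller consumed the
 * preallocated node, so the pair is retried. A self-contained
 * hypothetical user:
 */
static DEFINE_IDA(example_ida);
static DEFINE_SPINLOCK(example_ida_lock);

static int example_get_id(int *id)
{
        int err;

        do {
                if (!ida_pre_get(&example_ida, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(&example_ida_lock);
                err = ida_get_new(&example_ida, id);
                spin_unlock(&example_ida_lock);
        } while (err == -EAGAIN);

        return err;
}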
| 2334 | |
| 2335 | static void nvme_release_instance(struct nvme_ctrl *ctrl) |
| 2336 | { |
| 2337 | spin_lock(&dev_list_lock); |
| 2338 | ida_remove(&nvme_instance_ida, ctrl->instance); |
| 2339 | spin_unlock(&dev_list_lock); |
| 2340 | } |
| 2341 | |
Keith Busch | 53029b0 | 2015-11-28 15:41:02 +0100 | [diff] [blame] | 2342 | void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2343 | { |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2344 | flush_work(&ctrl->async_event_work); |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2345 | flush_work(&ctrl->scan_work); |
| 2346 | nvme_remove_namespaces(ctrl); |
| 2347 | |
Keith Busch | 53029b0 | 2015-11-28 15:41:02 +0100 | [diff] [blame] | 2348 | device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2349 | |
| 2350 | spin_lock(&dev_list_lock); |
| 2351 | list_del(&ctrl->node); |
| 2352 | spin_unlock(&dev_list_lock); |
Keith Busch | 53029b0 | 2015-11-28 15:41:02 +0100 | [diff] [blame] | 2353 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2354 | EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); |
Keith Busch | 53029b0 | 2015-11-28 15:41:02 +0100 | [diff] [blame] | 2355 | |
| 2356 | static void nvme_free_ctrl(struct kref *kref) |
| 2357 | { |
| 2358 | struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2359 | |
| 2360 | put_device(ctrl->device); |
| 2361 | nvme_release_instance(ctrl); |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2362 | ida_destroy(&ctrl->ns_ida); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2363 | |
| 2364 | ctrl->ops->free_ctrl(ctrl); |
| 2365 | } |
| 2366 | |
| 2367 | void nvme_put_ctrl(struct nvme_ctrl *ctrl) |
| 2368 | { |
| 2369 | kref_put(&ctrl->kref, nvme_free_ctrl); |
| 2370 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2371 | EXPORT_SYMBOL_GPL(nvme_put_ctrl); |
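
/*
 * The refcount discipline around struct nvme_ctrl, sketched: kref_init()
 * in nvme_init_ctrl() sets the count to 1 for the creator, every extra
 * user pairs a kref_get() with nvme_put_ctrl(), and nvme_free_ctrl()
 * runs exactly once, when the final reference drops. A hypothetical user:
 */
static void example_use_ctrl(struct nvme_ctrl *ctrl)
{
        kref_get(&ctrl->kref);  /* take our own reference */
        /* ... ctrl is safe to dereference here ... */
        nvme_put_ctrl(ctrl);    /* drop it; may invoke nvme_free_ctrl() */
}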
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2372 | |
| 2373 | /* |
| 2374 | * Initialize an NVMe controller structure. This needs to be called during |
| 2375 | * the earliest initialization so that we have the initialized structure |
| 2376 | * around during probing. |
| 2377 | */ |
| 2378 | int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, |
| 2379 | const struct nvme_ctrl_ops *ops, unsigned long quirks) |
| 2380 | { |
| 2381 | int ret; |
| 2382 | |
Christoph Hellwig | bb8d261 | 2016-04-26 13:51:57 +0200 | [diff] [blame] | 2383 | ctrl->state = NVME_CTRL_NEW; |
| 2384 | spin_lock_init(&ctrl->lock); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2385 | INIT_LIST_HEAD(&ctrl->namespaces); |
Christoph Hellwig | 69d3b8a | 2015-12-24 15:27:00 +0100 | [diff] [blame] | 2386 | mutex_init(&ctrl->namespaces_mutex); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2387 | kref_init(&ctrl->kref); |
| 2388 | ctrl->dev = dev; |
| 2389 | ctrl->ops = ops; |
| 2390 | ctrl->quirks = quirks; |
Christoph Hellwig | 5955be2 | 2016-04-26 13:51:59 +0200 | [diff] [blame] | 2391 | INIT_WORK(&ctrl->scan_work, nvme_scan_work); |
Christoph Hellwig | f866fc42 | 2016-04-26 13:52:00 +0200 | [diff] [blame] | 2392 | INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2393 | |
| 2394 | ret = nvme_set_instance(ctrl); |
| 2395 | if (ret) |
| 2396 | goto out; |
| 2397 | |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 2398 | ctrl->device = device_create_with_groups(nvme_class, ctrl->dev, |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2399 | MKDEV(nvme_char_major, ctrl->instance), |
Christoph Hellwig | f4f0f63 | 2016-02-09 12:44:03 -0700 | [diff] [blame] | 2400 | ctrl, nvme_dev_attr_groups, |
Keith Busch | 779ff756 | 2016-01-12 15:09:31 -0700 | [diff] [blame] | 2401 | "nvme%d", ctrl->instance); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2402 | if (IS_ERR(ctrl->device)) { |
| 2403 | ret = PTR_ERR(ctrl->device); |
| 2404 | goto out_release_instance; |
| 2405 | } |
| 2406 | get_device(ctrl->device); |
Keith Busch | 075790e | 2016-02-24 09:15:53 -0700 | [diff] [blame] | 2407 | ida_init(&ctrl->ns_ida); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2408 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2409 | spin_lock(&dev_list_lock); |
| 2410 | list_add_tail(&ctrl->node, &nvme_ctrl_list); |
| 2411 | spin_unlock(&dev_list_lock); |
| 2412 | |
Andy Lutomirski | c5552fd | 2017-02-07 10:08:45 -0800 | [diff] [blame] | 2413 | /* |
| 2414 | * Initialize latency tolerance controls. The sysfs files won't |
| 2415 | * be visible to userspace unless the device actually supports APST. |
| 2416 | */ |
| 2417 | ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; |
| 2418 | dev_pm_qos_update_user_latency_tolerance(ctrl->device, |
| 2419 | min(default_ps_max_latency_us, (unsigned long)S32_MAX)); |
| 2420 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2421 | return 0; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2422 | out_release_instance: |
| 2423 | nvme_release_instance(ctrl); |
| 2424 | out: |
| 2425 | return ret; |
| 2426 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2427 | EXPORT_SYMBOL_GPL(nvme_init_ctrl); |
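
/*
 * A sketch of how a transport wires itself up through nvme_init_ctrl().
 * The ops and callbacks below are hypothetical stand-ins (the in-tree
 * users are the PCIe and fabrics drivers), and a real transport also
 * fills in reg_write32(), reg_read64() and the remaining callbacks.
 */
static int example_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
        *val = 0;       /* a real transport reads its register window */
        return 0;
}

static void example_free_ctrl(struct nvme_ctrl *ctrl)
{
        /* a real transport frees the structure embedding ctrl */
}

static void example_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
        /* a real transport issues an Async Event Request command */
}

static const struct nvme_ctrl_ops example_ctrl_ops = {
        .name                   = "example",
        .module                 = THIS_MODULE,
        .reg_read32             = example_reg_read32,
        .free_ctrl              = example_free_ctrl,
        .submit_async_event     = example_submit_async_event,
};

/* ...and during probe: nvme_init_ctrl(&ctrl, dev, &example_ctrl_ops, 0); */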
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2428 | |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2429 | /** |
| 2430 | * nvme_kill_queues(): Ends all namespace queues |
| 2431 | * @ctrl: the dead controller whose queues need to be ended |
| 2432 | * |
| 2433 | * Call this function when the driver determines it is unable to get the |
| 2434 | * controller into a state capable of servicing I/O. |
| 2435 | */ |
| 2436 | void nvme_kill_queues(struct nvme_ctrl *ctrl) |
| 2437 | { |
| 2438 | struct nvme_ns *ns; |
| 2439 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2440 | mutex_lock(&ctrl->namespaces_mutex); |
Ming Lei | 82654b6 | 2017-06-02 16:32:08 +0800 | [diff] [blame] | 2441 | |
| 2442 | /* Forcibly start all queues to avoid having stuck requests */ |
| 2443 | blk_mq_start_hw_queues(ctrl->admin_q); |
| 2444 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2445 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2446 | /* |
| 2447 | * Revalidating a dead namespace sets capacity to 0. This stops |
| 2448 | * buffered writers from dirtying pages that can't be synced. |
| 2449 | */ |
Keith Busch | f33447b | 2017-02-10 18:15:51 -0500 | [diff] [blame] | 2450 | if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) |
| 2451 | continue; |
| 2452 | revalidate_disk(ns->disk); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2453 | blk_set_queue_dying(ns->queue); |
Ming Lei | 806f026 | 2017-05-22 23:05:03 +0800 | [diff] [blame] | 2454 | |
| 2455 | /* |
| 2456 | * Forcibly start all queues to avoid having stuck requests. |
| 2457 | * Note that we must ensure the queues are not stopped |
| 2458 | * when the final removal happens. |
| 2459 | */ |
| 2460 | blk_mq_start_hw_queues(ns->queue); |
Ming Lei | 986f75c | 2017-05-22 23:05:04 +0800 | [diff] [blame] | 2461 | |
| 2462 | /* drain requests already sitting in the requeue list */ |
| 2463 | blk_mq_kick_requeue_list(ns->queue); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2464 | } |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2465 | mutex_unlock(&ctrl->namespaces_mutex); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2466 | } |
Linus Torvalds | 237045f | 2016-03-18 17:13:31 -0700 | [diff] [blame] | 2467 | EXPORT_SYMBOL_GPL(nvme_kill_queues); |
Keith Busch | 69d9a99 | 2016-02-24 09:15:56 -0700 | [diff] [blame] | 2468 | |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2469 | void nvme_unfreeze(struct nvme_ctrl *ctrl) |
| 2470 | { |
| 2471 | struct nvme_ns *ns; |
| 2472 | |
| 2473 | mutex_lock(&ctrl->namespaces_mutex); |
| 2474 | list_for_each_entry(ns, &ctrl->namespaces, list) |
| 2475 | blk_mq_unfreeze_queue(ns->queue); |
| 2476 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2477 | } |
| 2478 | EXPORT_SYMBOL_GPL(nvme_unfreeze); |
| 2479 | |
| 2480 | void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) |
| 2481 | { |
| 2482 | struct nvme_ns *ns; |
| 2483 | |
| 2484 | mutex_lock(&ctrl->namespaces_mutex); |
| 2485 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
| 2486 | timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); |
| 2487 | if (timeout <= 0) |
| 2488 | break; |
| 2489 | } |
| 2490 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2491 | } |
| 2492 | EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); |
| 2493 | |
| 2494 | void nvme_wait_freeze(struct nvme_ctrl *ctrl) |
| 2495 | { |
| 2496 | struct nvme_ns *ns; |
| 2497 | |
| 2498 | mutex_lock(&ctrl->namespaces_mutex); |
| 2499 | list_for_each_entry(ns, &ctrl->namespaces, list) |
| 2500 | blk_mq_freeze_queue_wait(ns->queue); |
| 2501 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2502 | } |
| 2503 | EXPORT_SYMBOL_GPL(nvme_wait_freeze); |
| 2504 | |
| 2505 | void nvme_start_freeze(struct nvme_ctrl *ctrl) |
| 2506 | { |
| 2507 | struct nvme_ns *ns; |
| 2508 | |
| 2509 | mutex_lock(&ctrl->namespaces_mutex); |
| 2510 | list_for_each_entry(ns, &ctrl->namespaces, list) |
Ming Lei | 1671d52 | 2017-03-27 20:06:57 +0800 | [diff] [blame] | 2511 | blk_freeze_queue_start(ns->queue); |
Keith Busch | 302ad8c | 2017-03-01 14:22:12 -0500 | [diff] [blame] | 2512 | mutex_unlock(&ctrl->namespaces_mutex); |
| 2513 | } |
| 2514 | EXPORT_SYMBOL_GPL(nvme_start_freeze); |
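
/*
 * How the freeze helpers above pair up across a controller reset,
 * loosely modelled on the PCIe driver's flow; teardown and bring-up of
 * the transport itself are elided and the function, including its 5 * HZ
 * drain budget, is hypothetical.
 */
static void example_reset_flow(struct nvme_ctrl *ctrl)
{
        nvme_start_freeze(ctrl);                /* block new I/O submitters */
        nvme_wait_freeze_timeout(ctrl, 5 * HZ); /* bounded drain of I/O */
        /* ... reset the transport and reinitialize the queues ... */
        nvme_unfreeze(ctrl);                    /* release frozen queues */
}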
| 2515 | |
Keith Busch | 2564626 | 2016-01-04 09:10:57 -0700 | [diff] [blame] | 2516 | void nvme_stop_queues(struct nvme_ctrl *ctrl) |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2517 | { |
| 2518 | struct nvme_ns *ns; |
| 2519 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2520 | mutex_lock(&ctrl->namespaces_mutex); |
Bart Van Assche | a6eaa88 | 2016-10-28 17:23:40 -0700 | [diff] [blame] | 2521 | list_for_each_entry(ns, &ctrl->namespaces, list) |
Bart Van Assche | 3174dd3 | 2016-10-28 17:23:19 -0700 | [diff] [blame] | 2522 | blk_mq_quiesce_queue(ns->queue); |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2523 | mutex_unlock(&ctrl->namespaces_mutex); |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2524 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2525 | EXPORT_SYMBOL_GPL(nvme_stop_queues); |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2526 | |
Keith Busch | 2564626 | 2016-01-04 09:10:57 -0700 | [diff] [blame] | 2527 | void nvme_start_queues(struct nvme_ctrl *ctrl) |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2528 | { |
| 2529 | struct nvme_ns *ns; |
| 2530 | |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2531 | mutex_lock(&ctrl->namespaces_mutex); |
| 2532 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2533 | blk_mq_start_stopped_hw_queues(ns->queue, true); |
| 2534 | blk_mq_kick_requeue_list(ns->queue); |
| 2535 | } |
Keith Busch | 32f0c4a | 2016-07-13 11:45:02 -0600 | [diff] [blame] | 2536 | mutex_unlock(&ctrl->namespaces_mutex); |
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2537 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2538 | EXPORT_SYMBOL_GPL(nvme_start_queues); |
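
/*
 * nvme_stop_queues()/nvme_start_queues() bracket transport outages:
 * quiescing prevents further ->queue_rq() calls without failing requests
 * already queued, and the restart kicks each requeue list so requests
 * parked during the outage are redriven. A hypothetical pairing:
 */
static void example_suspend_resume_io(struct nvme_ctrl *ctrl)
{
        nvme_stop_queues(ctrl);         /* quiesce: no new ->queue_rq() */
        /* ... recover the transport ... */
        nvme_start_queues(ctrl);        /* restart queues, kick requeues */
}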
Sagi Grimberg | 363c9aa | 2015-12-24 15:26:59 +0100 | [diff] [blame] | 2539 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2540 | int __init nvme_core_init(void) |
| 2541 | { |
| 2542 | int result; |
| 2543 | |
Sagi Grimberg | 9a6327d | 2017-06-07 20:31:55 +0200 | [diff] [blame] | 2544 | nvme_wq = alloc_workqueue("nvme-wq", |
| 2545 | WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); |
| 2546 | if (!nvme_wq) |
| 2547 | return -ENOMEM; |
| 2548 | |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2549 | result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme", |
| 2550 | &nvme_dev_fops); |
| 2551 | if (result < 0) |
Sagi Grimberg | 9a6327d | 2017-06-07 20:31:55 +0200 | [diff] [blame] | 2552 | goto destroy_wq; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2553 | else if (result > 0) |
| 2554 | nvme_char_major = result; |
| 2555 | |
| 2556 | nvme_class = class_create(THIS_MODULE, "nvme"); |
| 2557 | if (IS_ERR(nvme_class)) { |
| 2558 | result = PTR_ERR(nvme_class); |
| 2559 | goto unregister_chrdev; |
| 2560 | } |
| 2561 | |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2562 | return 0; |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2563 | |
Sagi Grimberg | 9a6327d | 2017-06-07 20:31:55 +0200 | [diff] [blame] | 2564 | unregister_chrdev: |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2565 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); |
Sagi Grimberg | 9a6327d | 2017-06-07 20:31:55 +0200 | [diff] [blame] | 2566 | destroy_wq: |
| 2567 | destroy_workqueue(nvme_wq); |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2568 | return result; |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2569 | } |
| 2570 | |
| 2571 | void nvme_core_exit(void) |
| 2572 | { |
Christoph Hellwig | f3ca80f | 2015-11-28 15:40:19 +0100 | [diff] [blame] | 2573 | class_destroy(nvme_class); |
| 2574 | __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); |
Sagi Grimberg | 9a6327d | 2017-06-07 20:31:55 +0200 | [diff] [blame] | 2575 | destroy_workqueue(nvme_wq); |
Christoph Hellwig | 5bae7f7 | 2015-11-28 15:39:07 +0100 | [diff] [blame] | 2576 | } |
Ming Lin | 576d55d | 2016-02-10 10:03:32 -0800 | [diff] [blame] | 2577 | |
| 2578 | MODULE_LICENSE("GPL"); |
| 2579 | MODULE_VERSION("1.0"); |
| 2580 | module_init(nvme_core_init); |
| 2581 | module_exit(nvme_core_exit); |