/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"

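/*
 * Fabrics Property Set: only the 4-byte form (attrib bit 0 clear) is
 * accepted, and CC is the only writable property; anything else is
 * rejected as an invalid field.
 */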
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
	u16 status = 0;

	if (!(req->cmd->prop_set.attrib & 1)) {
		u64 val = le64_to_cpu(req->cmd->prop_set.value);

		switch (le32_to_cpu(req->cmd->prop_set.offset)) {
		case NVME_REG_CC:
			nvmet_update_cc(req->sq->ctrl, val);
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	} else {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	nvmet_req_complete(req, status);
}

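/*
 * Fabrics Property Get: the 8-byte form (attrib bit 0 set) exposes CAP,
 * the 4-byte form exposes VS, CC and CSTS; the value is returned in the
 * 64-bit completion result.
 */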
static void nvmet_execute_prop_get(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;
	u64 val = 0;

	if (req->cmd->prop_get.attrib & 1) {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_CAP:
			val = ctrl->cap;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	} else {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_VS:
			val = ctrl->subsys->ver;
			break;
		case NVME_REG_CC:
			val = ctrl->cc;
			break;
		case NVME_REG_CSTS:
			val = ctrl->csts;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	}

	req->rsp->result.u64 = cpu_to_le64(val);
	nvmet_req_complete(req, status);
}

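/*
 * Parse a fabrics command capsule received on an already connected queue
 * and pick the matching execute handler; only Property Set and Property
 * Get are handled here.
 */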
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	req->ns = NULL;

	switch (cmd->fabrics.fctype) {
	case nvme_fabrics_type_property_set:
		req->data_len = 0;
		req->execute = nvmet_execute_prop_set;
		break;
	case nvme_fabrics_type_property_get:
		req->data_len = 0;
		req->execute = nvmet_execute_prop_get;
		break;
	default:
		pr_err("received unknown capsule type 0x%x\n",
			cmd->fabrics.fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return 0;
}

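/*
 * Bind the controller to the queue pair carried by this request.  The
 * cmpxchg() on sq->ctrl guards against a queue being connected twice.
 */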
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;

	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	}

	nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
	return 0;
}

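/*
 * Admin queue Connect: the host must pass cntlid 0xffff (i.e. let the
 * target allocate the controller); a new controller is created and its
 * cntlid returned in the completion result.
 */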
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 status = 0;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
		pr_warn("connect attempt for invalid controller ID %#x\n",
			le16_to_cpu(d->cntlid));
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		goto out;
	}

	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
				  le32_to_cpu(c->kato), &ctrl);
	if (status)
		goto out;

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		nvmet_ctrl_put(ctrl);
		goto out;
	}

	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
}

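/*
 * I/O queue Connect: look up the existing controller by subsystem NQN,
 * host NQN and cntlid, validate the requested queue ID against the
 * subsystem limit, then install the queue on that controller.
 */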
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 qid = le16_to_cpu(c->qid);
	u16 status = 0;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
				     le16_to_cpu(d->cntlid),
				     req, &ctrl);
	if (status)
		goto out;

	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		/* pass back cntlid that had the issue of installing queue */
		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
		goto out_ctrl_put;
	}

	pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

out_ctrl_put:
	nvmet_ctrl_put(ctrl);
	goto out;
}

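/*
 * Only a fabrics Connect capsule is accepted on a queue that is not yet
 * connected; qid 0 connects the admin queue, anything else an I/O queue.
 */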
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	req->ns = NULL;

	if (cmd->common.opcode != nvme_fabrics_command) {
		pr_err("invalid command 0x%x on unconnected queue.\n",
			cmd->fabrics.opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
		pr_err("invalid capsule type 0x%x on unconnected queue.\n",
			cmd->fabrics.fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	req->data_len = sizeof(struct nvmf_connect_data);
	if (cmd->connect.qid == 0)
		req->execute = nvmet_execute_admin_connect;
	else
		req->execute = nvmet_execute_io_connect;
	return 0;
}