/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

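/*
 * Completion handler for every bio submitted in this file: map the block
 * layer status to an NVMe status code, complete the target request, and
 * drop the bio reference unless it is the request's embedded inline bio.
 */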
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req,
		bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

	if (bio != &req->inline_bio)
		bio_put(bio);
}

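/*
 * Transfer length of a read/write command in bytes.  The NLB field is a
 * 0-based count of logical blocks, hence the +1 before scaling by the
 * namespace block size.
 */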
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

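/*
 * Execute a read or write command by translating the request's
 * scatterlist into one or more chained bios against the backing
 * block device.
 */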
static void nvmet_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct bio *bio = &req->inline_bio;
	struct scatterlist *sg;
	sector_t sector;
	blk_qc_t cookie;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

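	/* Convert the starting LBA into a 512-byte sector offset. */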
	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

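	/*
	 * Map each scatterlist element into the bio.  When the current bio
	 * fills up, allocate a fresh one, chain it to its predecessor, and
	 * submit the predecessor; chaining ensures nvmet_bio_done only runs
	 * once every bio in the chain has completed.
	 */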
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

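	/*
	 * Submit the final bio and opportunistically poll the backing
	 * queue for its completion before returning.
	 */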
	cookie = submit_bio(bio);

	blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}

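/*
 * Implement the NVMe Flush command as an empty preflush write bio on
 * the backing device.
 */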
static void nvmet_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->inline_bio;

	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

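/*
 * Issue a discard for a single DSM range, accumulating the resulting
 * bios in *bio.  The LBA and length are converted from namespace
 * logical blocks to 512-byte sectors for the block layer.
 */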
static u16 nvmet_discard_range(struct nvmet_ns *ns,
		struct nvme_dsm_range *range, struct bio **bio)
{
	if (__blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

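/*
 * Walk the array of DSM ranges supplied by the host, copying each one
 * out of the SGL and turning it into a discard.  If any bios were
 * built, their completion finishes the request; otherwise complete it
 * inline with the accumulated status.
 */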
static void nvmet_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_discard_range(req->ns, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

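/*
 * Dispatch a Dataset Management command.  Only the Deallocate
 * attribute is implemented; the other attributes are accepted and
 * completed successfully as no-ops.
 */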
static void nvmet_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

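/*
 * Implement Write Zeroes via the block layer zeroout helper,
 * converting the command's LBA and block count to 512-byte sectors.
 */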
static void nvmet_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	u16 status = NVME_SC_SUCCESS;
	sector_t sector;
	sector_t nr_sector;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	/* NLB is a 0-based block count: add one before scaling to sectors. */
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0))
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

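/*
 * Parse and validate an I/O queue command: check the controller state,
 * look up the target namespace, and set up the execute handler and
 * expected data length for the opcode.
 */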
u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret)) {
		req->ns = NULL;
		return ret;
	}

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns))
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_execute_write_zeroes;
		/* Write Zeroes carries no data payload from the host. */
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}