/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

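/*
 * Shared bio completion handler: report success or a generic internal error
 * back to the core and drop the bio unless it is the request's inline bio.
 */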
static void nvmet_bio_done(struct bio *bio)
{
        struct nvmet_req *req = bio->bi_private;

        nvmet_req_complete(req,
                bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

        if (bio != &req->inline_bio)
                bio_put(bio);
}

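/*
 * Transfer length in bytes: NLB is a 0's based block count, scaled by the
 * namespace block size.
 */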
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
        return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
                        req->ns->blksize_shift;
}

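/*
 * Read/Write handler: build one or more chained bios from the request's
 * scatterlist and submit them to the backing block device.
 */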
static void nvmet_execute_rw(struct nvmet_req *req)
{
        int sg_cnt = req->sg_cnt;
        struct bio *bio = &req->inline_bio;
        struct scatterlist *sg;
        sector_t sector;
        blk_qc_t cookie;
        int op, op_flags = 0, i;

        if (!req->sg_cnt) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (req->cmd->rw.opcode == nvme_cmd_write) {
                op = REQ_OP_WRITE;
                op_flags = REQ_SYNC | REQ_IDLE;
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        op_flags |= REQ_FUA;
        } else {
                op = REQ_OP_READ;
        }

        sector = le64_to_cpu(req->cmd->rw.slba);
        sector <<= (req->ns->blksize_shift - 9);

        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio_set_op_attrs(bio, op, op_flags);

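        /*
         * Map the scatterlist into the bio; whenever the current bio can't
         * take a full segment, allocate a new one, chain it to the previous
         * bio and submit the full one.
         */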
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        struct bio *prev = bio;

                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
                        bio_set_dev(bio, req->ns->bdev);
                        bio->bi_iter.bi_sector = sector;
                        bio_set_op_attrs(bio, op, op_flags);

                        bio_chain(bio, prev);
                        submit_bio(prev);
                }

                sector += sg->length >> 9;
                sg_cnt--;
        }

        cookie = submit_bio(bio);

        blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}

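/*
 * Flush handler: an empty preflush write bio forces the backing device to
 * flush its volatile write cache.
 */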
static void nvmet_execute_flush(struct nvmet_req *req)
{
        struct bio *bio = &req->inline_bio;

        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        submit_bio(bio);
}

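/*
 * Translate a single DSM range into a discard on the backing device,
 * accumulating the resulting bios in *bio.
 */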
static u16 nvmet_discard_range(struct nvmet_ns *ns,
                struct nvme_dsm_range *range, struct bio **bio)
{
        if (__blkdev_issue_discard(ns->bdev,
                        le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
                        GFP_KERNEL, 0, bio))
                return NVME_SC_INTERNAL | NVME_SC_DNR;
        return 0;
}

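/*
 * Deallocate handler: copy each DSM range descriptor from the command data
 * and issue the corresponding discards; the chained bios complete through
 * nvmet_bio_done().
 */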
static void nvmet_execute_discard(struct nvmet_req *req)
{
        struct nvme_dsm_range range;
        struct bio *bio = NULL;
        int i;
        u16 status;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                sizeof(range));
                if (status)
                        break;

                status = nvmet_discard_range(req->ns, &range, &bio);
                if (status)
                        break;
        }

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else {
                        submit_bio(bio);
                }
        } else {
                nvmet_req_complete(req, status);
        }
}

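/*
 * Dataset Management: only the Deallocate attribute is implemented; the
 * integral read/write hints are accepted but ignored.
 */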
static void nvmet_execute_dsm(struct nvmet_req *req)
{
        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

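/*
 * Write Zeroes handler: zero out the requested LBA range on the backing
 * device.
 */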
static void nvmet_execute_write_zeroes(struct nvmet_req *req)
{
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        struct bio *bio = NULL;
        u16 status = NVME_SC_SUCCESS;
        sector_t sector;
        sector_t nr_sector;

        sector = le64_to_cpu(write_zeroes->slba) <<
                (req->ns->blksize_shift - 9);
        nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
                (req->ns->blksize_shift - 9)) + 1;

        if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
                                GFP_KERNEL, &bio, 0))
                status = NVME_SC_INTERNAL | NVME_SC_DNR;

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                submit_bio(bio);
        } else {
                nvmet_req_complete(req, status);
        }
}

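/*
 * Validate controller state and namespace, then set up the execute handler
 * and expected data length for the I/O command.
 */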
u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret)) {
                req->ns = NULL;
                return ret;
        }

        req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
        if (unlikely(!req->ns))
                return NVME_SC_INVALID_NS | NVME_SC_DNR;

        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_execute_flush;
                req->data_len = 0;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_execute_dsm;
                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
                        sizeof(struct nvme_dsm_range);
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_execute_write_zeroes;
                /* Write Zeroes transfers no data from the host. */
                req->data_len = 0;
                return 0;
        default:
                pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
                       req->sq->qid);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}