/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

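/*
 * struct sg_io_v4 carries userspace pointers as 64-bit integers; uptr64()
 * turns them back into __user pointers.
 */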
#define uptr64(val) ((void __user *)(uintptr_t)(val))

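/*
 * One of these is allocated per bsg queue: it bundles the blk-mq tag set
 * with the LLD's job and timeout handlers.
 */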
struct bsg_set {
	struct blk_mq_tag_set	tag_set;
	bsg_job_fn		*job_fn;
	bsg_timeout_fn		*timeout_fn;
};

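/**
 * bsg_transport_check_proto - check if a SG v4 header is usable by bsg-lib
 * @hdr: sg_io_v4 header submitted from userspace
 *
 * Only SCSI transport subprotocol requests are accepted, and only from
 * tasks with CAP_SYS_RAWIO.
 */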
static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}

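/**
 * bsg_transport_fill_hdr - copy the transport request from userspace
 * @rq: request being set up
 * @hdr: sg_io_v4 header submitted from userspace
 * @mode: open mode of the bsg device (unused here)
 */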
static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);

	return PTR_ERR_OR_ZERO(job->request);
}

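/**
 * bsg_transport_complete_rq - fill in a SG v4 reply header for userspace
 * @rq: request that completed
 * @hdr: sg_io_v4 header to fill in
 *
 * Translates the job result into the header status fields and copies the
 * transport reply, if any, back to userspace.
 */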
static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret = 0;

	/*
	 * The assignments below don't make much sense, but are kept for
	 * bug by bug backwards compatibility:
	 */
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = driver_byte(job->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
		/* we're only returning the result field in the reply */
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	/* we assume all request payload was transferred, residual == 0 */
	hdr->dout_resid = 0;

	if (rq->next_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	return ret;
}

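/**
 * bsg_transport_free_rq - free the memory allocated by bsg_transport_fill_hdr
 * @rq: request being freed
 */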
static void bsg_transport_free_rq(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	kfree(job->request);
}

static const struct bsg_ops bsg_transport_ops = {
	.check_proto		= bsg_transport_check_proto,
	.fill_hdr		= bsg_transport_fill_hdr,
	.complete_rq		= bsg_transport_complete_rq,
	.free_rq		= bsg_transport_free_rq,
};

/**
 * bsg_teardown_job - routine to teardown a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

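/**
 * bsg_job_put - drop a reference to a bsg_job, tearing it down on the last put
 * @job: bsg_job to put
 */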
void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

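/**
 * bsg_job_get - get a reference to a bsg_job unless it is already being freed
 * @job: bsg_job to get
 *
 * Returns zero if the job was already on its way down.
 */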
int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	blk_mq_complete_request(blk_mq_rq_from_pdu(job));
}
EXPORT_SYMBOL_GPL(bsg_job_done);

/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

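/**
 * bsg_map_buffer - map a request's bio data into a scatterlist
 * @buf: bsg_buffer to fill in
 * @req: request to map
 */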
static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}

/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct request *rsp = req->next_rq;
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (rsp && rsp->bio) {
		ret = bsg_map_buffer(&job->reply_payload, rsp);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

/**
 * bsg_queue_rq - generic handler for bsg requests
 * @hctx: hardware queue
 * @bd: queue data
 *
 * On error bsg_prepare_job() sets ->result to a -Exyz error value
 * (currently -ENOMEM) and the request is failed with BLK_STS_IOERR,
 * as is a non-zero return from the LLD's job_fn.
 */
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	/* drop the reference taken above; the job holds its own reference */
	put_device(dev);
	return sts;
}

/* called right after the request is allocated for the request_queue */
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

/* called right before the request is given to the request_queue user */
static void bsg_initialize_rq(struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	void *reply = job->reply;

	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
			unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

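/**
 * bsg_remove_queue - tear down a queue created by bsg_setup_queue()
 * @q: the request_queue to remove, may be NULL
 */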
void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(q);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

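/*
 * blk-mq ->timeout handler: defer to the LLD's timeout_fn if one was
 * registered, otherwise treat the timeout as already handled.
 */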
static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq		= bsg_queue_rq,
	.init_request		= bsg_init_rq,
	.exit_request		= bsg_exit_rq,
	.initialize_rq_fn	= bsg_initialize_rq,
	.complete		= bsg_complete,
	.timeout		= bsg_timeout,
};

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler for the queue
 * @dd_job_size: size of LLD data needed for each job
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to initialize - register queue\n",
		       dev->kobj.name);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
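
/*
 * Example usage (an illustrative sketch, not part of this library): a
 * hypothetical LLD "foo" keeping per-job data in struct foo_job_data
 * could wire up a bsg queue roughly like this:
 *
 *	static int foo_bsg_job(struct bsg_job *job)
 *	{
 *		struct foo_job_data *data = job->dd_data;
 *
 *		return foo_submit_command(data, job->request,
 *					  job->request_len);
 *	}
 *
 *	q = bsg_setup_queue(dev, dev_name(dev), foo_bsg_job, NULL,
 *			    sizeof(struct foo_job_data));
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	...
 *	bsg_remove_queue(q);
 *
 * foo_submit_command() stands in for the driver's own submission path;
 * when the hardware completes the command, the driver must call
 * bsg_job_done(job, result, reply_payload_rcv_len) to finish the request.
 */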