/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

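/*
 * Per-connection state: one nbd_sock per server connection; tx_lock
 * serializes senders on that connection.
 */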
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3

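/*
 * Per-device state. config_lock serializes (re)configuration through the
 * ioctl path (adding sockets, resizing, starting the device); runtime_flags
 * carries the NBD_* bits above and is manipulated with atomic bitops.
 */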
struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

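/*
 * Size handling: blksize/bytesize are pushed into both the request queue
 * limits and the block device size, and a KOBJ_CHANGE uevent is emitted
 * so userspace (e.g. udev) notices the new capacity.
 */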
static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bd_set_size(bdev, 0);
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
	bd_set_size(bdev, nbd->bytesize);
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			 loff_t blocksize, loff_t nr_blocks)
{
	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;
	if (nbd_is_connected(nbd))
		nbd_size_update(nbd, bdev);
}

static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shut down all sockets, causing the receivers to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];

		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

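/*
 * blk-mq timeout handler: a single timed-out request takes down every
 * connection; the receiver workers then error out and the remaining
 * in-flight requests are failed when the sockets are cleared. Returning
 * BLK_EH_HANDLED tells blk-mq this request has been dealt with (it
 * completes with req->errors set).
 */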
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
		     int size, int msg_flags)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

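/* kmap the bvec's page and push it through sock_xmit() */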
static inline int sock_send_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec, int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);

	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

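/*
 * On-the-wire request header, as laid out by struct nbd_request in
 * <linux/nbd.h> (fields big-endian, 28 bytes total; see the
 * BUILD_BUG_ON in nbd_init()):
 *
 *	__be32 magic;		NBD_REQUEST_MAGIC
 *	__be32 type;		NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *	char   handle[8];	we stash the blk-mq unique tag here
 *	__be64 from;		byte offset (unused for FLUSH)
 *	__be32 len;		payload length (unused for FLUSH)
 *
 * For writes the data follows the header; MSG_MORE hints to the network
 * stack that more data is coming so header and payload can share segments.
 */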
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			   (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}

static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);

	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
			   bvec->bv_len, MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

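/*
 * Reply header, as laid out by struct nbd_reply in <linux/nbd.h>:
 * __be32 magic (NBD_REPLY_MAGIC), __be32 error, char handle[8]. The
 * handle carries back the blk-mq tag we sent, which is how the reply
 * is matched to its request below.
 */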
/* ERR_PTR returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;

	reply.magic = 0;
	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, index, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error, shut everybody down if this wasn't the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		goto error_out;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd, index) != 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	mutex_unlock(&nsock->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bios to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return BLK_MQ_RQ_QUEUE_OK;
}

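/*
 * NBD_SET_SOCK may be issued once per connection; each call grows the
 * socks array. Only the task that added the first socket may add more
 * (tracked via task_setup, reset by NBD_DO_IT or NBD_CLEAR_SOCK).
 */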
static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
			  unsigned long arg)
{
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	socks[nbd->num_connections++] = nsock;

	if (max_part)
		bdev->bd_invalidated = 1;
	return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {};
	int i, ret;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(NBD_CMD_DISC);

	for (i = 0; i < nbd->num_connections; i++) {
		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev)
{
	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!nbd->socks)
		return -EINVAL;

	mutex_unlock(&nbd->config_lock);
	fsync_bdev(bdev);
	mutex_lock(&nbd->config_lock);

	/* Check again after getting mutex back. */
	if (!nbd->socks)
		return -EINVAL;

	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &nbd->runtime_flags))
		send_disconnects(nbd);
	return 0;
}

static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	kill_bdev(bdev);
	nbd_bdev_reset(bdev);
	/*
	 * We want to give the run thread a chance to wait for everybody
	 * to clean up and then do its own cleanup.
	 */
	if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) &&
	    nbd->num_connections) {
		int i;

		for (i = 0; i < nbd->num_connections; i++)
			kfree(nbd->socks[i]);
		kfree(nbd->socks);
		nbd->socks = NULL;
		nbd->num_connections = 0;
	}
	nbd->task_setup = NULL;

	return 0;
}

static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev)
{
	struct recv_thread_args *args;
	int num_connections = nbd->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!nbd->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		error = -EINVAL;
		goto out_err;
	}

	set_bit(NBD_RUNNING, &nbd->runtime_flags);
	blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
	args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
	if (!args) {
		error = -ENOMEM;
		goto out_err;
	}
	nbd->task_recv = current;
	mutex_unlock(&nbd->config_lock);

	nbd_parse_flags(nbd, bdev);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		goto out_recv;
	}

	nbd_size_update(nbd, bdev);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		sk_set_memalloc(nbd->socks[i]->sock->sk);
		atomic_inc(&nbd->recv_threads);
		INIT_WORK(&args[i].work, recv_work);
		args[i].nbd = nbd;
		args[i].index = i;
		queue_work(recv_workqueue, &args[i].work);
	}
	wait_event_interruptible(nbd->recv_wq,
				 atomic_read(&nbd->recv_threads) == 0);
	for (i = 0; i < num_connections; i++)
		flush_work(&args[i].work);
	nbd_dev_dbg_close(nbd);
	nbd_size_clear(nbd, bdev);
	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
	mutex_lock(&nbd->config_lock);
	nbd->task_recv = NULL;
out_err:
	clear_bit(NBD_RUNNING, &nbd->runtime_flags);
	nbd_clear_sock(nbd, bdev);

	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		error = 0;
	if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
		error = -ETIMEDOUT;

	nbd_reset(nbd);
	return error;
}

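/*
 * Sketch of the ioctl sequence a typical userspace client (e.g.
 * nbd-client) drives against /dev/nbdX; illustrative only, error
 * handling omitted:
 *
 *	sock = socket(...); connect(sock, ...);	// negotiate with server
 *	ioctl(nbd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(nbd, NBD_SET_SIZE_BLOCKS, nblocks);
 *	ioctl(nbd, NBD_SET_FLAGS, flags);	// as advertised by server
 *	ioctl(nbd, NBD_SET_SOCK, sock);		// repeat per connection
 *	ioctl(nbd, NBD_DO_IT);			// blocks until disconnect
 */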
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd, bdev);
	case NBD_CLEAR_SOCK:
		return nbd_clear_sock(nbd, bdev);
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, bdev, arg);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, bdev, arg,
			     div_s64(nbd->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, bdev, nbd->blksize,
			     div_s64(arg, nbd->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, bdev, nbd->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

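/*
 * tag_set.cmd_size is sizeof(struct nbd_cmd), so every request carries
 * its nbd_cmd inline as the blk-mq PDU; init_request just wires up the
 * back-pointer to the owning device once at tag-set setup time.
 */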
static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	nbd->magic = 0;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		put_disk(disk);
	}
	kfree(nbd);
}

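/*
 * Create one nbd device: index >= 0 requests that exact minor (-EEXIST
 * if it is already taken), index < 0 takes the first free slot in the IDR.
 */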
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	disk->queue->limits.discard_zeroes_data = 0;
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	nbd->magic = NBD_MAGIC;
	mutex_init(&nbd->config_lock);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	init_waitqueue_head(&nbd->recv_wq);
	nbd_reset(nbd);
	add_disk(disk);
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

/*
 * And here should be modules and kernel interface
 * (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partitions kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;

	nbd_dev_remove(nbd);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	nbd_dbg_close();

	idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
	idr_destroy(&nbd_index_idr);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");