/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

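/*
 * One of these per configured connection to the server: the socket itself
 * plus a mutex that serializes transmissions on that socket.
 */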
struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
};

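/* Bits in nbd_device->runtime_flags */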
#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3

struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	int blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

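/*
 * Per-request driver data, carved out of the blk-mq request pdu (see
 * tag_set.cmd_size in nbd_init()).  send_complete lets the receive path
 * wait until the submission path has stopped touching the request's bios;
 * see the comment in nbd_queue_rq().
 */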
struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bdev->bd_inode->i_size = 0;
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	if (!nbd_is_connected(nbd))
		return;

	bdev->bd_inode->i_size = nbd->bytesize;
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			int blocksize, int nr_blocks)
{
	int ret;

	ret = set_blocksize(bdev, blocksize);
	if (ret)
		return ret;

	nbd->blksize = blocksize;
	nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;

	nbd_size_update(nbd, bdev);

	return 0;
}

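/*
 * Complete a request back to blk-mq, turning any recorded error
 * (req->errors) into -EIO.
 */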
static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

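/*
 * blk-mq timeout handler: mark the device as timed out, error the request
 * and tear the sockets down so every in-flight command errors out.  Driver
 * private requests are excluded because their submitter already holds
 * config_lock and taking it again here would deadlock.
 */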
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	/*
	 * If our disconnect packet times out then we're already holding the
	 * config_lock and could deadlock here, so just set an error and return,
	 * we'll handle shutting everything down later.
	 */
	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		return BLK_EH_HANDLED;
	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
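/*
 * Loops until the whole buffer has been transferred (or an error occurs) on
 * the connection selected by @index.  PF_MEMALLOC is set around the transfer
 * loop, and NBD_DO_IT marks the sockets with sk_set_memalloc(), so the
 * network stack may dip into memory reserves while the driver is doing
 * writeback under memory pressure.
 */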
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
		     int size, int msg_flags)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec, int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

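/*
 * nbd_send_cmd() marshals the NBD request header for @cmd and, for writes,
 * streams the bio payload down the connection picked by @index.  The blk-mq
 * unique tag is stashed in the NBD handle so the reply can be matched back
 * to the request in nbd_read_stat().
 */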
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	flags = 0;
	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);

			if (is_last)
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}

static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
			   bvec->bv_len, MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

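/*
 * Read one reply from the connection selected by @index: validate the magic,
 * map the handle back to an outstanding request via its blk-mq unique tag,
 * and for reads copy the payload into the request's segments.  For writes it
 * waits until the submission path has finished with the request before
 * returning; see nbd_queue_rq().
 */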
/* Returns an ERR_PTR when something went wrong; inform userspace. */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;

	reply.magic = 0;
	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, index, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

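/*
 * One instance runs per connection on system_long_wq (queued from
 * NBD_DO_IT).  It keeps completing replies until nbd_read_stat() fails,
 * shuts the remaining sockets down unless the failure came from a requested
 * disconnect, and finally wakes up the waiting NBD_DO_IT thread.
 */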
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error, shut everybody down if this wasn't the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

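/*
 * Validate the request and the chosen connection, then transmit it while
 * holding that connection's tx_lock.  Any failure completes the request
 * with an error.
 */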
static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;

	if (index >= nbd->num_connections) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on invalid socket\n");
		goto error_out;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	if (req->cmd_type != REQ_TYPE_FS &&
	    req->cmd_type != REQ_TYPE_DRV_PRIV)
		goto error_out;

	if (req->cmd_type == REQ_TYPE_FS &&
	    rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd, index) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	mutex_unlock(&nsock->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send.  This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return BLK_MQ_RQ_QUEUE_OK;
}

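/*
 * NBD_SET_SOCK calls this once per connection before NBD_DO_IT.  Only the
 * task that started configuring the device may add further sockets.
 */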
static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
	struct nbd_sock **socks;
	struct nbd_sock *nsock;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	socks[nbd->num_connections++] = nsock;

	return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	int i;

	for (i = 0; i < nbd->num_connections; i++)
		kfree(nbd->socks[i]);
	kfree(nbd->socks);
	nbd->socks = NULL;
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	nbd->num_connections = 0;
	nbd->task_setup = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {};
	int i, ret;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(NBD_CMD_DISC);

	for (i = 0; i < nbd->num_connections; i++) {
		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

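/*
 * Ioctl backend.  NBD_DO_IT is the long-running one: it queues one receive
 * work item per connection, then sleeps until all of them have exited
 * before tearing the device back down.
 */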
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->socks)
			return -EINVAL;

		mutex_unlock(&nbd->config_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->config_lock);

		/* Check again after getting mutex back.  */
		if (!nbd->socks)
			return -EINVAL;

		if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
				      &nbd->runtime_flags))
			send_disconnects(nbd);
		return 0;
	}

	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);
		/*
		 * We want to give the run thread a chance to wait for everybody
		 * to clean up and then do its own cleanup.
		 */
		if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
			int i;

			for (i = 0; i < nbd->num_connections; i++)
				kfree(nbd->socks[i]);
			kfree(nbd->socks);
			nbd->socks = NULL;
			nbd->num_connections = 0;
		}
		return 0;

	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_add_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}

	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		return nbd_size_set(nbd, bdev, arg, bsize);
	}

	case NBD_SET_SIZE:
		return nbd_size_set(nbd, bdev, nbd->blksize,
				    arg / nbd->blksize);

	case NBD_SET_SIZE_BLOCKS:
		return nbd_size_set(nbd, bdev, nbd->blksize, arg);

	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_DO_IT: {
		struct recv_thread_args *args;
		int num_connections = nbd->num_connections;
		int error, i;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->socks)
			return -EINVAL;
		if (num_connections > 1 &&
		    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
			dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
			goto out_err;
		}

		set_bit(NBD_RUNNING, &nbd->runtime_flags);
		blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
		args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
		if (!args)
			goto out_err;
		nbd->task_recv = current;
		mutex_unlock(&nbd->config_lock);

		nbd_parse_flags(nbd, bdev);

		error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
		if (error) {
			dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
			goto out_recv;
		}

		nbd_size_update(nbd, bdev);

		nbd_dev_dbg_init(nbd);
		for (i = 0; i < num_connections; i++) {
			sk_set_memalloc(nbd->socks[i]->sock->sk);
			atomic_inc(&nbd->recv_threads);
			INIT_WORK(&args[i].work, recv_work);
			args[i].nbd = nbd;
			args[i].index = i;
			queue_work(system_long_wq, &args[i].work);
		}
		wait_event_interruptible(nbd->recv_wq,
					 atomic_read(&nbd->recv_threads) == 0);
		for (i = 0; i < num_connections; i++)
			flush_work(&args[i].work);
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd, bdev);
		device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
		mutex_lock(&nbd->config_lock);
		nbd->task_recv = NULL;
out_err:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);

		/* user requested, ignore socket errors */
		if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			error = 0;
		if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
			error = -ETIMEDOUT;

		nbd_reset(nbd);
		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

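/*
 * blk-mq init_request callback: point each request's pdu at its owning
 * nbd_device (passed in as tag_set.driver_data).
 */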
static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

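/*
 * Module init: allocate nbds_max devices, give each a single-hw-queue
 * blk-mq tag set with BLK_MQ_F_BLOCKING (the submission path sleeps on
 * mutexes and socket I/O), then register the block major and add the disks.
 */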
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;

		nbd_dev[i].tag_set.ops = &nbd_mq_ops;
		nbd_dev[i].tag_set.nr_hw_queues = 1;
		nbd_dev[i].tag_set.queue_depth = 128;
		nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
		nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
		nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
		nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

		err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
		if (err) {
			put_disk(disk);
			goto out;
		}

		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
		if (!disk->queue) {
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
			goto out;
		}

		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		mutex_init(&nbd_dev[i].config_lock);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		init_waitqueue_head(&nbd_dev[i].recv_wq);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_mq_free_tag_set(&nbd_dev[i].tag_set);
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");