/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

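/*
 * Overview of the request flow through this driver (as implemented by
 * the code below): do_nbd_request() takes requests off the block
 * layer's queue and parks them on waiting_queue; the nbd_thread
 * kthread pops them, transmits them over nbd->sock and moves them to
 * queue_head; the NBD_DO_IT ioctl caller sits in nbd_do_it() receiving
 * replies and completing the matching requests. tx_lock serializes
 * transmission and socket teardown; queue_lock protects the two lists.
 */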
struct nbd_device {
	int flags;
	struct socket * sock;	/* If == NULL, device is not ready, yet */
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests waiting result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	int xmit_timeout;
	bool disconnect; /* a disconnect has been requested by user */

	struct timer_list timeout_timer;
	struct task_struct *task_recv;
	struct task_struct *task_send;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	if (!nbd->sock)
		return;

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	nbd->sock = NULL;
	del_timer_sync(&nbd->timeout_timer);
}

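/*
 * Transmission timeout handling: sock_xmit() re-arms timeout_timer after
 * every successful receive, and nbd_handle_req() arms it when the first
 * request goes out on an idle connection. If the timer fires while
 * requests are still in flight, the handler below flags a disconnect
 * and SIGKILLs both the sender and receiver tasks, which unwinds
 * through the signal checks in nbd_thread() and nbd_do_it().
 */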
static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	struct task_struct *task;

	if (list_empty(&nbd->queue_head))
		return;

	nbd->disconnect = true;

	task = READ_ONCE(nbd->task_recv);
	if (task)
		force_sig(SIGKILL, task);

	task = READ_ONCE(nbd->task_send);
	if (task)
		force_sig(SIGKILL, task);	/* use the READ_ONCE() snapshot, not a fresh read */

	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	if (!send && nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

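/*
 * On-the-wire request layout (struct nbd_request in <linux/nbd.h>, 28
 * bytes, all multi-byte fields big-endian):
 *
 *	__be32 magic;		NBD_REQUEST_MAGIC
 *	__be32 type;		NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *	char   handle[8];	opaque cookie, echoed back in the reply
 *	__be64 from;		byte offset into the export
 *	__be32 len;		payload length in bytes
 *
 * nbd_send_req() below stores the struct request pointer in the handle
 * so that nbd_read_stat() can find the request again when the server's
 * reply comes back.
 */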
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req->cmd_flags & REQ_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req->cmd_flags & REQ_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}

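/*
 * Reply matching: the receiver may see a reply for a request that
 * nbd_handle_req() has sent but not yet linked onto queue_head, so
 * nbd_find_request() first waits on active_wq until that request is no
 * longer the one being transmitted before scanning the list.
 */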
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}

static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/* An ERR_PTR return means something went wrong; inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			return ERR_PTR(result);

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		return ERR_PTR(-EBADR);
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};
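
/*
 * The pid attribute appears as /sys/block/nbd<i>/pid while a receiver
 * is attached; e.g. "cat /sys/block/nbd0/pid" prints the pid of the
 * process blocked in the NBD_DO_IT ioctl.
 */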

static int nbd_do_it(struct nbd_device *nbd)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);

	nbd->task_recv = current;

	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		nbd->task_recv = NULL;
		return ret;
	}

	while (1) {
		req = nbd_read_stat(nbd);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		nbd_end_request(nbd, req);
	}

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);

	nbd->task_recv = NULL;

	if (signal_pending(current)) {
		siginfo_t info;

		ret = dequeue_signal_lock(current, &current->blocked, &info);
		dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
			 task_pid_nr(current), current->comm, ret);
		mutex_lock(&nbd->tx_lock);
		sock_shutdown(nbd);
		mutex_unlock(&nbd->tx_lock);
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

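/*
 * nbd_handle_req() runs in the sender kthread. It holds tx_lock for the
 * whole send so that the request header and payload go out back-to-back
 * and so that NBD_CLEAR_SOCK/sock_shutdown() cannot yank the socket
 * away mid-request.
 */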
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}

static int nbd_thread(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	nbd->task_send = current;

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		if (signal_pending(current)) {
			siginfo_t info;
			int ret;

			ret = dequeue_signal_lock(current, &current->blocked,
						  &info);
			dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
				 task_pid_nr(current), current->comm, ret);
			mutex_lock(&nbd->tx_lock);
			sock_shutdown(nbd);
			mutex_unlock(&nbd->tx_lock);
			break;
		}

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}

	nbd->task_send = NULL;

	return 0;
}

/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

static void do_nbd_request(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (cmd_type=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err(disk_to_dev(nbd->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

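/*
 * A minimal sketch of how user space (e.g. nbd-client) is expected to
 * drive these ioctls; everything other than the ioctl names is
 * illustrative and error handling is omitted:
 *
 *	int dev = open("/dev/nbd0", O_RDWR);
 *	ioctl(dev, NBD_SET_SOCK, sock_fd);
 *	ioctl(dev, NBD_SET_BLKSIZE, 4096);
 *	ioctl(dev, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(dev, NBD_DO_IT);	blocks until disconnect or error
 */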
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back.  */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = true;

		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK: {
		struct socket *sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		if (sock)
			sockfd_put(sock);
		return 0;
	}

	case NBD_SET_SOCK: {
		struct socket *sock;
		int err;
		if (nbd->sock)
			return -EBUSY;
		sock = sockfd_lookup(arg, &err);
		if (sock) {
			nbd->sock = sock;
			if (max_part > 0)
				bdev->bd_invalidated = 1;
			nbd->disconnect = false; /* we're connected now */
			return 0;
		}
		return -EINVAL;
	}

	case NBD_SET_BLKSIZE:
		nbd->blksize = arg;
		nbd->bytesize &= ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		nbd->bytesize = arg & ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		if (arg)
			mod_timer(&nbd->timeout_timer,
				  jiffies + nbd->xmit_timeout);
		else
			del_timer_sync(&nbd->timeout_timer);

		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		nbd->bytesize = ((u64) arg) * nbd->blksize;
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		struct socket *sock;
		int error;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		if (nbd->flags & NBD_FLAG_READ_ONLY)
			set_device_ro(bdev, true);
		if (nbd->flags & NBD_FLAG_SEND_TRIM)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
				nbd->disk->queue);
		if (nbd->flags & NBD_FLAG_SEND_FLUSH)
			blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
		else
			blk_queue_flush(nbd->disk->queue, 0);

		thread = kthread_run(nbd_thread, nbd, "%s",
				     nbd_name(nbd));
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}

		nbd_dev_dbg_init(nbd);
		error = nbd_do_it(nbd);
		nbd_dev_dbg_close(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);

		sock_shutdown(nbd);
		sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
		set_device_ro(bdev, false);
		if (sock)
			sockfd_put(sock);
		nbd->flags = 0;
		nbd->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(nbd->disk, 0);
		if (max_part > 0)
			blkdev_reread_part(bdev);
		if (nbd->disconnect) /* user requested, ignore socket errors */
			return 0;
		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
	if (nbd->task_send)
		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

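/*
 * Per-device debugfs layout created below, typically mounted at
 * /sys/kernel/debug:
 * nbd/<disk_name>/{tasks,size_bytes,timeout,blocksize,flags}.
 */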
875static int nbd_dev_dbg_init(struct nbd_device *nbd)
876{
877 struct dentry *dir;
878 struct dentry *f;
879
880 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
881 if (IS_ERR_OR_NULL(dir)) {
882 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n",
883 nbd_name(nbd), PTR_ERR(dir));
884 return PTR_ERR(dir);
885 }
886 nbd->dbg_dir = dir;
887
888 f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
889 if (IS_ERR_OR_NULL(f)) {
890 dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n",
891 PTR_ERR(f));
892 return PTR_ERR(f);
893 }
894
895 f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
896 if (IS_ERR_OR_NULL(f)) {
897 dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n",
898 PTR_ERR(f));
899 return PTR_ERR(f);
900 }
901
902 f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
903 if (IS_ERR_OR_NULL(f)) {
904 dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n",
905 PTR_ERR(f));
906 return PTR_ERR(f);
907 }
908
909 f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
910 if (IS_ERR_OR_NULL(f)) {
911 dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n",
912 PTR_ERR(f));
913 return PTR_ERR(f);
914 }
915
	f = debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
	if (IS_ERR_OR_NULL(f)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n",
			PTR_ERR(f));
		return PTR_ERR(f);
	}

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (IS_ERR(dbg_dir))
		return PTR_ERR(dbg_dir);

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

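	/*
	 * Worked example: max_part = 15 gives part_shift = fls(15) = 4,
	 * i.e. 16 minors per device (partition 0 is the whole disk), and
	 * caps nbds_max at 1 << (MINORBITS - 4) = 65536 devices.
	 */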
	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_timer(&nbd_dev[i].timeout_timer);
		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
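
/*
 * Typical usage, as an illustrative sketch: load the module with e.g.
 * "modprobe nbd max_part=15", then attach a server with the standard
 * user-space tool, e.g. "nbd-client <host> <port> /dev/nbd0".
 */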