/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
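/*
 * Per-device state.  waiting_queue holds requests that have not been sent
 * yet (consumed by the nbd_thread kernel thread); queue_head holds requests
 * that have been sent and are waiting for a reply from the server.
 * tx_lock serializes transmission on the socket; queue_lock protects the
 * two lists.
 */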
struct nbd_device {
	int flags;
	int harderror;		/* Code of hard error			*/
	struct socket * sock;	/* If == NULL, device is not ready, yet	*/
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests waiting result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	pid_t pid; /* pid of nbd-client, if attached */
	int xmit_timeout;
	int disconnect; /* a disconnect has been requested by user */
};

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

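/*
 * Complete a request back to the block layer; any error reported by the
 * server is folded into a single -EIO.
 */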
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
	if (lock)
		mutex_lock(&nbd->tx_lock);
	if (nbd->sock) {
		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
		nbd->sock = NULL;
	}
	if (lock)
		mutex_unlock(&nbd->tx_lock);
}

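/*
 * Transmit-timeout handler: the timer is armed around kernel_sendmsg() in
 * sock_xmit() and, on expiry, kills the transmitting task with SIGKILL so
 * the pending send is aborted and the socket gets shut down.
 */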
static void nbd_xmit_timeout(unsigned long arg)
{
	struct task_struct *task = (struct task_struct *)arg;

	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
		task->comm, task->pid);
	force_sig(SIGKILL, task);
}

/*
 * Send or receive packet.  Only SIGKILL may interrupt the transfer; the
 * task is marked PF_MEMALLOC so the transfer can make progress even under
 * memory pressure.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			if (nbd->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + nbd->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (nbd->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(nbd, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

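/*
 * Helper: kmap a bio_vec's page and push its bytes to the server via
 * sock_xmit().
 */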
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

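/*
 * Serialize a block request into an NBD request header (magic, command
 * type, offset, length, handle) and send it, followed by the bio_vec
 * payload when the command is a write.
 */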
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req->cmd_flags & REQ_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req->cmd_flags & REQ_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}

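/*
 * Match a reply handle back to a pending request: wait until the request
 * is no longer in flight on the socket (active_req), then look it up on
 * queue_head and unlink it.
 */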
static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}

static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/*
 * Read one reply from the server: the reply header first, then (for reads)
 * the payload into the matching request's bio_vecs.
 * NULL returned = something went wrong, inform userspace.
 */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
harderror:
	nbd->harderror = result;
	return NULL;
}

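/*
 * sysfs attribute: expose the pid of the attached nbd-client as a "pid"
 * file under the gendisk's device.
 */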
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%ld\n",
		(long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

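/*
 * Receive loop, run in the context of the nbd-client process while it is
 * blocked in NBD_DO_IT: record the client pid, then complete replies from
 * the server until nbd_read_stat() fails (socket shut down or protocol
 * error).
 */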
static int nbd_do_it(struct nbd_device *nbd)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);
	nbd->pid = task_pid_nr(current);
	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		nbd->pid = 0;
		return ret;
	}

	while ((req = nbd_read_stat(nbd)) != NULL)
		nbd_end_request(nbd, req);

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
	nbd->pid = 0;
	return 0;
}

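/*
 * Fail every request that is still queued, both those waiting for a reply
 * and those that were never sent.
 */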
static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
}

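/*
 * Called from nbd_thread for each queued request: send it to the server
 * under tx_lock and, on success, move it to queue_head to await the reply.
 */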
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}

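/*
 * Per-device kernel thread (started by NBD_DO_IT): sleeps until requests
 * appear on waiting_queue, then dequeues and transmits them one at a time.
 */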
static int nbd_thread(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}
	return 0;
}

/*
 * We always wait for the result of a write, for now. It would be nice to
 * make it optional in the future, e.g.:
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *	{ printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

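/*
 * Request function, called by the block layer with q->queue_lock held.
 * It never touches the network itself: each request is dropped onto
 * waiting_queue and nbd_thread is woken to do the actual transmission.
 */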
static void do_nbd_request(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err(disk_to_dev(nbd->disk),
				"Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}

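/*
 * ioctl interface.  A userspace client (typically nbd-client) sets up the
 * device with NBD_SET_SOCK and the size/blocksize ioctls, then calls
 * NBD_DO_IT, which blocks in the receive loop for the lifetime of the
 * connection.  NBD_DISCONNECT and NBD_CLEAR_SOCK tear the connection down.
 */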
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back.  */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = 1;

		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK: {
		struct socket *sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		if (sock)
			sockfd_put(sock);
		return 0;
	}

	case NBD_SET_SOCK: {
		struct socket *sock;
		int err;
		if (nbd->sock)
			return -EBUSY;
		sock = sockfd_lookup(arg, &err);
		if (sock) {
			nbd->sock = sock;
			if (max_part > 0)
				bdev->bd_invalidated = 1;
			nbd->disconnect = 0; /* we're connected now */
			return 0;
		}
		return -EINVAL;
	}

	case NBD_SET_BLKSIZE:
		nbd->blksize = arg;
		nbd->bytesize &= ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_SIZE:
		nbd->bytesize = arg & ~(nbd->blksize-1);
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_SET_SIZE_BLOCKS:
		nbd->bytesize = ((u64) arg) * nbd->blksize;
		bdev->bd_inode->i_size = nbd->bytesize;
		set_blocksize(bdev, nbd->blksize);
		set_capacity(nbd->disk, nbd->bytesize >> 9);
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		struct socket *sock;
		int error;

		if (nbd->pid)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		if (nbd->flags & NBD_FLAG_READ_ONLY)
			set_device_ro(bdev, true);
		if (nbd->flags & NBD_FLAG_SEND_TRIM)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
				nbd->disk->queue);
		if (nbd->flags & NBD_FLAG_SEND_FLUSH)
			blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
		else
			blk_queue_flush(nbd->disk->queue, 0);

		thread = kthread_run(nbd_thread, nbd, "%s",
				     nbd->disk->disk_name);
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}

		error = nbd_do_it(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);
		if (error)
			return error;
		sock_shutdown(nbd, 0);
		sock = nbd->sock;
		nbd->sock = NULL;
		nbd_clear_que(nbd);
		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
		kill_bdev(bdev);
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
		set_device_ro(bdev, false);
		if (sock)
			sockfd_put(sock);
		nbd->flags = 0;
		nbd->bytesize = 0;
		bdev->bd_inode->i_size = 0;
		set_capacity(nbd->disk, 0);
		if (max_part > 0)
			blkdev_reread_part(bdev);
		if (nbd->disconnect) /* user requested, ignore socket errors */
			return 0;
		return nbd->harderror;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

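/*
 * Module init: validate nbds_max/max_part, allocate the nbd_dev array and
 * one gendisk plus request queue per device, then register the block major
 * and add the disks (capacity stays 0 until a client attaches).
 */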
static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can know the max number of
		 * partition kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");