/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_HAS_PID_FILE		3
#define NBD_HAS_CONFIG_REF		4
#define NBD_BOUND			5
#define NBD_DESTROY_ON_DISCONNECT	6

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;
};

struct nbd_cmd {
	struct nbd_device *nbd;
	int index;
	int cookie;
	struct completion send_complete;
	blk_status_t status;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}
	kfree(nbd);
}

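/*
 * Drop a reference on the nbd device.  The last put removes the device
 * from the idr under nbd_index_mutex and frees it via nbd_dev_remove().
 */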
static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		mutex_unlock(&nbd_index_mutex);
		nbd_dev_remove(nbd);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
}

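/*
 * Mark a connection dead: shut the socket down, drop it from the live
 * connection count and clear any partially sent request.  If @notify is
 * set (and no disconnect was requested), queue nbd_dead_link_work so the
 * dead link can be reported.
 */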
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		atomic_dec(&nbd->config->live_connections);
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	nbd_size_update(nbd);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", cmd,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shut down the sockets, causing all listeners to error.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

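/*
 * blk-mq timeout handler.  If other connections are available, the timed
 * out request is requeued so the submit path can move it to a live
 * socket; otherwise the device is shut down and the request is failed.
 */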
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		return BLK_EH_HANDLED;
	}

	/* If we are waiting on our dead timer then we could get timeout
	 * callbacks for our request.  For this we just want to reset the timer
	 * and let the queue side take care of everything.
	 */
	if (!completion_done(&cmd->send_complete)) {
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}
	config = nbd->config;

	if (config->num_connections > 1) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying\n");
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection.
		 */
		if (config->socks && config->num_connections > 1) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			blk_mq_requeue_request(req, true);
			nbd_config_put(nbd);
			return BLK_EH_NOT_HANDLED;
		}
	} else {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out\n");
	}
	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	sock_shutdown(nbd);
	nbd_config_put(nbd);

	return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 nbd_cmd_flags = 0;
	u32 tag = blk_mq_unique_tag(req);
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);
			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	if (result <= 0) {
		if (result == -ERESTARTSYS) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			iov_iter_bvec(&from, ITER_BVEC | WRITE,
				      &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (result == -ERESTARTSYS) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;

	reply.magic = 0;
	iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_BVEC | READ,
				      &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					return cmd;
				}
				return ERR_PTR(-EIO);
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

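/*
 * Per-connection receive worker: keep reading replies off one socket and
 * completing the matching requests until the socket returns an error,
 * then mark that connection dead.
 */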
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	cmd->status = BLK_STS_IOERR;
	blk_mq_complete_request(req);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

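/*
 * Find a live connection to use in place of the dead socket at @index.
 * Returns the index of a usable fallback, or a negative value if every
 * other connection is dead too.
 */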
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

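/*
 * Give a dead connection up to dead_conn_timeout to be replaced.
 * Returns non-zero if a live connection showed up to retry on.
 */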
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
		return 0;
	wait_event_interruptible_timeout(config->conn_wait,
					 atomic_read(&config->live_connections),
					 config->dead_conn_timeout);
	return atomic_read(&config->live_connections);
}

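/*
 * Send one request on the connection at @index, falling back to another
 * socket (or waiting for a reconnect) if that connection is dead.  A
 * partially sent request pinned to the socket must finish before any
 * other request may be sent on it.
 */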
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first.  We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	if (unlikely(nsock->pending && nsock->pending != req)) {
		blk_mq_requeue_request(req, true);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed trying another connection\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		mutex_unlock(&nsock->tx_lock);
		goto again;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send.  This keeps us from dereferencing
	 * freed data if we have particularly fast completions (i.e. we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail.  In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
}

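/*
 * Look up a socket from a userspace file descriptor and append it to the
 * config's socket array.  @netlink skips the legacy-ioctl check that only
 * one task may set up sockets on a device.
 */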
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

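/*
 * Swap a freshly supplied socket in for the first dead connection found.
 * Bumping nsock->cookie keeps replies and timeouts for requests sent on
 * the old socket from being attributed to the new one; a new receive
 * worker is queued for the revived link.
 */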
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(recv_workqueue, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &config->runtime_flags))
		send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

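/*
 * Drop a reference on the config.  The final put tears down the sockets
 * and the pid attribute, frees the config, then drops the device and
 * module references.
 */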
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

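/*
 * Bring the device up: queue one receive worker per connection and match
 * the number of blk-mq hardware queues to the number of connections.
 */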
Josef Bacike46c7282017-04-06 17:02:00 -04001039static int nbd_start_device(struct nbd_device *nbd)
Josef Bacik9442b732017-02-07 17:10:22 -05001040{
Josef Bacik5ea8d102017-04-06 17:01:58 -04001041 struct nbd_config *config = nbd->config;
1042 int num_connections = config->num_connections;
Josef Bacik9442b732017-02-07 17:10:22 -05001043 int error = 0, i;
1044
1045 if (nbd->task_recv)
1046 return -EBUSY;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001047 if (!config->socks)
Josef Bacik9442b732017-02-07 17:10:22 -05001048 return -EINVAL;
1049 if (num_connections > 1 &&
Josef Bacik5ea8d102017-04-06 17:01:58 -04001050 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
Josef Bacik9442b732017-02-07 17:10:22 -05001051 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001052 return -EINVAL;
Josef Bacik9442b732017-02-07 17:10:22 -05001053 }
1054
Josef Bacik5ea8d102017-04-06 17:01:58 -04001055 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
Josef Bacik9442b732017-02-07 17:10:22 -05001056 nbd->task_recv = current;
Josef Bacik9442b732017-02-07 17:10:22 -05001057
Josef Bacik29eaadc2017-04-06 17:01:59 -04001058 nbd_parse_flags(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001059
1060 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1061 if (error) {
1062 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
Josef Bacik5ea8d102017-04-06 17:01:58 -04001063 return error;
Josef Bacik9442b732017-02-07 17:10:22 -05001064 }
Josef Bacik29eaadc2017-04-06 17:01:59 -04001065 set_bit(NBD_HAS_PID_FILE, &config->runtime_flags);
Josef Bacik9442b732017-02-07 17:10:22 -05001066
1067 nbd_dev_dbg_init(nbd);
1068 for (i = 0; i < num_connections; i++) {
Josef Bacik5ea8d102017-04-06 17:01:58 -04001069 struct recv_thread_args *args;
1070
1071 args = kzalloc(sizeof(*args), GFP_KERNEL);
1072 if (!args) {
1073 sock_shutdown(nbd);
1074 return -ENOMEM;
1075 }
1076 sk_set_memalloc(config->socks[i]->sock->sk);
Josef Bacikdc88e342017-06-08 15:39:30 -04001077 config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001078 atomic_inc(&config->recv_threads);
1079 refcount_inc(&nbd->config_refs);
1080 INIT_WORK(&args->work, recv_work);
1081 args->nbd = nbd;
1082 args->index = i;
1083 queue_work(recv_workqueue, &args->work);
Josef Bacik9442b732017-02-07 17:10:22 -05001084 }
Josef Bacike46c7282017-04-06 17:02:00 -04001085 return error;
1086}
1087
1088static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1089{
1090 struct nbd_config *config = nbd->config;
1091 int ret;
1092
1093 ret = nbd_start_device(nbd);
1094 if (ret)
1095 return ret;
1096
1097 bd_set_size(bdev, config->bytesize);
1098 if (max_part)
1099 bdev->bd_invalidated = 1;
1100 mutex_unlock(&nbd->config_lock);
1101 ret = wait_event_interruptible(config->recv_wq,
Josef Bacik5ea8d102017-04-06 17:01:58 -04001102 atomic_read(&config->recv_threads) == 0);
Josef Bacike46c7282017-04-06 17:02:00 -04001103 if (ret)
Josef Bacik5ea8d102017-04-06 17:01:58 -04001104 sock_shutdown(nbd);
Josef Bacik9442b732017-02-07 17:10:22 -05001105 mutex_lock(&nbd->config_lock);
Josef Bacike46c7282017-04-06 17:02:00 -04001106 bd_set_size(bdev, 0);
Josef Bacik9442b732017-02-07 17:10:22 -05001107 /* user requested, ignore socket errors */
Josef Bacik5ea8d102017-04-06 17:01:58 -04001108 if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001109 ret = 0;
Josef Bacik5ea8d102017-04-06 17:01:58 -04001110 if (test_bit(NBD_TIMEDOUT, &config->runtime_flags))
Josef Bacike46c7282017-04-06 17:02:00 -04001111 ret = -ETIMEDOUT;
1112 return ret;
Josef Bacik9442b732017-02-07 17:10:22 -05001113}
Markus Pargmann30d53d92015-08-17 08:20:06 +02001114
Josef Bacik29eaadc2017-04-06 17:01:59 -04001115static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1116 struct block_device *bdev)
1117{
Josef Bacik2516ab12017-04-06 17:02:03 -04001118 sock_shutdown(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001119 kill_bdev(bdev);
1120 nbd_bdev_reset(bdev);
Josef Bacike46c7282017-04-06 17:02:00 -04001121 if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
1122 &nbd->config->runtime_flags))
1123 nbd_config_put(nbd);
Josef Bacik29eaadc2017-04-06 17:01:59 -04001124}
1125
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		if (arg) {
			nbd->tag_set.timeout = arg * HZ;
			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
		}
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

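/*
 * Allocate a zeroed per-connection configuration. The try_module_get() here
 * is presumably what keeps nbd.ko pinned while a configuration is live, with
 * the matching module_put() happening once the last config reference drops.
 */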
static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = 1024;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}

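/*
 * open(2) on /dev/nbdX: take a device reference and, if the device has no
 * live configuration yet, allocate one. The second refcount_inc_not_zero()
 * under config_lock handles the race with another opener installing a
 * config first.
 */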
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;
	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

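/*
 * Debugfs interface: with CONFIG_DEBUG_FS enabled, each device gets a
 * directory under "nbd" exposing its tasks, size, timeout, block size and
 * server flags; otherwise the stubs below compile to nothing.
 */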
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

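/* Record the owning device in the per-request PDU once, at tag set setup. */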
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

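/*
 * Create and register one device: allocate the gendisk and blk-mq tag set,
 * claim an index in the IDR (the caller's index if >= 0, otherwise the
 * first free slot), apply the queue limits, and add /dev/nbd<index>.
 */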
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

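/* idr_for_each() callback: pick the first device with no active config. */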
static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}

/* Netlink interface. */
static struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
};

static struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};

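/*
 * NBD_CMD_CONNECT: bind server connections to a device. Without an index,
 * the first unconfigured device is reused (or a new one is created); size,
 * block size, timeouts, flags and the socket list are then taken from the
 * netlink attributes before the device is started.
 */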
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_BOUND, &config->runtime_flags);

	if (info->attrs[NBD_ATTR_SIZE_BYTES]) {
		u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
		nbd_size_set(nbd, config->blksize,
			     div64_u64(bytes, config->blksize));
	}
	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		u64 bsize =
			nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize));
	}
	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);
			put_dev = true;
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

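/* NBD_CMD_DISCONNECT: look the device up by index and tear the link down. */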
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	mutex_unlock(&nbd->config_lock);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}

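/*
 * NBD_CMD_RECONFIGURE: adjust timeouts and client flags on an already
 * running device, and feed it replacement sockets for dead links via
 * nbd_reconnect_socket().
 */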
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = -EINVAL;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		goto out;
	}

	if (info->attrs[NBD_ATTR_TIMEOUT]) {
		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
		nbd->tag_set.timeout = timeout * HZ;
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	}
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
					       nbd_sock_policy, info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.policy	= nbd_attr_policy,
		.doit	= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= nbd_connect_genl_ops,
	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};

static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

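/*
 * NBD_CMD_STATUS: reply with an NBD_ATTR_DEVICE_LIST nest describing one
 * device, or every registered device when no index is supplied.
 */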
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	genlmsg_reply(reply, info);
	ret = 0;
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

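/* Unicast the chosen device index back to the NBD_CMD_CONNECT sender. */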
static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

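/*
 * Multicast an NBD_CMD_LINK_DEAD event for the given device, so that a
 * userspace helper listening on the group can react, typically by handing
 * the kernel a replacement socket via NBD_CMD_RECONFIGURE.
 */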
static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}

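/*
 * Module init: validate max_part, derive part_shift from it, create the
 * receive workqueue, register the block major and the generic netlink
 * family, then pre-create nbds_max devices.
 */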
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		destroy_workqueue(recv_workqueue);
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
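
/*
 * Example (illustrative, not from this file): loading the module with four
 * pre-created devices and up to eight partitions each:
 *
 *	modprobe nbd nbds_max=4 max_part=8
 */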