/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED		(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
	unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.resource_name,
				adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

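/* Fill in the DRBD_PEER_AF (envp[3]) and DRBD_PEER_ADDRESS (envp[4])
 * environment strings handed to the userspace helper, derived from the
 * peer address stored in the connection. */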
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_tconn *tconn = mdev->tconn;
	struct sib_info sib;
	int ret;

	if (current == tconn->worker.task)
		set_bit(CALLBACK_PENDING, &tconn->flags);

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (current == tconn->worker.task)
		clear_bit(CALLBACK_PENDING, &tconn->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

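/* Connection-wide variant of drbd_khelper(): the helper gets the
 * resource name (tconn->name) instead of a "minor-<nr>" argument. */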
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

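/* Return the highest fencing policy configured on any volume of this
 * connection that currently has a consistent local disk. */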
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

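/* Run the fence-peer helper and translate its exit code into a request
 * to change the peer disk (or, for exit code 6, our own disk) state.
 * Returns true if the peer disk state afterwards is at most D_OUTDATED. */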
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection
	   in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

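/* Fire-and-forget variant: run conn_try_outdate_peer() from its own
 * kernel thread; the extra kref keeps the connection alive until the
 * thread is done with it. */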
void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

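/* Try to change our role to Primary or Secondary.  Retries the
 * cluster-wide state change up to max_tries times, possibly outdating
 * or fencing the peer in between; with "force", local data that is at
 * least Inconsistent may be declared UpToDate to allow promotion. */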
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

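/* .doit handler for role changes (DRBD_ADM_PRIMARY, else secondary):
 * parses the optional set_role_parms and delegates to drbd_set_role(). */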
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
		break;
	}
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size_sect, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size_sect = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
				     "size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size_sect)
		rv = grew;
	if (size < la_size_sect)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

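/* Apply max_bio_size to our request queue: clamp max_hw_sectors to what
 * the backing device supports (while attached) and stack the remaining
 * queue limits on top of the backing device's queue. */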
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

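/* Recompute the effective maximum BIO size from the local backing device
 * limit and from what the peer can handle (which depends on the agreed
 * protocol version), then reprogram the request queue accordingly. */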
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   From 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (mdev->tconn->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min(local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn) &&
		tconn->cstate == C_STANDALONE;
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}

static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 context slot numbers per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}

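/* Change disk options of an attached device at runtime: copy the current
 * disk_conf, apply the request (optionally resetting to defaults first),
 * clamp the values, resize the activity log and the resync fifo if
 * needed, and publish the new config under conf_update via RCU. */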
Lars Ellenbergf3990022011-03-23 14:31:09 +01001172int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1173{
1174 enum drbd_ret_code retcode;
1175 struct drbd_conf *mdev;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001176 struct disk_conf *new_disk_conf, *old_disk_conf;
Philipp Reisner813472c2011-05-03 16:47:02 +02001177 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001178 int err, fifo_size;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001179
1180 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1181 if (!adm_ctx.reply_skb)
1182 return retcode;
1183 if (retcode != NO_ERROR)
1184 goto out;
1185
1186 mdev = adm_ctx.mdev;
1187
1188 /* we also need a disk
1189 * to change the options on */
1190 if (!get_ldev(mdev)) {
1191 retcode = ERR_NO_DISK;
1192 goto out;
1193 }
1194
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001195 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001196 if (!new_disk_conf) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001197 retcode = ERR_NOMEM;
1198 goto fail;
1199 }
1200
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001201 mutex_lock(&mdev->tconn->conf_update);
1202 old_disk_conf = mdev->ldev->disk_conf;
1203 *new_disk_conf = *old_disk_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001204 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001205 set_disk_conf_defaults(new_disk_conf);
Lars Ellenberg5979e362011-04-27 21:09:55 +02001206
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001207 err = disk_conf_from_attrs_for_change(new_disk_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02001208 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001209 retcode = ERR_MANDATORY_TAG;
1210 drbd_msg_put_info(from_attrs_err_to_txt(err));
1211 }
1212
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001213 if (!expect(new_disk_conf->resync_rate >= 1))
1214 new_disk_conf->resync_rate = 1;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001215
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001216 if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1217 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1218 if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
1219 new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
1220
1221 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1222 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001223
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001224 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
Philipp Reisner9958c852011-05-03 16:19:31 +02001225 if (fifo_size != mdev->rs_plan_s->size) {
Philipp Reisner813472c2011-05-03 16:47:02 +02001226 new_plan = fifo_alloc(fifo_size);
1227 if (!new_plan) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001228			dev_err(DEV, "kmalloc of fifo_buffer failed\n");
1229 retcode = ERR_NOMEM;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001230 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001231 }
1232 }
1233
Lars Ellenberg0ee98e22012-08-20 14:54:48 +02001234 drbd_suspend_io(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001235 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1236 drbd_al_shrink(mdev);
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001237 err = drbd_check_al_size(mdev, new_disk_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001238 lc_unlock(mdev->act_log);
1239 wake_up(&mdev->al_wait);
Lars Ellenberg0ee98e22012-08-20 14:54:48 +02001240 drbd_resume_io(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001241
1242 if (err) {
1243 retcode = ERR_NOMEM;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001244 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001245 }
1246
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001247 write_lock_irq(&global_state_lock);
Andreas Gruenbacher95f8efd2011-05-12 11:15:34 +02001248 retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001249 if (retcode == NO_ERROR) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001250 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
Andreas Gruenbacher95f8efd2011-05-12 11:15:34 +02001251 drbd_resync_after_changed(mdev);
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001252 }
1253 write_unlock_irq(&global_state_lock);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001254
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001255 if (retcode != NO_ERROR)
1256 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001257
Philipp Reisner813472c2011-05-03 16:47:02 +02001258 if (new_plan) {
1259 old_plan = mdev->rs_plan_s;
1260 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
Philipp Reisner9958c852011-05-03 16:19:31 +02001261 }
Philipp Reisner9958c852011-05-03 16:19:31 +02001262
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02001263 mutex_unlock(&mdev->tconn->conf_update);
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001264
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001265 if (new_disk_conf->al_updates)
Philipp Reisner4035e4c2012-10-01 18:04:12 +02001266 mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001267 else
1268 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1269
Lars Ellenberg691631c2012-10-26 00:41:50 +02001270 if (new_disk_conf->md_flushes)
1271 clear_bit(MD_NO_FUA, &mdev->flags);
1272 else
1273 set_bit(MD_NO_FUA, &mdev->flags);
1274
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001275 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
1276
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001277 drbd_md_sync(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001278
1279 if (mdev->state.conn >= C_CONNECTED)
1280 drbd_send_sync_param(mdev);
1281
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001282 synchronize_rcu();
1283 kfree(old_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02001284 kfree(old_plan);
Philipp Reisnercdfda632011-07-05 15:38:59 +02001285 mod_timer(&mdev->request_timer, jiffies + HZ);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001286 goto success;
1287
1288fail_unlock:
1289 mutex_unlock(&mdev->tconn->conf_update);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001290 fail:
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001291 kfree(new_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02001292 kfree(new_plan);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001293success:
1294 put_ldev(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001295 out:
1296 drbd_adm_finish(info, retcode);
1297 return 0;
1298}
1299
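/* Attach a backing device: allocate the new drbd_backing_dev and its
 * disk_conf, open the backing and meta-data block devices exclusively, read
 * the meta data super block, validate sizes and the resync-after dependency,
 * then transition through D_ATTACHING and, depending on the on-disk flags,
 * end up D_INCONSISTENT, D_OUTDATED, D_CONSISTENT or D_UP_TO_DATE
 * (or D_NEGOTIATING while connected). */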
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001300int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001301{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001302 struct drbd_conf *mdev;
1303 int err;
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01001304 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001305 enum determine_dev_size dd;
1306 sector_t max_possible_sectors;
1307 sector_t min_md_device_sectors;
1308 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001309 struct disk_conf *new_disk_conf = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001310 struct block_device *bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001311 struct lru_cache *resync_lru = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001312 struct fifo_buffer *new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001313 union drbd_state ns, os;
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001314 enum drbd_state_rv rv;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001315 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001316
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001317 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1318 if (!adm_ctx.reply_skb)
1319 return retcode;
1320 if (retcode != NO_ERROR)
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001321 goto finish;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001322
1323 mdev = adm_ctx.mdev;
Philipp Reisner0e29d162011-02-18 14:23:11 +01001324 conn_reconfig_start(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001325
1326 /* if you want to reconfigure, please tear down first */
1327 if (mdev->state.disk > D_DISKLESS) {
1328 retcode = ERR_DISK_CONFIGURED;
1329 goto fail;
1330 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001331 /* It may just now have detached because of IO error. Make sure
 1332	 * drbd_ldev_destroy is done already; we may end up here very fast,
1333 * e.g. if someone calls attach from the on-io-error handler,
1334 * to realize a "hot spare" feature (not that I'd recommend that) */
1335 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001336
Lars Ellenberg383606e2012-06-14 14:21:32 +02001337 /* make sure there is no leftover from previous force-detach attempts */
Lars Ellenberg0c849662012-07-30 09:07:28 +02001338 clear_bit(FORCE_DETACH, &mdev->flags);
Lars Ellenbergedc9f5e2012-09-27 15:18:21 +02001339 clear_bit(WAS_IO_ERROR, &mdev->flags);
1340 clear_bit(WAS_READ_ERROR, &mdev->flags);
Lars Ellenberg383606e2012-06-14 14:21:32 +02001341
Lars Ellenberg0029d622012-06-14 18:02:52 +02001342 /* and no leftover from previously aborted resync or verify, either */
1343 mdev->rs_total = 0;
1344 mdev->rs_failed = 0;
1345 atomic_set(&mdev->rs_pending_cnt, 0);
1346
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001347 /* allocation not in the IO path, drbdsetup context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001348 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1349 if (!nbc) {
1350 retcode = ERR_NOMEM;
1351 goto fail;
1352 }
Philipp Reisner9f2247b2012-08-16 14:25:58 +02001353 spin_lock_init(&nbc->md.uuid_lock);
1354
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001355 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1356 if (!new_disk_conf) {
1357 retcode = ERR_NOMEM;
1358 goto fail;
1359 }
1360 nbc->disk_conf = new_disk_conf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001361
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001362 set_disk_conf_defaults(new_disk_conf);
1363 err = disk_conf_from_attrs(new_disk_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001364 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001365 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001366 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001367 goto fail;
1368 }
1369
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001370 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1371 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
Philipp Reisnerd589a212011-05-04 10:06:52 +02001372
Philipp Reisner9958c852011-05-03 16:19:31 +02001373 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1374 if (!new_plan) {
1375 retcode = ERR_NOMEM;
1376 goto fail;
1377 }
1378
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001379 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001380 retcode = ERR_MD_IDX_INVALID;
1381 goto fail;
1382 }
1383
Lars Ellenberga3f8f7d2013-03-27 14:08:43 +01001384 write_lock_irq(&global_state_lock);
1385 retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
1386 write_unlock_irq(&global_state_lock);
1387 if (retcode != NO_ERROR)
1388 goto fail;
1389
Philipp Reisner44ed1672011-04-19 17:10:19 +02001390 rcu_read_lock();
1391 nc = rcu_dereference(mdev->tconn->net_conf);
1392 if (nc) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001393 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001394 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001395 retcode = ERR_STONITH_AND_PROT_A;
1396 goto fail;
1397 }
1398 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001399 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001400
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001401 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001402 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
Tejun Heoe525fd82010-11-13 11:55:17 +01001403 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001404 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001405 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001406 retcode = ERR_OPEN_DISK;
1407 goto fail;
1408 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001409 nbc->backing_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001410
Tejun Heoe525fd82010-11-13 11:55:17 +01001411 /*
1412 * meta_dev_idx >= 0: external fixed size, possibly multiple
1413 * drbd sharing one meta device. TODO in that case, paranoia
1414 * check that [md_bdev, meta_dev_idx] is not yet used by some
1415 * other drbd minor! (if you use drbd.conf + drbdadm, that
1416 * should check it for you already; but if you don't, or
1417 * someone fooled it, we need to double check here)
1418 */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001419 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001420 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001421 (new_disk_conf->meta_dev_idx < 0) ?
Tejun Heod4d77622010-11-13 11:55:18 +01001422 (void *)mdev : (void *)drbd_m_holder);
Tejun Heoe525fd82010-11-13 11:55:17 +01001423 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001424 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001425 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001426 retcode = ERR_OPEN_MD_DISK;
1427 goto fail;
1428 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001429 nbc->md_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001430
Tejun Heoe525fd82010-11-13 11:55:17 +01001431 if ((nbc->backing_bdev == nbc->md_bdev) !=
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001432 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1433 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001434 retcode = ERR_MD_IDX_INVALID;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001435 goto fail;
1436 }
1437
1438 resync_lru = lc_create("resync", drbd_bm_ext_cache,
Lars Ellenberg46a15bc2011-02-21 13:21:01 +01001439 1, 61, sizeof(struct bm_extent),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001440 offsetof(struct bm_extent, lce));
1441 if (!resync_lru) {
1442 retcode = ERR_NOMEM;
Tejun Heoe525fd82010-11-13 11:55:17 +01001443 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001444 }
1445
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01001446 /* Read our meta data super block early.
1447 * This also sets other on-disk offsets. */
1448 retcode = drbd_md_read(mdev, nbc);
1449 if (retcode != NO_ERROR)
1450 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001451
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001452 if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1453 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1454 if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
1455 new_disk_conf->al_extents = drbd_al_extents_max(nbc);
1456
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001457 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001458 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1459 (unsigned long long) drbd_get_max_capacity(nbc),
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001460 (unsigned long long) new_disk_conf->disk_size);
Lars Ellenberg7948bcd2011-06-06 15:36:04 +02001461 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001462 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001463 }
1464
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001465 if (new_disk_conf->meta_dev_idx < 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001466 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1467 /* at least one MB, otherwise it does not make sense */
1468 min_md_device_sectors = (2<<10);
1469 } else {
1470 max_possible_sectors = DRBD_MAX_SECTORS;
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001471 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001472 }
1473
Philipp Reisnerb411b362009-09-25 16:07:19 -07001474 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
Lars Ellenberg7948bcd2011-06-06 15:36:04 +02001475 retcode = ERR_MD_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001476 dev_warn(DEV, "refusing attach: md-device too small, "
1477 "at least %llu sectors needed for this meta-disk type\n",
1478 (unsigned long long) min_md_device_sectors);
Tejun Heoe525fd82010-11-13 11:55:17 +01001479 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001480 }
1481
1482 /* Make sure the new disk is big enough
1483 * (we may currently be R_PRIMARY with no local disk...) */
1484 if (drbd_get_max_capacity(nbc) <
1485 drbd_get_capacity(mdev->this_bdev)) {
Lars Ellenberg7948bcd2011-06-06 15:36:04 +02001486 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001487 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001488 }
1489
1490 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1491
Lars Ellenberg13529942009-10-12 19:07:49 +02001492 if (nbc->known_size > max_possible_sectors) {
1493 dev_warn(DEV, "==> truncating very big lower level device "
1494 "to currently maximum possible %llu sectors <==\n",
1495 (unsigned long long) max_possible_sectors);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001496 if (new_disk_conf->meta_dev_idx >= 0)
Lars Ellenberg13529942009-10-12 19:07:49 +02001497 dev_warn(DEV, "==>> using internal or flexible "
1498 "meta data may help <<==\n");
1499 }
1500
Philipp Reisnerb411b362009-09-25 16:07:19 -07001501 drbd_suspend_io(mdev);
1502 /* also wait for the last barrier ack. */
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01001503 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1504 * We need a way to either ignore barrier acks for barriers sent before a device
1505 * was attached, or a way to wait for all pending barrier acks to come in.
1506 * As barriers are counted per resource,
1507 * we'd need to suspend io on all devices of a resource.
1508 */
Philipp Reisner2aebfab2011-03-28 16:48:11 +02001509 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001510 /* and for any other previously queued work */
1511 drbd_flush_workqueue(mdev);
1512
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001513 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1514 retcode = rv; /* FIXME: Type mismatch. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515 drbd_resume_io(mdev);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001516 if (rv < SS_SUCCESS)
Tejun Heoe525fd82010-11-13 11:55:17 +01001517 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001518
1519 if (!get_ldev_if_state(mdev, D_ATTACHING))
1520 goto force_diskless;
1521
Philipp Reisnerb411b362009-09-25 16:07:19 -07001522 if (!mdev->bitmap) {
1523 if (drbd_bm_init(mdev)) {
1524 retcode = ERR_NOMEM;
1525 goto force_diskless_dec;
1526 }
1527 }
1528
Philipp Reisnerb411b362009-09-25 16:07:19 -07001529 if (mdev->state.conn < C_CONNECTED &&
1530 mdev->state.role == R_PRIMARY &&
1531 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1532 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1533 (unsigned long long)mdev->ed_uuid);
1534 retcode = ERR_DATA_NOT_CURRENT;
1535 goto force_diskless_dec;
1536 }
1537
1538 /* Since we are diskless, fix the activity log first... */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001539 if (drbd_check_al_size(mdev, new_disk_conf)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001540 retcode = ERR_NOMEM;
1541 goto force_diskless_dec;
1542 }
1543
 1544	/* Prevent shrinking of consistent devices! */
1545 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001546 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001547 dev_warn(DEV, "refusing to truncate a consistent device\n");
Lars Ellenberg7948bcd2011-06-06 15:36:04 +02001548 retcode = ERR_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001549 goto force_diskless_dec;
1550 }
1551
Philipp Reisnerb411b362009-09-25 16:07:19 -07001552 /* Reset the "barriers don't work" bits here, then force meta data to
1553 * be written, to ensure we determine if barriers are supported. */
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001554 if (new_disk_conf->md_flushes)
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001555 clear_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001556 else
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001557 set_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001558
1559 /* Point of no return reached.
1560 * Devices and memory are no longer released by error cleanup below.
 1561	 * Now mdev takes over responsibility, and the state engine should
1562 * clean it up somewhere. */
1563 D_ASSERT(mdev->ldev == NULL);
1564 mdev->ldev = nbc;
1565 mdev->resync = resync_lru;
Philipp Reisner9958c852011-05-03 16:19:31 +02001566 mdev->rs_plan_s = new_plan;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001567 nbc = NULL;
1568 resync_lru = NULL;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001569 new_disk_conf = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001570 new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001571
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001572 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001573
1574 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1575 set_bit(CRASHED_PRIMARY, &mdev->flags);
1576 else
1577 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1578
Philipp Reisner894c6a92010-06-18 16:03:20 +02001579 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02001580 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001581 set_bit(CRASHED_PRIMARY, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001582
1583 mdev->send_cnt = 0;
1584 mdev->recv_cnt = 0;
1585 mdev->read_cnt = 0;
1586 mdev->writ_cnt = 0;
1587
Philipp Reisner99432fc2011-05-20 16:39:13 +02001588 drbd_reconsider_max_bio_size(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001589
1590 /* If I am currently not R_PRIMARY,
1591 * but meta data primary indicator is set,
1592 * I just now recover from a hard crash,
1593 * and have been R_PRIMARY before that crash.
1594 *
1595 * Now, if I had no connection before that crash
1596 * (have been degraded R_PRIMARY), chances are that
1597 * I won't find my peer now either.
1598 *
1599 * In that case, and _only_ in that case,
1600 * we use the degr-wfc-timeout instead of the default,
1601 * so we can automatically recover from a crash of a
1602 * degraded but active "cluster" after a certain timeout.
1603 */
1604 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1605 if (mdev->state.role != R_PRIMARY &&
1606 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1607 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1608 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1609
Bart Van Assche24c48302011-05-21 18:32:29 +02001610 dd = drbd_determine_dev_size(mdev, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001611 if (dd == dev_size_error) {
1612 retcode = ERR_NOMEM_BITMAP;
1613 goto force_diskless_dec;
1614 } else if (dd == grew)
1615 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1616
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001617 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
1618 (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
1619 drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001620 dev_info(DEV, "Assuming that all blocks are out of sync "
1621 "(aka FullSync)\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001622 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1623 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001624 retcode = ERR_IO_MD_DISK;
1625 goto force_diskless_dec;
1626 }
1627 } else {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001628 if (drbd_bitmap_io(mdev, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01001629 "read from attaching", BM_LOCKED_MASK)) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001630 retcode = ERR_IO_MD_DISK;
1631 goto force_diskless_dec;
1632 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001633 }
1634
Philipp Reisner07782862010-08-31 12:00:50 +02001635 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1636 drbd_suspend_al(mdev); /* IO is still suspended here... */
1637
Philipp Reisner87eeee42011-01-19 14:16:30 +01001638 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02001639 os = drbd_read_state(mdev);
1640 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001641	/* If MDF_CONSISTENT is not set, go into inconsistent state,
 1642	   otherwise investigate MDF_WAS_UP_TO_DATE...
 1643	   If MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state,
1644 otherwise into D_CONSISTENT state.
1645 */
1646 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1647 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1648 ns.disk = D_CONSISTENT;
1649 else
1650 ns.disk = D_OUTDATED;
1651 } else {
1652 ns.disk = D_INCONSISTENT;
1653 }
1654
1655 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1656 ns.pdsk = D_OUTDATED;
1657
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001658 rcu_read_lock();
1659 if (ns.disk == D_CONSISTENT &&
1660 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001661 ns.disk = D_UP_TO_DATE;
1662
1663 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1664 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1665 this point, because drbd_request_state() modifies these
1666 flags. */
1667
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001668 if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
Philipp Reisner4035e4c2012-10-01 18:04:12 +02001669 mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001670 else
1671 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1672
1673 rcu_read_unlock();
1674
Philipp Reisnerb411b362009-09-25 16:07:19 -07001675	/* In case we are C_CONNECTED, postpone any decision on the new disk
 1676	   state until after the negotiation phase. */
1677 if (mdev->state.conn == C_CONNECTED) {
1678 mdev->new_state_tmp.i = ns.i;
1679 ns.i = os.i;
1680 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02001681
1682 /* We expect to receive up-to-date UUIDs soon.
1683 To avoid a race in receive_state, free p_uuid while
1684 holding req_lock. I.e. atomic with the state change */
1685 kfree(mdev->p_uuid);
1686 mdev->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001687 }
1688
1689 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001690 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001691
1692 if (rv < SS_SUCCESS)
1693 goto force_diskless_dec;
1694
Philipp Reisnercdfda632011-07-05 15:38:59 +02001695 mod_timer(&mdev->request_timer, jiffies + HZ);
1696
Philipp Reisnerb411b362009-09-25 16:07:19 -07001697 if (mdev->state.role == R_PRIMARY)
1698 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1699 else
1700 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1701
1702 drbd_md_mark_dirty(mdev);
1703 drbd_md_sync(mdev);
1704
1705 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1706 put_ldev(mdev);
Philipp Reisner0e29d162011-02-18 14:23:11 +01001707 conn_reconfig_done(mdev->tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001708 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001709 return 0;
1710
1711 force_diskless_dec:
1712 put_ldev(mdev);
1713 force_diskless:
Philipp Reisner9510b242011-07-01 17:00:57 +02001714 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001715 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001716 fail:
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001717 conn_reconfig_done(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001718 if (nbc) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001719 if (nbc->backing_bdev)
1720 blkdev_put(nbc->backing_bdev,
1721 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1722 if (nbc->md_bdev)
1723 blkdev_put(nbc->md_bdev,
1724 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001725 kfree(nbc);
1726 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001727 kfree(new_disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001728 lc_destroy(resync_lru);
Philipp Reisner9958c852011-05-03 16:19:31 +02001729 kfree(new_plan);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001730
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001731 finish:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001732 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001733 return 0;
1734}
1735
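/* Common detach helper: with force set, raise FORCE_DETACH and force the
 * disk state to D_FAILED right away; otherwise suspend IO, make sure no
 * meta-data IO is in flight, request D_FAILED gracefully and wait until the
 * disk has left that state on its way to D_DISKLESS. */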
Philipp Reisnercdfda632011-07-05 15:38:59 +02001736static int adm_detach(struct drbd_conf *mdev, int force)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001737{
Philipp Reisner19f83c72011-03-29 14:21:03 +02001738 enum drbd_state_rv retcode;
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001739 int ret;
Philipp Reisner02ee8f92011-03-14 11:54:47 +01001740
Philipp Reisnercdfda632011-07-05 15:38:59 +02001741 if (force) {
Lars Ellenberg0c849662012-07-30 09:07:28 +02001742 set_bit(FORCE_DETACH, &mdev->flags);
Philipp Reisner02ee8f92011-03-14 11:54:47 +01001743 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnercdfda632011-07-05 15:38:59 +02001744 retcode = SS_SUCCESS;
Philipp Reisner02ee8f92011-03-14 11:54:47 +01001745 goto out;
1746 }
1747
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001748 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
Lars Ellenberga2e91382011-10-06 17:30:26 +02001749 drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001750 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
Lars Ellenberga2e91382011-10-06 17:30:26 +02001751 drbd_md_put_buffer(mdev);
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001752 /* D_FAILED will transition to DISKLESS. */
1753 ret = wait_event_interruptible(mdev->misc_wait,
1754 mdev->state.disk != D_FAILED);
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001755 drbd_resume_io(mdev);
Philipp Reisner9b2f61a2011-05-24 10:27:38 +02001756 if ((int)retcode == (int)SS_IS_DISKLESS)
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001757 retcode = SS_NOTHING_TO_DO;
1758 if (ret)
1759 retcode = ERR_INTR;
Philipp Reisner02ee8f92011-03-14 11:54:47 +01001760out:
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001761 return retcode;
1762}
1763
Philipp Reisnerb411b362009-09-25 16:07:19 -07001764/* Detaching the disk is a process in multiple stages. First we need to lock
1765 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1766 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1767 * internal references as well.
 1768	 * Only then have we finally detached. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001769int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001770{
1771 enum drbd_ret_code retcode;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001772 struct detach_parms parms = { };
1773 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001774
1775 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1776 if (!adm_ctx.reply_skb)
1777 return retcode;
1778 if (retcode != NO_ERROR)
1779 goto out;
1780
Philipp Reisnercdfda632011-07-05 15:38:59 +02001781 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1782 err = detach_parms_from_attrs(&parms, info);
1783 if (err) {
1784 retcode = ERR_MANDATORY_TAG;
1785 drbd_msg_put_info(from_attrs_err_to_txt(err));
1786 goto out;
1787 }
1788 }
1789
1790 retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001791out:
1792 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001793 return 0;
1794}
1795
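/* Helpers that walk all volumes of a connection under rcu_read_lock() to see
 * whether any resync or online verify is currently running; used further down
 * to refuse changing csums-alg or verify-alg while they are in use. */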
Lars Ellenbergf3990022011-03-23 14:31:09 +01001796static bool conn_resync_running(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001797{
Lars Ellenbergf3990022011-03-23 14:31:09 +01001798 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001799 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001800 int vnr;
1801
Philipp Reisner695d08f2011-04-11 22:53:32 -07001802 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001803 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1804 if (mdev->state.conn == C_SYNC_SOURCE ||
1805 mdev->state.conn == C_SYNC_TARGET ||
1806 mdev->state.conn == C_PAUSED_SYNC_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001807 mdev->state.conn == C_PAUSED_SYNC_T) {
1808 rv = true;
1809 break;
1810 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001811 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001812 rcu_read_unlock();
1813
1814 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001815}
1816
1817static bool conn_ov_running(struct drbd_tconn *tconn)
1818{
1819 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001820 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001821 int vnr;
1822
Philipp Reisner695d08f2011-04-11 22:53:32 -07001823 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001824 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1825 if (mdev->state.conn == C_VERIFY_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001826 mdev->state.conn == C_VERIFY_T) {
1827 rv = true;
1828 break;
1829 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001830 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001831 rcu_read_unlock();
1832
1833 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001834}
1835
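/* Validate a new net_conf against the current connection state: changing the
 * wire protocol, two-primaries or integrity-alg needs at least protocol
 * version 100 while connected, dual-primary setups require protocol C,
 * fencing set to FP_STONITH is incompatible with protocol A, discard-my-data
 * must not be set on the current Primary, and congestion policies other than
 * blocking require protocol A. */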
Philipp Reisnercd643972011-04-13 18:00:59 -07001836static enum drbd_ret_code
Philipp Reisner44ed1672011-04-19 17:10:19 +02001837_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
Philipp Reisnercd643972011-04-13 18:00:59 -07001838{
1839 struct drbd_conf *mdev;
1840 int i;
1841
Philipp Reisnerdcb20d12011-05-16 14:30:24 +02001842 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1843 if (new_conf->wire_protocol != old_conf->wire_protocol)
1844 return ERR_NEED_APV_100;
1845
1846 if (new_conf->two_primaries != old_conf->two_primaries)
1847 return ERR_NEED_APV_100;
1848
Philipp Reisnerdcb20d12011-05-16 14:30:24 +02001849 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1850 return ERR_NEED_APV_100;
1851 }
1852
1853 if (!new_conf->two_primaries &&
1854 conn_highest_role(tconn) == R_PRIMARY &&
1855 conn_highest_peer(tconn) == R_PRIMARY)
1856 return ERR_NEED_ALLOW_TWO_PRI;
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001857
Philipp Reisnercd643972011-04-13 18:00:59 -07001858 if (new_conf->two_primaries &&
1859 (new_conf->wire_protocol != DRBD_PROT_C))
1860 return ERR_NOT_PROTO_C;
1861
Philipp Reisnercd643972011-04-13 18:00:59 -07001862 idr_for_each_entry(&tconn->volumes, mdev, i) {
1863 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001864 enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
Philipp Reisnercd643972011-04-13 18:00:59 -07001865 put_ldev(mdev);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001866 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
Philipp Reisnercd643972011-04-13 18:00:59 -07001867 return ERR_STONITH_AND_PROT_A;
Philipp Reisnercd643972011-04-13 18:00:59 -07001868 }
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02001869 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
Lars Ellenbergeb120102012-08-01 12:46:20 +02001870 return ERR_DISCARD_IMPOSSIBLE;
Philipp Reisnercd643972011-04-13 18:00:59 -07001871 }
Philipp Reisnercd643972011-04-13 18:00:59 -07001872
1873 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1874 return ERR_CONG_NOT_PROTO_A;
1875
1876 return NO_ERROR;
1877}
1878
Philipp Reisner44ed1672011-04-19 17:10:19 +02001879static enum drbd_ret_code
1880check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1881{
1882 static enum drbd_ret_code rv;
1883 struct drbd_conf *mdev;
1884 int i;
1885
1886 rcu_read_lock();
1887 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1888 rcu_read_unlock();
1889
1890 /* tconn->volumes protected by genl_lock() here */
1891 idr_for_each_entry(&tconn->volumes, mdev, i) {
1892 if (!mdev->bitmap) {
 1893			if (drbd_bm_init(mdev))
1894 return ERR_NOMEM;
1895 }
1896 }
1897
1898 return rv;
1899}
1900
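/* Bundle of hash transforms for a connection, allocated up front by
 * alloc_crypto() so the error paths stay simple; on success the transforms
 * are moved into the tconn, otherwise they are released via free_crypto(). */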
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001901struct crypto {
1902 struct crypto_hash *verify_tfm;
1903 struct crypto_hash *csums_tfm;
1904 struct crypto_hash *cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001905 struct crypto_hash *integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001906};
1907
1908static int
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001909alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001910{
1911 if (!tfm_name[0])
1912 return NO_ERROR;
1913
1914 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1915 if (IS_ERR(*tfm)) {
1916 *tfm = NULL;
1917 return err_alg;
1918 }
1919
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001920 return NO_ERROR;
1921}
1922
1923static enum drbd_ret_code
1924alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1925{
Philipp Reisnerb411b362009-09-25 16:07:19 -07001926 char hmac_name[CRYPTO_MAX_ALG_NAME];
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001927 enum drbd_ret_code rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001928
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001929 rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1930 ERR_CSUMS_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001931 if (rv != NO_ERROR)
1932 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001933 rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1934 ERR_VERIFY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001935 if (rv != NO_ERROR)
1936 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001937 rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1938 ERR_INTEGRITY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001939 if (rv != NO_ERROR)
1940 return rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001941 if (new_conf->cram_hmac_alg[0] != 0) {
1942 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1943 new_conf->cram_hmac_alg);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001944
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001945 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1946 ERR_AUTH_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001947 }
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001948
1949 return rv;
1950}
1951
1952static void free_crypto(struct crypto *crypto)
1953{
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001954 crypto_free_hash(crypto->cram_hmac_tfm);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001955 crypto_free_hash(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001956 crypto_free_hash(crypto->csums_tfm);
1957 crypto_free_hash(crypto->verify_tfm);
1958}
1959
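/* Change network options of an existing configuration: copy the old
 * net_conf, apply the netlink attributes (or defaults), refuse to change
 * csums-alg or verify-alg while a resync or online verify is running,
 * allocate the new hash transforms, publish the new conf with
 * rcu_assign_pointer() and free the old one only after synchronize_rcu(). */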
Lars Ellenbergf3990022011-03-23 14:31:09 +01001960int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1961{
1962 enum drbd_ret_code retcode;
1963 struct drbd_tconn *tconn;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001964 struct net_conf *old_conf, *new_conf = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001965 int err;
1966 int ovr; /* online verify running */
1967 int rsr; /* re-sync running */
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001968 struct crypto crypto = { };
Lars Ellenbergf3990022011-03-23 14:31:09 +01001969
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02001970 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001971 if (!adm_ctx.reply_skb)
1972 return retcode;
1973 if (retcode != NO_ERROR)
1974 goto out;
1975
1976 tconn = adm_ctx.tconn;
1977
1978 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1979 if (!new_conf) {
1980 retcode = ERR_NOMEM;
1981 goto out;
1982 }
1983
Lars Ellenbergf3990022011-03-23 14:31:09 +01001984 conn_reconfig_start(tconn);
1985
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001986 mutex_lock(&tconn->data.mutex);
Philipp Reisnera0095502011-05-03 13:14:15 +02001987 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001988 old_conf = tconn->net_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001989
1990 if (!old_conf) {
1991 drbd_msg_put_info("net conf missing, try connect");
1992 retcode = ERR_INVALID_REQUEST;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001993 goto fail;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001994 }
1995
1996 *new_conf = *old_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001997 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001998 set_net_conf_defaults(new_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001999
Lars Ellenbergf3990022011-03-23 14:31:09 +01002000 err = net_conf_from_attrs_for_change(new_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02002001 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002002 retcode = ERR_MANDATORY_TAG;
2003 drbd_msg_put_info(from_attrs_err_to_txt(err));
2004 goto fail;
2005 }
2006
Philipp Reisnercd643972011-04-13 18:00:59 -07002007 retcode = check_net_options(tconn, new_conf);
2008 if (retcode != NO_ERROR)
2009 goto fail;
2010
Lars Ellenbergf3990022011-03-23 14:31:09 +01002011 /* re-sync running */
2012 rsr = conn_resync_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002013 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002014 retcode = ERR_CSUMS_RESYNC_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002015 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002016 }
2017
Lars Ellenbergf3990022011-03-23 14:31:09 +01002018 /* online verify running */
2019 ovr = conn_ov_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002020 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
2021 retcode = ERR_VERIFY_RUNNING;
2022 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002023 }
2024
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002025 retcode = alloc_crypto(&crypto, new_conf);
2026 if (retcode != NO_ERROR)
2027 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002028
Philipp Reisner44ed1672011-04-19 17:10:19 +02002029 rcu_assign_pointer(tconn->net_conf, new_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01002030
2031 if (!rsr) {
2032 crypto_free_hash(tconn->csums_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002033 tconn->csums_tfm = crypto.csums_tfm;
2034 crypto.csums_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002035 }
2036 if (!ovr) {
2037 crypto_free_hash(tconn->verify_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002038 tconn->verify_tfm = crypto.verify_tfm;
2039 crypto.verify_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002040 }
2041
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002042 crypto_free_hash(tconn->integrity_tfm);
2043 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02002044 if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002045 /* Do this without trying to take tconn->data.mutex again. */
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02002046 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002047
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002048 crypto_free_hash(tconn->cram_hmac_tfm);
2049 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2050
Philipp Reisnera0095502011-05-03 13:14:15 +02002051 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002052 mutex_unlock(&tconn->data.mutex);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002053 synchronize_rcu();
2054 kfree(old_conf);
2055
Lars Ellenbergf3990022011-03-23 14:31:09 +01002056 if (tconn->cstate >= C_WF_REPORT_PARAMS)
2057 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
2058
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002059 goto done;
2060
Lars Ellenbergf3990022011-03-23 14:31:09 +01002061 fail:
Philipp Reisnera0095502011-05-03 13:14:15 +02002062 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002063 mutex_unlock(&tconn->data.mutex);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002064 free_crypto(&crypto);
Lars Ellenbergf3990022011-03-23 14:31:09 +01002065 kfree(new_conf);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002066 done:
Lars Ellenbergf3990022011-03-23 14:31:09 +01002067 conn_reconfig_done(tconn);
2068 out:
2069 drbd_adm_finish(info, retcode);
2070 return 0;
2071}
2072
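/* Establish a new connection: reject local/peer addresses already used by
 * another tconn, allocate net_conf and crypto from the netlink attributes,
 * install them under conf_update, remember both endpoints and finally
 * request C_UNCONNECTED so the receiver thread can start connecting. */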
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002073int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002074{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002075 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002076 struct net_conf *old_conf, *new_conf = NULL;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002077 struct crypto crypto = { };
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002078 struct drbd_tconn *tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002079 enum drbd_ret_code retcode;
2080 int i;
2081 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002082
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002083 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002084
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002085 if (!adm_ctx.reply_skb)
2086 return retcode;
2087 if (retcode != NO_ERROR)
2088 goto out;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002089 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2090 drbd_msg_put_info("connection endpoint(s) missing");
2091 retcode = ERR_INVALID_REQUEST;
2092 goto out;
2093 }
2094
2095 /* No need for _rcu here. All reconfiguration is
2096 * strictly serialized on genl_lock(). We are protected against
2097 * concurrent reconfiguration/addition/deletion */
2098 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2099 if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
2100 !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
2101 retcode = ERR_LOCAL_ADDR;
2102 goto out;
2103 }
2104
2105 if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
2106 !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
2107 retcode = ERR_PEER_ADDR;
2108 goto out;
2109 }
2110 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002111
2112 tconn = adm_ctx.tconn;
Philipp Reisner80883192011-02-18 14:56:45 +01002113 conn_reconfig_start(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002114
Philipp Reisner80883192011-02-18 14:56:45 +01002115 if (tconn->cstate > C_STANDALONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002116 retcode = ERR_NET_CONFIGURED;
2117 goto fail;
2118 }
2119
Andreas Gruenbachera209b4a2011-08-17 12:43:25 +02002120 /* allocation not in the IO path, drbdsetup / netlink process context */
Lars Ellenberg5979e362011-04-27 21:09:55 +02002121 new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002122 if (!new_conf) {
2123 retcode = ERR_NOMEM;
2124 goto fail;
2125 }
2126
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002127 set_net_conf_defaults(new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002128
Lars Ellenbergf3990022011-03-23 14:31:09 +01002129 err = net_conf_from_attrs(new_conf, info);
Lars Ellenberg25e40932011-08-19 10:39:00 +02002130 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002131 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002132 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002133 goto fail;
2134 }
2135
Philipp Reisnercd643972011-04-13 18:00:59 -07002136 retcode = check_net_options(tconn, new_conf);
2137 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002138 goto fail;
Philipp Reisner47ff2d02010-06-18 13:56:57 +02002139
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002140 retcode = alloc_crypto(&crypto, new_conf);
2141 if (retcode != NO_ERROR)
Philipp Reisner422028b2010-10-27 11:12:07 +02002142 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002143
2144 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2145
Philipp Reisner80883192011-02-18 14:56:45 +01002146 conn_flush_workqueue(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002147
Philipp Reisnera0095502011-05-03 13:14:15 +02002148 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002149 old_conf = tconn->net_conf;
2150 if (old_conf) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002151 retcode = ERR_NET_CONFIGURED;
Philipp Reisnera0095502011-05-03 13:14:15 +02002152 mutex_unlock(&tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002153 goto fail;
2154 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002155 rcu_assign_pointer(tconn->net_conf, new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002156
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002157 conn_free_crypto(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002158 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002159 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002160 tconn->csums_tfm = crypto.csums_tfm;
2161 tconn->verify_tfm = crypto.verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002162
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002163 tconn->my_addr_len = nla_len(adm_ctx.my_addr);
2164 memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
2165 tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
2166 memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
2167
Philipp Reisnera0095502011-05-03 13:14:15 +02002168 mutex_unlock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002169
Philipp Reisner695d08f2011-04-11 22:53:32 -07002170 rcu_read_lock();
Philipp Reisner80883192011-02-18 14:56:45 +01002171 idr_for_each_entry(&tconn->volumes, mdev, i) {
2172 mdev->send_cnt = 0;
2173 mdev->recv_cnt = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002174 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07002175 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002176
Lars Ellenberg5ee743e2011-04-26 16:22:25 +02002177 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002178
Philipp Reisner80883192011-02-18 14:56:45 +01002179 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002180 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002181 return 0;
2182
2183fail:
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002184 free_crypto(&crypto);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002185 kfree(new_conf);
2186
Philipp Reisner80883192011-02-18 14:56:45 +01002187 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002188out:
2189 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002190 return 0;
2191}
2192
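/* Tear down the connection: try a graceful transition to C_DISCONNECTING,
 * retrying with the peer disk (or our own) marked outdated where the state
 * engine requires it; on success stop the receiver thread and, as a race
 * breaker, force C_STANDALONE. */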
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002193static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002194{
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002195 enum drbd_state_rv rv;
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002196
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002197 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2198 force ? CS_HARD : 0);
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002199
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002200 switch (rv) {
2201 case SS_NOTHING_TO_DO:
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002202 break;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002203 case SS_ALREADY_STANDALONE:
2204 return SS_SUCCESS;
2205 case SS_PRIMARY_NOP:
2206 /* Our state checking code wants to see the peer outdated. */
Philipp Reisner2bd5ed52013-03-27 14:08:40 +01002207 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2208
2209 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2210 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2211
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002212 break;
2213 case SS_CW_FAILED_BY_PEER:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002214 /* The peer probably wants to see us outdated. */
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002215 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2216 disk, D_OUTDATED), 0);
2217 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002218 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2219 CS_HARD);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002220 }
2221 break;
2222 default:;
2223 /* no special handling necessary */
2224 }
2225
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002226 if (rv >= SS_SUCCESS) {
2227 enum drbd_state_rv rv2;
2228 /* No one else can reconfigure the network while I am here.
2229 * The state handling only uses drbd_thread_stop_nowait(),
 2230	 * so here we really want to wait until the receiver is no more.
2231 */
2232 drbd_thread_stop(&adm_ctx.tconn->receiver);
2233
2234 /* Race breaker. This additional state change request may be
2235 * necessary, if this was a forced disconnect during a receiver
2236 * restart. We may have "killed" the receiver thread just
2237 * after drbdd_init() returned. Typically, we should be
2238 * C_STANDALONE already, now, and this becomes a no-op.
 2239	 * C_STANDALONE by now, and this becomes a no-op.
2240 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2241 CS_VERBOSE | CS_HARD);
2242 if (rv2 < SS_SUCCESS)
2243 conn_err(tconn,
2244 "unexpected rv2=%d in conn_try_disconnect()\n",
2245 rv2);
2246 }
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002247 return rv;
2248}
2249
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002250int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002251{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002252 struct disconnect_parms parms;
2253 struct drbd_tconn *tconn;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002254 enum drbd_state_rv rv;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002255 enum drbd_ret_code retcode;
2256 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002257
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002258 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002259 if (!adm_ctx.reply_skb)
2260 return retcode;
2261 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002262 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002263
2264 tconn = adm_ctx.tconn;
2265 memset(&parms, 0, sizeof(parms));
2266 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002267 err = disconnect_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002268 if (err) {
2269 retcode = ERR_MANDATORY_TAG;
2270 drbd_msg_put_info(from_attrs_err_to_txt(err));
2271 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002272 }
2273 }
2274
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002275 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2276 if (rv < SS_SUCCESS)
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002277 retcode = rv; /* FIXME: Type mismatch. */
2278 else
2279 retcode = NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002280 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002281 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002282 return 0;
2283}
2284
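/* After an online grow somebody has to sync the new area: if the roles
 * differ the Primary becomes sync source, otherwise the tie is broken by the
 * RESOLVE_CONFLICTS flag. */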
2285void resync_after_online_grow(struct drbd_conf *mdev)
2286{
2287 int iass; /* I am sync source */
2288
2289 dev_info(DEV, "Resync of new storage after online grow\n");
2290 if (mdev->state.role != mdev->state.peer)
2291 iass = (mdev->state.role == R_PRIMARY);
2292 else
Lars Ellenberg427c0432012-08-01 12:43:01 +02002293 iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002294
2295 if (iass)
2296 drbd_start_resync(mdev, C_SYNC_SOURCE);
2297 else
2298 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2299}
2300
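/* Resize the device: refused while a resync is running or with both nodes
 * Secondary; a new explicit size is stored in a fresh, RCU-published
 * disk_conf, drbd_determine_dev_size() applies it, and while connected the
 * new sizes and UUIDs are sent to the peer. */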
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002301int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002302{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002303 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002304 struct resize_parms rs;
2305 struct drbd_conf *mdev;
2306 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002307 enum determine_dev_size dd;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002308 enum dds_flags ddsf;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002309 sector_t u_size;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002310 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002311
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002312 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2313 if (!adm_ctx.reply_skb)
2314 return retcode;
2315 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002316 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002317
2318 memset(&rs, 0, sizeof(struct resize_parms));
2319 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002320 err = resize_parms_from_attrs(&rs, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002321 if (err) {
2322 retcode = ERR_MANDATORY_TAG;
2323 drbd_msg_put_info(from_attrs_err_to_txt(err));
2324 goto fail;
2325 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002326 }
2327
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002328 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002329 if (mdev->state.conn > C_CONNECTED) {
2330 retcode = ERR_RESIZE_RESYNC;
2331 goto fail;
2332 }
2333
2334 if (mdev->state.role == R_SECONDARY &&
2335 mdev->state.peer == R_SECONDARY) {
2336 retcode = ERR_NO_PRIMARY;
2337 goto fail;
2338 }
2339
2340 if (!get_ldev(mdev)) {
2341 retcode = ERR_NO_DISK;
2342 goto fail;
2343 }
2344
Philipp Reisner31890f42011-01-19 14:12:51 +01002345 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002346 retcode = ERR_NEED_APV_93;
Andreas Gruenbacher7b4e4d32011-09-28 22:15:04 +02002347 goto fail_ldev;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002348 }
2349
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002350 rcu_read_lock();
2351 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2352 rcu_read_unlock();
2353 if (u_size != (sector_t)rs.resize_size) {
2354 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2355 if (!new_disk_conf) {
2356 retcode = ERR_NOMEM;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002357 goto fail_ldev;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002358 }
2359 }
2360
Philipp Reisner087c2492010-03-26 13:49:56 +01002361 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002362 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002363
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002364 if (new_disk_conf) {
2365 mutex_lock(&mdev->tconn->conf_update);
2366 old_disk_conf = mdev->ldev->disk_conf;
2367 *new_disk_conf = *old_disk_conf;
2368 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2369 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2370 mutex_unlock(&mdev->tconn->conf_update);
2371 synchronize_rcu();
2372 kfree(old_disk_conf);
2373 }
2374
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002375 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
Bart Van Assche24c48302011-05-21 18:32:29 +02002376 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002377 drbd_md_sync(mdev);
2378 put_ldev(mdev);
2379 if (dd == dev_size_error) {
2380 retcode = ERR_NOMEM_BITMAP;
2381 goto fail;
2382 }
2383
Philipp Reisner087c2492010-03-26 13:49:56 +01002384 if (mdev->state.conn == C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002385 if (dd == grew)
2386 set_bit(RESIZE_PENDING, &mdev->flags);
2387
2388 drbd_send_uuids(mdev);
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002389 drbd_send_sizes(mdev, 1, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002390 }
2391
2392 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002393 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002394 return 0;
Andreas Gruenbacher7b4e4d32011-09-28 22:15:04 +02002395
2396 fail_ldev:
2397 put_ldev(mdev);
2398 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002399}
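/*
 * The resize path above updates ldev->disk_conf with the usual RCU
 * publish-then-reclaim pattern: copy the old struct, modify the copy,
 * publish it with rcu_assign_pointer() while holding conf_update, and
 * kfree() the old struct only after synchronize_rcu().  A minimal sketch
 * of that pattern, with hypothetical types and lock (not DRBD's own;
 * assumes <linux/rcupdate.h>, <linux/slab.h>, <linux/mutex.h>):
 */
#if 0
struct my_conf { sector_t disk_size; };
static struct my_conf __rcu *my_conf_p;
static DEFINE_MUTEX(my_conf_update);

static int my_set_disk_size(sector_t new_size)
{
	struct my_conf *new_conf, *old_conf;

	new_conf = kmalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf)
		return -ENOMEM;

	mutex_lock(&my_conf_update);		/* serialize writers */
	old_conf = rcu_dereference_protected(my_conf_p,
			lockdep_is_held(&my_conf_update));
	*new_conf = *old_conf;			/* start from the current values */
	new_conf->disk_size = new_size;
	rcu_assign_pointer(my_conf_p, new_conf);	/* publish */
	mutex_unlock(&my_conf_update);

	synchronize_rcu();			/* wait out readers of old_conf */
	kfree(old_conf);
	return 0;
}
#endif
/*
 * Readers would bracket rcu_dereference(my_conf_p) with rcu_read_lock() /
 * rcu_read_unlock(), as the u_size lookup in drbd_adm_resize() does.
 */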
2400
Lars Ellenbergf3990022011-03-23 14:31:09 +01002401int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002402{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002403 enum drbd_ret_code retcode;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002404 struct drbd_tconn *tconn;
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002405 struct res_opts res_opts;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002406 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002407
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002408 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002409 if (!adm_ctx.reply_skb)
2410 return retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002411 if (retcode != NO_ERROR)
2412 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002413 tconn = adm_ctx.tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002414
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002415 res_opts = tconn->res_opts;
Lars Ellenberg5979e362011-04-27 21:09:55 +02002416 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002417 set_res_opts_defaults(&res_opts);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002418
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002419 err = res_opts_from_attrs(&res_opts, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02002420 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002421 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002422 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002423 goto fail;
2424 }
2425
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002426 err = set_resource_options(tconn, &res_opts);
2427 if (err) {
2428 retcode = ERR_INVALID_REQUEST;
2429 if (err == -ENOMEM)
Philipp Reisner778f2712010-07-06 11:14:00 +02002430 retcode = ERR_NOMEM;
Philipp Reisner778f2712010-07-06 11:14:00 +02002431 }
2432
Philipp Reisnerb411b362009-09-25 16:07:19 -07002433fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002434 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002435 return 0;
2436}
2437
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002438int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002439{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002440 struct drbd_conf *mdev;
2441	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2442
2443 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2444 if (!adm_ctx.reply_skb)
2445 return retcode;
2446 if (retcode != NO_ERROR)
2447 goto out;
2448
2449 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002450
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002451 /* If there is still bitmap IO pending, probably because of a previous
Lars Ellenberg7ee1fb92012-06-19 10:27:58 +02002452	 * resync that has just finished, wait for it before requesting a new resync.
2453	 * Also wait for its after_state_ch(). */
Lars Ellenberga574daf2012-04-25 16:27:35 +02002454 drbd_suspend_io(mdev);
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002455 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg7ee1fb92012-06-19 10:27:58 +02002456 drbd_flush_workqueue(mdev);
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002457
Philipp Reisner0b2dafc2013-03-27 14:08:38 +01002458 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2459 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
2460 * try to start a resync handshake as sync target for full sync.
Philipp Reisner9376d9f2013-03-27 14:08:36 +01002461 */
Philipp Reisner0b2dafc2013-03-27 14:08:38 +01002462 if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
2463 retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
2464 if (retcode >= SS_SUCCESS) {
2465 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
2466 "set_n_write from invalidate", BM_LOCKED_MASK))
2467 retcode = ERR_IO_MD_DISK;
2468 }
2469 } else
2470 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
Lars Ellenberga574daf2012-04-25 16:27:35 +02002471 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002472
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002473out:
2474 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002475 return 0;
2476}
2477
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002478static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2479 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002480{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002481 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002482
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002483 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2484 if (!adm_ctx.reply_skb)
2485 return retcode;
2486 if (retcode != NO_ERROR)
2487 goto out;
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002488
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002489 retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2490out:
2491 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002492 return 0;
2493}
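/*
 * The NS()/NS2()/NS3() helpers used with drbd_request_state() build a
 * mask/val pair: "mask" selects which fields of the state union may
 * change, "val" carries the new values, and everything outside the mask
 * is left alone.  A generic illustration of that idea with a made-up
 * state union (not DRBD's types):
 */
#if 0
union toy_state {
	struct {
		unsigned role:2;
		unsigned conn:5;
		unsigned disk:4;
	};
	unsigned int i;
};

/* Apply only the fields selected by mask; everything else stays as in os. */
static union toy_state toy_apply_mask_val(union toy_state os,
					  union toy_state mask,
					  union toy_state val)
{
	union toy_state ns;

	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}
#endif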
2494
2495static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2496{
2497 int rv;
2498
2499 rv = drbd_bmio_set_n_write(mdev);
2500 drbd_suspend_al(mdev);
2501 return rv;
2502}
2503
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002504int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002505{
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002506 int retcode; /* drbd_ret_code, drbd_state_rv */
2507 struct drbd_conf *mdev;
2508
2509 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2510 if (!adm_ctx.reply_skb)
2511 return retcode;
2512 if (retcode != NO_ERROR)
2513 goto out;
2514
2515 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002516
2517 /* If there is still bitmap IO pending, probably because of a previous
Lars Ellenberg7ee1fb92012-06-19 10:27:58 +02002518	 * resync that has just finished, wait for it before requesting a new resync.
2519	 * Also wait for its after_state_ch(). */
Lars Ellenberga574daf2012-04-25 16:27:35 +02002520 drbd_suspend_io(mdev);
Lars Ellenberg5016b822012-05-07 12:00:56 +02002521 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg7ee1fb92012-06-19 10:27:58 +02002522 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002523
Philipp Reisner0b2dafc2013-03-27 14:08:38 +01002524 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
2525 * in the bitmap. Otherwise, try to start a resync handshake
2526 * as sync source for full sync.
2527 */
2528 if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
2529		/* The peer will get a resync upon connect anyway. Just make that
2530 into a full resync. */
2531 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
2532 if (retcode >= SS_SUCCESS) {
2533 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
2534 "set_n_write from invalidate_peer",
2535 BM_LOCKED_SET_ALLOWED))
2536 retcode = ERR_IO_MD_DISK;
2537 }
2538 } else
2539 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
Lars Ellenberga574daf2012-04-25 16:27:35 +02002540 drbd_resume_io(mdev);
Philipp Reisner07782862010-08-31 12:00:50 +02002541
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002542out:
2543 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002544 return 0;
2545}
2546
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002547int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002548{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002549 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002550
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002551 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2552 if (!adm_ctx.reply_skb)
2553 return retcode;
2554 if (retcode != NO_ERROR)
2555 goto out;
2556
2557 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002558 retcode = ERR_PAUSE_IS_SET;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002559out:
2560 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002561 return 0;
2562}
2563
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002564int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002565{
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02002566 union drbd_dev_state s;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002567 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002568
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002569 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2570 if (!adm_ctx.reply_skb)
2571 return retcode;
2572 if (retcode != NO_ERROR)
2573 goto out;
2574
2575 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2576 s = adm_ctx.mdev->state;
Philipp Reisnercd88d032011-01-20 11:46:41 +01002577 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2578 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2579 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2580 } else {
2581 retcode = ERR_PAUSE_IS_CLEAR;
2582 }
2583 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002584
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002585out:
2586 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002587 return 0;
2588}
2589
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002590int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002591{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002592 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002593}
2594
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002595int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002596{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002597 struct drbd_conf *mdev;
2598	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2599
2600 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2601 if (!adm_ctx.reply_skb)
2602 return retcode;
2603 if (retcode != NO_ERROR)
2604 goto out;
2605
2606 mdev = adm_ctx.mdev;
Philipp Reisner43a51822010-06-11 11:26:34 +02002607 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2608 drbd_uuid_new_current(mdev);
2609 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02002610 }
Philipp Reisner265be2d2010-05-31 10:14:17 +02002611 drbd_suspend_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002612 retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2613 if (retcode == SS_SUCCESS) {
Philipp Reisner265be2d2010-05-31 10:14:17 +02002614 if (mdev->state.conn < C_CONNECTED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002615 tl_clear(mdev->tconn);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002616 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002617 tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002618 }
2619 drbd_resume_io(mdev);
2620
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002621out:
2622 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002623 return 0;
2624}
2625
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002626int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002627{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002628 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002629}
2630
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002631int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002632{
2633 struct nlattr *nla;
2634 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2635 if (!nla)
2636 goto nla_put_failure;
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002637 if (vnr != VOLUME_UNSPECIFIED &&
2638 nla_put_u32(skb, T_ctx_volume, vnr))
2639 goto nla_put_failure;
2640 if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
2641 goto nla_put_failure;
2642 if (tconn->my_addr_len &&
2643 nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
2644 goto nla_put_failure;
2645 if (tconn->peer_addr_len &&
2646 nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
2647 goto nla_put_failure;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002648 nla_nest_end(skb, nla);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002649 return 0;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002650
2651nla_put_failure:
2652 if (nla)
2653 nla_nest_cancel(skb, nla);
2654 return -EMSGSIZE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002655}
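/*
 * nla_put_drbd_cfg_context() above follows the standard netlink nesting
 * pattern: nla_nest_start() opens a container attribute, nla_put_*()
 * calls add the members, nla_nest_end() closes it, and on failure
 * nla_nest_cancel() drops the partial nest so the skb stays consistent.
 * A hypothetical sketch of the same shape (made-up attribute types, not
 * DRBD's):
 */
#if 0
static int put_example_context(struct sk_buff *skb)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, EXAMPLE_NLA_CTX);	/* hypothetical type */
	if (!nla)
		return -EMSGSIZE;
	if (nla_put_u32(skb, EXAMPLE_T_VOLUME, 0) ||	/* hypothetical types */
	    nla_put_string(skb, EXAMPLE_T_NAME, "r0"))
		goto nla_put_failure;
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
#endif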
2656
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002657int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2658 const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002659{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002660 struct state_info *si = NULL; /* for sizeof(si->member); */
Philipp Reisner44ed1672011-04-19 17:10:19 +02002661 struct net_conf *nc;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002662 struct nlattr *nla;
2663 int got_ldev;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002664 int err = 0;
2665 int exclude_sensitive;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002666
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002667 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2668 * to. So we better exclude_sensitive information.
2669 *
2670 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2671 * in the context of the requesting user process. Exclude sensitive
2672	 * information, unless current has the CAP_SYS_ADMIN capability.
2673 *
2674 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2675 * relies on the current implementation of netlink_dump(), which
2676 * executes the dump callback successively from netlink_recvmsg(),
2677 * always in the context of the receiving process */
2678 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002679
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002680 got_ldev = get_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002681
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002682 /* We need to add connection name and volume number information still.
2683 * Minor number is in drbd_genlmsghdr. */
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002684 if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002685 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002686
Lars Ellenbergf3990022011-03-23 14:31:09 +01002687 if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2688 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002689
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002690 rcu_read_lock();
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002691 if (got_ldev)
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002692 if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002693 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002694
Philipp Reisner44ed1672011-04-19 17:10:19 +02002695 nc = rcu_dereference(mdev->tconn->net_conf);
2696 if (nc)
2697 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2698 rcu_read_unlock();
2699 if (err)
2700 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002701
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002702 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2703 if (!nla)
2704 goto nla_put_failure;
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002705 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
2706 nla_put_u32(skb, T_current_state, mdev->state.i) ||
2707 nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
Philipp Marek3174f8c2012-03-03 21:04:30 +01002708 nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
2709 nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
2710 nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
2711 nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
2712 nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
2713 nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
2714 nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
2715 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
2716 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
2717 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002718 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002719
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002720 if (got_ldev) {
Philipp Reisner39a1aa72012-08-08 21:19:09 +02002721 int err;
2722
2723 spin_lock_irq(&mdev->ldev->md.uuid_lock);
2724 err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2725 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2726
2727 if (err)
2728 goto nla_put_failure;
2729
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002730 if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002731 nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
2732 nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
2733 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002734 if (C_SYNC_SOURCE <= mdev->state.conn &&
2735 C_PAUSED_SYNC_T >= mdev->state.conn) {
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002736 if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
2737 nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
2738 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002739 }
2740 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002741
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002742 if (sib) {
2743 switch(sib->sib_reason) {
2744 case SIB_SYNC_PROGRESS:
2745 case SIB_GET_STATUS_REPLY:
2746 break;
2747 case SIB_STATE_CHANGE:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002748 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
2749 nla_put_u32(skb, T_new_state, sib->ns.i))
2750 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002751 break;
2752 case SIB_HELPER_POST:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002753 if (nla_put_u32(skb, T_helper_exit_code,
2754 sib->helper_exit_code))
2755 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002756 /* fall through */
2757 case SIB_HELPER_PRE:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002758 if (nla_put_string(skb, T_helper, sib->helper_name))
2759 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002760 break;
2761 }
2762 }
2763 nla_nest_end(skb, nla);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002764
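	/* The "if (0) nla_put_failure:" construct below makes the error label
	 * reachable only via goto: the success path skips the err = -EMSGSIZE
	 * assignment, and both paths share the put_ldev() cleanup and the
	 * final return. */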
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002765 if (0)
2766nla_put_failure:
2767 err = -EMSGSIZE;
2768 if (got_ldev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002769 put_ldev(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002770 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002771}
2772
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002773int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002774{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002775 enum drbd_ret_code retcode;
2776 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002777
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002778 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2779 if (!adm_ctx.reply_skb)
2780 return retcode;
2781 if (retcode != NO_ERROR)
2782 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002783
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002784 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2785 if (err) {
2786 nlmsg_free(adm_ctx.reply_skb);
2787 return err;
2788 }
2789out:
2790 drbd_adm_finish(info, retcode);
2791 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002792}
2793
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002794int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002795{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002796 struct drbd_conf *mdev;
2797 struct drbd_genlmsghdr *dh;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002798 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2799 struct drbd_tconn *tconn = NULL;
2800 struct drbd_tconn *tmp;
2801 unsigned volume = cb->args[1];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002802
Lars Ellenberg543cc102011-03-10 22:18:18 +01002803	/* Open-coded, deferred iteration:
2804 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2805 * idr_for_each_entry(&tconn->volumes, mdev, i) {
2806 * ...
2807 * }
2808 * }
2809 * where tconn is cb->args[0];
2810 * and i is cb->args[1];
2811 *
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002812 * cb->args[2] indicates if we shall loop over all resources,
2813 * or just dump all volumes of a single resource.
2814 *
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002815 * This may miss entries inserted after this dump started,
2816 * or entries deleted before they are reached.
Lars Ellenberg543cc102011-03-10 22:18:18 +01002817 *
2818 * We need to make sure the mdev won't disappear while
2819 * we are looking at it, and revalidate our iterators
2820 * on each iteration.
2821 */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002822
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002823 /* synchronize with conn_create()/conn_destroy() */
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002824 rcu_read_lock();
Lars Ellenberg543cc102011-03-10 22:18:18 +01002825 /* revalidate iterator position */
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002826 list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
Lars Ellenberg543cc102011-03-10 22:18:18 +01002827 if (pos == NULL) {
2828 /* first iteration */
2829 pos = tmp;
2830 tconn = pos;
2831 break;
2832 }
2833 if (tmp == pos) {
2834 tconn = pos;
2835 break;
2836 }
2837 }
2838 if (tconn) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002839next_tconn:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002840 mdev = idr_get_next(&tconn->volumes, &volume);
2841 if (!mdev) {
2842 /* No more volumes to dump on this tconn.
2843 * Advance tconn iterator. */
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002844 pos = list_entry_rcu(tconn->all_tconn.next,
2845 struct drbd_tconn, all_tconn);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002846 /* Did we dump any volume on this tconn yet? */
Lars Ellenberg543cc102011-03-10 22:18:18 +01002847 if (volume != 0) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002848 /* If we reached the end of the list,
2849 * or only a single resource dump was requested,
2850 * we are done. */
2851 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2852 goto out;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002853 volume = 0;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002854 tconn = pos;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002855 goto next_tconn;
2856 }
2857 }
2858
Philipp Reisner98683652012-11-09 14:18:43 +01002859 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002860 cb->nlh->nlmsg_seq, &drbd_genl_family,
2861 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2862 if (!dh)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002863 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002864
Lars Ellenberg543cc102011-03-10 22:18:18 +01002865 if (!mdev) {
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002866 /* This is a tconn without a single volume.
2867			 * Surprisingly enough, it may have a network
2868 * configuration. */
2869 struct net_conf *nc;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002870 dh->minor = -1U;
2871 dh->ret_code = NO_ERROR;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002872 if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002873 goto cancel;
2874 nc = rcu_dereference(tconn->net_conf);
2875 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
2876 goto cancel;
2877 goto done;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002878 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002879
Lars Ellenberg543cc102011-03-10 22:18:18 +01002880 D_ASSERT(mdev->vnr == volume);
2881 D_ASSERT(mdev->tconn == tconn);
2882
2883 dh->minor = mdev_to_minor(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002884 dh->ret_code = NO_ERROR;
2885
2886 if (nla_put_status_info(skb, mdev, NULL)) {
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002887cancel:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002888 genlmsg_cancel(skb, dh);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002889 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002890 }
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002891done:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002892 genlmsg_end(skb, dh);
2893 }
2894
Lars Ellenberg543cc102011-03-10 22:18:18 +01002895out:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002896 rcu_read_unlock();
Lars Ellenberg543cc102011-03-10 22:18:18 +01002897 /* where to start the next iteration */
2898 cb->args[0] = (long)pos;
2899 cb->args[1] = (pos == tconn) ? volume + 1 : 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002900
Lars Ellenberg543cc102011-03-10 22:18:18 +01002901	/* If no more tconns/volumes/minors are found, the skb stays empty,
2902	 * which terminates the dump. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002903 return skb->len;
2904}
2905
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002906/*
2907 * Request status of all resources, or of all volumes within a single resource.
2908 *
2909 * This is a dump, as the answer may not fit in a single reply skb otherwise.
2910 * Which means we cannot use the family->attrbuf or other such members, because
2911 * dump is NOT protected by the genl_lock(). During dump, we only have access
2912 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2913 *
2914 * Once things are setup properly, we call into get_one_status().
Philipp Reisnerb411b362009-09-25 16:07:19 -07002915 */
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002916int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002917{
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002918 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2919 struct nlattr *nla;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002920 const char *resource_name;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002921 struct drbd_tconn *tconn;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002922 int maxtype;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002923
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002924 /* Is this a followup call? */
2925 if (cb->args[0]) {
2926 /* ... of a single resource dump,
2927 * and the resource iterator has been advanced already? */
2928 if (cb->args[2] && cb->args[2] != cb->args[0])
2929 return 0; /* DONE. */
2930 goto dump;
2931 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002932
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002933 /* First call (from netlink_dump_start). We need to figure out
2934 * which resource(s) the user wants us to dump. */
2935 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2936 nlmsg_attrlen(cb->nlh, hdrlen),
2937 DRBD_NLA_CFG_CONTEXT);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002938
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002939 /* No explicit context given. Dump all. */
2940 if (!nla)
2941 goto dump;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002942 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
2943 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
2944 if (IS_ERR(nla))
2945 return PTR_ERR(nla);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002946 /* context given, but no name present? */
2947 if (!nla)
2948 return -EINVAL;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002949 resource_name = nla_data(nla);
2950 tconn = conn_get_by_name(resource_name);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002951
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002952 if (!tconn)
2953 return -ENODEV;
2954
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002955 kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2956
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002957 /* prime iterators, and set "filter" mode mark:
2958 * only dump this tconn. */
2959 cb->args[0] = (long)tconn;
2960 /* cb->args[1] = 0; passed in this way. */
2961 cb->args[2] = (long)tconn;
2962
2963dump:
2964 return get_one_status(skb, cb);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002965}
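/*
 * get_one_status() and drbd_adm_get_status_all() follow the generic
 * netlink dump contract: the callback is invoked repeatedly, keeps its
 * position in cb->args[], and the dump ends once a call adds nothing to
 * the skb.  A stripped-down dump callback of the same shape, with a
 * made-up family, command and helpers, could look like this:
 */
#if 0
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long idx = cb->args[0];	/* position to resume from */
	void *hdr;

	for (; idx < example_nr_items(); idx++) {	/* hypothetical helper */
		hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, &example_genl_family,
				  NLM_F_MULTI, EXAMPLE_CMD_GET);
		if (!hdr)
			break;		/* skb full; resume here on the next call */
		if (example_fill(skb, idx)) {	/* hypothetical helper */
			genlmsg_cancel(skb, hdr);
			break;
		}
		genlmsg_end(skb, hdr);
	}
	cb->args[0] = idx;	/* where the next invocation continues */
	return skb->len;	/* an empty skb terminates the dump */
}
#endif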
2966
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002967int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002968{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002969 enum drbd_ret_code retcode;
2970 struct timeout_parms tp;
2971 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002972
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002973 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2974 if (!adm_ctx.reply_skb)
2975 return retcode;
2976 if (retcode != NO_ERROR)
2977 goto out;
2978
2979 tp.timeout_type =
2980 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2981 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2982 UT_DEFAULT;
2983
2984 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2985 if (err) {
2986 nlmsg_free(adm_ctx.reply_skb);
2987 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002988 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002989out:
2990 drbd_adm_finish(info, retcode);
2991 return 0;
2992}
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002993
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002994int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2995{
2996 struct drbd_conf *mdev;
2997 enum drbd_ret_code retcode;
Lars Ellenberg58ffa582012-07-26 14:09:49 +02002998 struct start_ov_parms parms;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002999
3000 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3001 if (!adm_ctx.reply_skb)
3002 return retcode;
3003 if (retcode != NO_ERROR)
3004 goto out;
3005
3006 mdev = adm_ctx.mdev;
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003007
3008 /* resume from last known position, if possible */
3009 parms.ov_start_sector = mdev->ov_start_sector;
3010 parms.ov_stop_sector = ULLONG_MAX;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003011 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01003012 int err = start_ov_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003013 if (err) {
3014 retcode = ERR_MANDATORY_TAG;
3015 drbd_msg_put_info(from_attrs_err_to_txt(err));
3016 goto out;
3017 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003018 }
Lars Ellenberg58ffa582012-07-26 14:09:49 +02003019 /* w_make_ov_request expects position to be aligned */
3020 mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
3021 mdev->ov_stop_sector = parms.ov_stop_sector;
Lars Ellenberg873b0d52011-01-21 22:53:48 +01003022
3023 /* If there is still bitmap IO pending, e.g. previous resync or verify
3024 * just being finished, wait for it before requesting a new resync. */
Lars Ellenberga574daf2012-04-25 16:27:35 +02003025 drbd_suspend_io(mdev);
Lars Ellenberg873b0d52011-01-21 22:53:48 +01003026 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003027 retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
Lars Ellenberga574daf2012-04-25 16:27:35 +02003028 drbd_resume_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003029out:
3030 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003031 return 0;
3032}
3033
3034
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003035int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003036{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003037 struct drbd_conf *mdev;
3038 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003039 int skip_initial_sync = 0;
3040 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003041 struct new_c_uuid_parms args;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003042
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003043 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3044 if (!adm_ctx.reply_skb)
3045 return retcode;
3046 if (retcode != NO_ERROR)
3047 goto out_nolock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003048
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003049 mdev = adm_ctx.mdev;
3050 memset(&args, 0, sizeof(args));
3051 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01003052 err = new_c_uuid_parms_from_attrs(&args, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003053 if (err) {
3054 retcode = ERR_MANDATORY_TAG;
3055 drbd_msg_put_info(from_attrs_err_to_txt(err));
3056 goto out_nolock;
3057 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003058 }
3059
Philipp Reisner8410da82011-02-11 20:11:10 +01003060 mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003061
3062 if (!get_ldev(mdev)) {
3063 retcode = ERR_NO_DISK;
3064 goto out;
3065 }
3066
3067 /* this is "skip initial sync", assume to be clean */
Philipp Reisner31890f42011-01-19 14:12:51 +01003068 if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003069 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3070 dev_info(DEV, "Preparing to skip initial sync\n");
3071 skip_initial_sync = 1;
3072 } else if (mdev->state.conn != C_STANDALONE) {
3073 retcode = ERR_CONNECTED;
3074 goto out_dec;
3075 }
3076
3077 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
3078 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
3079
3080 if (args.clear_bm) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003081 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3082 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003083 if (err) {
3084 dev_err(DEV, "Writing bitmap failed with %d\n",err);
3085 retcode = ERR_IO_MD_DISK;
3086 }
3087 if (skip_initial_sync) {
3088 drbd_send_uuids_skip_initial_sync(mdev);
3089 _drbd_uuid_set(mdev, UI_BITMAP, 0);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003090 drbd_print_uuids(mdev, "cleared bitmap UUID");
Philipp Reisner87eeee42011-01-19 14:16:30 +01003091 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003092 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3093 CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003094 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003095 }
3096 }
3097
3098 drbd_md_sync(mdev);
3099out_dec:
3100 put_ldev(mdev);
3101out:
Philipp Reisner8410da82011-02-11 20:11:10 +01003102 mutex_unlock(mdev->state_mutex);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003103out_nolock:
3104 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003105 return 0;
3106}
3107
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003108static enum drbd_ret_code
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003109drbd_check_resource_name(const char *name)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003110{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003111 if (!name || !name[0]) {
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003112 drbd_msg_put_info("resource name missing");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003113 return ERR_MANDATORY_TAG;
3114 }
3115 /* if we want to use these in sysfs/configfs/debugfs some day,
3116 * we must not allow slashes */
3117 if (strchr(name, '/')) {
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003118 drbd_msg_put_info("invalid resource name");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003119 return ERR_INVALID_REQUEST;
3120 }
3121 return NO_ERROR;
3122}
Philipp Reisnerb411b362009-09-25 16:07:19 -07003123
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003124int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003125{
3126 enum drbd_ret_code retcode;
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003127 struct res_opts res_opts;
3128 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003129
3130 retcode = drbd_adm_prepare(skb, info, 0);
3131 if (!adm_ctx.reply_skb)
3132 return retcode;
3133 if (retcode != NO_ERROR)
3134 goto out;
3135
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003136 set_res_opts_defaults(&res_opts);
3137 err = res_opts_from_attrs(&res_opts, info);
3138 if (err && err != -ENOMSG) {
3139 retcode = ERR_MANDATORY_TAG;
3140 drbd_msg_put_info(from_attrs_err_to_txt(err));
3141 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003142 }
3143
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003144 retcode = drbd_check_resource_name(adm_ctx.resource_name);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003145 if (retcode != NO_ERROR)
3146 goto out;
3147
3148 if (adm_ctx.tconn) {
Lars Ellenberg38f19612011-03-14 13:22:35 +01003149 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3150 retcode = ERR_INVALID_REQUEST;
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003151 drbd_msg_put_info("resource exists");
Lars Ellenberg38f19612011-03-14 13:22:35 +01003152 }
3153 /* else: still NO_ERROR */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003154 goto out;
Philipp Reisner9f5180e2009-10-06 09:30:14 +02003155 }
3156
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003157 if (!conn_create(adm_ctx.resource_name, &res_opts))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003158 retcode = ERR_NOMEM;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003159out:
3160 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003161 return 0;
3162}
3163
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003164int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003165{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003166 struct drbd_genlmsghdr *dh = info->userhdr;
3167 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003168
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02003169 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003170 if (!adm_ctx.reply_skb)
3171 return retcode;
3172 if (retcode != NO_ERROR)
3173 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003174
Andreas Gruenbacherf2257a52011-07-14 16:00:40 +02003175 if (dh->minor > MINORMASK) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003176 drbd_msg_put_info("requested minor out of range");
3177 retcode = ERR_INVALID_REQUEST;
3178 goto out;
3179 }
Andreas Gruenbacher0c8e36d2011-03-30 16:00:17 +02003180 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003181 drbd_msg_put_info("requested volume id out of range");
3182 retcode = ERR_INVALID_REQUEST;
3183 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003184 }
3185
Lars Ellenberg38f19612011-03-14 13:22:35 +01003186 /* drbd_adm_prepare made sure already
3187 * that mdev->tconn and mdev->vnr match the request. */
3188 if (adm_ctx.mdev) {
3189 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3190 retcode = ERR_MINOR_EXISTS;
3191 /* else: still NO_ERROR */
3192 goto out;
3193 }
3194
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003195 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
3196out:
3197 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003198 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003199}
3200
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003201static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003202{
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003203 if (mdev->state.disk == D_DISKLESS &&
3204 /* no need to be mdev->state.conn == C_STANDALONE &&
3205 * we may want to delete a minor from a live replication group.
3206 */
3207 mdev->state.role == R_SECONDARY) {
Philipp Reisner369bea62011-07-06 23:04:44 +02003208 _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
3209 CS_VERBOSE + CS_WAIT_COMPLETE);
Philipp Reisner81fa2e62011-05-04 15:10:30 +02003210 idr_remove(&mdev->tconn->volumes, mdev->vnr);
3211 idr_remove(&minors, mdev_to_minor(mdev));
Lars Ellenberg113fef92013-03-22 18:14:40 -06003212 destroy_workqueue(mdev->submit.wq);
Philipp Reisner81fa2e62011-05-04 15:10:30 +02003213 del_gendisk(mdev->vdisk);
3214 synchronize_rcu();
3215 kref_put(&mdev->kref, &drbd_minor_destroy);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003216 return NO_ERROR;
3217 } else
3218 return ERR_MINOR_CONFIGURED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003219}
3220
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003221int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003222{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003223 enum drbd_ret_code retcode;
3224
3225 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3226 if (!adm_ctx.reply_skb)
3227 return retcode;
3228 if (retcode != NO_ERROR)
3229 goto out;
3230
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003231 retcode = adm_delete_minor(adm_ctx.mdev);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003232out:
3233 drbd_adm_finish(info, retcode);
3234 return 0;
3235}
3236
3237int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3238{
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003239	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003240 struct drbd_conf *mdev;
3241 unsigned i;
3242
3243 retcode = drbd_adm_prepare(skb, info, 0);
3244 if (!adm_ctx.reply_skb)
3245 return retcode;
3246 if (retcode != NO_ERROR)
3247 goto out;
3248
3249 if (!adm_ctx.tconn) {
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003250 retcode = ERR_RES_NOT_KNOWN;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003251 goto out;
3252 }
3253
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003254 /* demote */
3255 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3256 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3257 if (retcode < SS_SUCCESS) {
3258 drbd_msg_put_info("failed to demote");
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003259 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003260 }
3261 }
3262
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003263 retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3264 if (retcode < SS_SUCCESS) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003265 drbd_msg_put_info("failed to disconnect");
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003266 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003267 }
3268
3269 /* detach */
3270 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
Philipp Reisnercdfda632011-07-05 15:38:59 +02003271 retcode = adm_detach(mdev, 0);
Lars Ellenberg27012382012-07-24 10:13:55 +02003272 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003273 drbd_msg_put_info("failed to detach");
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003274 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003275 }
3276 }
3277
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003278 /* If we reach this, all volumes (of this tconn) are Secondary,
3279 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003280	 * actually stopped; state handling only does drbd_thread_stop_nowait(). */
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003281 drbd_thread_stop(&adm_ctx.tconn->worker);
3282
3283 /* Now, nothing can fail anymore */
3284
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003285 /* delete volumes */
3286 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3287 retcode = adm_delete_minor(mdev);
3288 if (retcode != NO_ERROR) {
3289 /* "can not happen" */
3290 drbd_msg_put_info("failed to delete volume");
Philipp Reisneref356262011-04-13 14:21:29 -07003291 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003292 }
3293 }
3294
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003295 /* delete connection */
3296 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02003297 list_del_rcu(&adm_ctx.tconn->all_tconn);
3298 synchronize_rcu();
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003299 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3300
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003301 retcode = NO_ERROR;
3302 } else {
3303 /* "can not happen" */
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003304 retcode = ERR_RES_IN_USE;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003305 drbd_msg_put_info("failed to delete connection");
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003306 }
Philipp Reisneref356262011-04-13 14:21:29 -07003307 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003308out:
3309 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003310 return 0;
3311}
3312
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003313int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003314{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003315 enum drbd_ret_code retcode;
3316
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02003317 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003318 if (!adm_ctx.reply_skb)
3319 return retcode;
3320 if (retcode != NO_ERROR)
3321 goto out;
3322
3323 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02003324 list_del_rcu(&adm_ctx.tconn->all_tconn);
3325 synchronize_rcu();
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003326 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3327
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003328 retcode = NO_ERROR;
Philipp Reisner774b3052011-02-22 02:07:03 -05003329 } else {
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003330 retcode = ERR_RES_IN_USE;
Philipp Reisner774b3052011-02-22 02:07:03 -05003331 }
3332
Lars Ellenberg992d6e92011-05-02 11:47:18 +02003333 if (retcode == NO_ERROR)
3334 drbd_thread_stop(&adm_ctx.tconn->worker);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003335out:
3336 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003337 return 0;
3338}
3339
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003340void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003341{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003342 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3343 struct sk_buff *msg;
3344 struct drbd_genlmsghdr *d_out;
3345 unsigned seq;
3346 int err = -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003347
Philipp Reisneref86b772012-12-06 10:34:34 +01003348 if (sib->sib_reason == SIB_SYNC_PROGRESS) {
3349 if (time_after(jiffies, mdev->rs_last_bcast + HZ))
3350 mdev->rs_last_bcast = jiffies;
3351 else
3352 return;
3353 }
Philipp Reisner328e0f12012-10-19 14:37:47 +02003354
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003355 seq = atomic_inc_return(&drbd_genl_seq);
3356 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3357 if (!msg)
3358 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003359
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003360 err = -EMSGSIZE;
3361 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3362	if (!d_out) /* cannot happen, but anyway. */
3363 goto nla_put_failure;
3364 d_out->minor = mdev_to_minor(mdev);
Andreas Gruenbacher6f9b5f82011-05-06 01:03:32 +02003365 d_out->ret_code = NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003366
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003367 if (nla_put_status_info(msg, mdev, sib))
3368 goto nla_put_failure;
3369 genlmsg_end(msg, d_out);
3370 err = drbd_genl_multicast_events(msg, 0);
3371 /* msg has been consumed or freed in netlink_broadcast() */
3372 if (err && err != -ESRCH)
3373 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003374
Philipp Reisnerb411b362009-09-25 16:07:19 -07003375 return;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003376
3377nla_put_failure:
3378 nlmsg_free(msg);
3379failed:
3380 dev_err(DEV, "Error %d while broadcasting event. "
3381 "Event seq:%u sib_reason:%u\n",
3382 err, seq, sib->sib_reason);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003383}
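/*
 * The SIB_SYNC_PROGRESS branch at the top of drbd_bcast_event() is plain
 * jiffies-based rate limiting: at most one sync-progress broadcast per HZ
 * (one second) per device.  The same idea as a small helper, with a
 * hypothetical timestamp argument (assumes <linux/jiffies.h>):
 */
#if 0
static bool example_ratelimit_1hz(unsigned long *last)
{
	if (!time_after(jiffies, *last + HZ))
		return false;	/* less than a second since the last event */
	*last = jiffies;
	return true;
}
#endif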