/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
	       return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.resource_name,
				adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

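/*
 * Minimal handler sketch, illustrative only: every .doit admin handler in
 * this file wraps its work in the drbd_adm_prepare()/drbd_adm_finish()
 * pair above; drbd_adm_set_role() further down is the canonical example.
 * drbd_adm_example() and do_the_actual_work() are placeholder names used
 * only in this sketch, not symbols defined anywhere in DRBD:
 *
 *	int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		enum drbd_ret_code retcode;
 *
 *		retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *		if (!adm_ctx.reply_skb)
 *			return retcode;
 *		if (retcode != NO_ERROR)
 *			goto out;
 *		retcode = do_the_actual_work(adm_ctx.mdev);
 *	out:
 *		drbd_adm_finish(info, retcode);
 *		return 0;
 *	}
 */
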
Philipp Reisner6b75dce2011-03-16 17:39:12 +0100289static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
290{
291 char *afs;
292
Andreas Gruenbacher089c0752011-06-14 18:28:09 +0200293 /* FIXME: A future version will not allow this case. */
294 if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
295 return;
296
297 switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
298 case AF_INET6:
299 afs = "ipv6";
300 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
301 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
302 break;
303 case AF_INET:
304 afs = "ipv4";
305 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
306 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
307 break;
308 default:
309 afs = "ssocks";
310 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
311 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
Philipp Reisner6b75dce2011-03-16 17:39:12 +0100312 }
Andreas Gruenbacher089c0752011-06-14 18:28:09 +0200313 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
Philipp Reisner6b75dce2011-03-16 17:39:12 +0100314}
315
Philipp Reisnerb411b362009-09-25 16:07:19 -0700316int drbd_khelper(struct drbd_conf *mdev, char *cmd)
317{
318 char *envp[] = { "HOME=/",
319 "TERM=linux",
320 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
Philipp Reisner6b75dce2011-03-16 17:39:12 +0100321 (char[20]) { }, /* address family */
322 (char[60]) { }, /* address */
Philipp Reisnerb411b362009-09-25 16:07:19 -0700323 NULL };
Philipp Reisner6b75dce2011-03-16 17:39:12 +0100324 char mb[12];
Philipp Reisnerb411b362009-09-25 16:07:19 -0700325 char *argv[] = {usermode_helper, cmd, mb, NULL };
Lars Ellenberg6f3465e2012-07-30 09:08:25 +0200326 struct drbd_tconn *tconn = mdev->tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +0100327 struct sib_info sib;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700328 int ret;
329
Lars Ellenberg6f3465e2012-07-30 09:08:25 +0200330 if (current == tconn->worker.task)
331 set_bit(CALLBACK_PENDING, &tconn->flags);
332
Philipp Reisnerb411b362009-09-25 16:07:19 -0700333 snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
Lars Ellenberg6f3465e2012-07-30 09:08:25 +0200334 setup_khelper_env(tconn, envp);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700335
Lars Ellenberg1090c052010-07-19 17:41:04 +0200336 /* The helper may take some time.
337 * write out any unsynced meta data changes now */
338 drbd_md_sync(mdev);
339
Philipp Reisnerb411b362009-09-25 16:07:19 -0700340 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +0100341 sib.sib_reason = SIB_HELPER_PRE;
342 sib.helper_name = cmd;
343 drbd_bcast_event(mdev, &sib);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700344 ret = call_usermodehelper(usermode_helper, argv, envp, 1);
345 if (ret)
346 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
347 usermode_helper, cmd, mb,
348 (ret >> 8) & 0xff, ret);
349 else
350 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
351 usermode_helper, cmd, mb,
352 (ret >> 8) & 0xff, ret);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +0100353 sib.sib_reason = SIB_HELPER_POST;
354 sib.helper_exit_code = ret;
355 drbd_bcast_event(mdev, &sib);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700356
Lars Ellenberg6f3465e2012-07-30 09:08:25 +0200357 if (current == tconn->worker.task)
358 clear_bit(CALLBACK_PENDING, &tconn->flags);
359
Philipp Reisnerb411b362009-09-25 16:07:19 -0700360 if (ret < 0) /* Ignore any ERRNOs we got. */
361 ret = 0;
362
363 return ret;
364}
365
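/*
 * Illustrative sketch of the resulting invocation, not taken from the
 * original source: for a helper command <cmd> on minor 0 with an IPv4 peer,
 * drbd_khelper() above ends up running roughly
 *
 *	<usermode_helper> <cmd> minor-0
 *
 * with DRBD_PEER_AF=ipv4 and DRBD_PEER_ADDRESS=<peer address> in the
 * environment, in addition to the fixed HOME, TERM and PATH entries.  The
 * concrete helper path is whatever the "usermode_helper" module parameter
 * is set to, and the address depends on the configured connection.
 */
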
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
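
/*
 * Worked example (illustrative, derived from the loop above): the input is
 * in KB, so ppsize(buf, 1048576) shifts 1048576 down once to 1024 with the
 * 'M' unit and stops, yielding "1024 MB", while a value below 10000 such as
 * ppsize(buf, 4096) is printed unshifted as "4096 KB".
 */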

/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns the outcome of the size change: unchanged, grew or shrunk on
 * success, dev_size_error on failure.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
				     "size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

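/*
 * Illustrative example only, following the logic above: when both sizes are
 * known, the smaller one wins, so a 100 GiB local backing device paired with
 * an 80 GiB peer yields an 80 GiB agreed size; a user-requested size
 * (u_size) below that shrinks it further, while a u_size larger than the
 * computed size triggers "Requested disk size is too big" and is ignored.
 */
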
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because, from 8.3.8 onwards, the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (mdev->tconn->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn) &&
		tconn->cstate == C_STANDALONE;
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}

int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	enforce_disk_conf_limits(new_disk_conf);

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	drbd_suspend_io(mdev);
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);

	if (new_disk_conf->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		mdev->ldev->md.flags |= MDF_AL_DISABLED;

	drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&mdev->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001258int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001259{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001260 struct drbd_conf *mdev;
1261 int err;
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01001262 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001263 enum determine_dev_size dd;
1264 sector_t max_possible_sectors;
1265 sector_t min_md_device_sectors;
1266 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001267 struct disk_conf *new_disk_conf = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001268 struct block_device *bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269 struct lru_cache *resync_lru = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001270 struct fifo_buffer *new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001271 union drbd_state ns, os;
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001272 enum drbd_state_rv rv;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001273 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001274
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001275 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1276 if (!adm_ctx.reply_skb)
1277 return retcode;
1278 if (retcode != NO_ERROR)
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001279 goto finish;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001280
1281 mdev = adm_ctx.mdev;
Philipp Reisner0e29d162011-02-18 14:23:11 +01001282 conn_reconfig_start(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001283
1284 /* if you want to reconfigure, please tear down first */
1285 if (mdev->state.disk > D_DISKLESS) {
1286 retcode = ERR_DISK_CONFIGURED;
1287 goto fail;
1288 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001289 /* It may just now have detached because of IO error. Make sure
1290	 * drbd_ldev_destroy is already done; we may end up here very fast,
1291 * e.g. if someone calls attach from the on-io-error handler,
1292 * to realize a "hot spare" feature (not that I'd recommend that) */
1293 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001294
Lars Ellenberg0c849662012-07-30 09:07:28 +02001295 /* make sure there is no leftover from previous force-detach attempts */
1296 clear_bit(FORCE_DETACH, &mdev->flags);
Lars Ellenbergedc9f5e2012-09-27 15:18:21 +02001297 clear_bit(WAS_IO_ERROR, &mdev->flags);
1298 clear_bit(WAS_READ_ERROR, &mdev->flags);
Lars Ellenberg0c849662012-07-30 09:07:28 +02001299
Lars Ellenberga3248962012-07-30 09:10:41 +02001300 /* and no leftover from previously aborted resync or verify, either */
1301 mdev->rs_total = 0;
1302 mdev->rs_failed = 0;
1303 atomic_set(&mdev->rs_pending_cnt, 0);
1304
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001305 /* allocation not in the IO path, drbdsetup context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001306 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1307 if (!nbc) {
1308 retcode = ERR_NOMEM;
1309 goto fail;
1310 }
Philipp Reisner39a1aa7f2012-08-08 21:19:09 +02001311 spin_lock_init(&nbc->md.uuid_lock);
1312
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001313 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1314 if (!new_disk_conf) {
1315 retcode = ERR_NOMEM;
1316 goto fail;
1317 }
1318 nbc->disk_conf = new_disk_conf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001319
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001320 set_disk_conf_defaults(new_disk_conf);
1321 err = disk_conf_from_attrs(new_disk_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001322 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001323 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001324 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001325 goto fail;
1326 }
1327
Philipp Reisnerd589a212011-05-04 10:06:52 +02001328 enforce_disk_conf_limits(new_disk_conf);
1329
Philipp Reisner9958c852011-05-03 16:19:31 +02001330 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1331 if (!new_plan) {
1332 retcode = ERR_NOMEM;
1333 goto fail;
1334 }
1335
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001336 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001337 retcode = ERR_MD_IDX_INVALID;
1338 goto fail;
1339 }
1340
Philipp Reisner44ed1672011-04-19 17:10:19 +02001341 rcu_read_lock();
1342 nc = rcu_dereference(mdev->tconn->net_conf);
1343 if (nc) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001344 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001345 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001346 retcode = ERR_STONITH_AND_PROT_A;
1347 goto fail;
1348 }
1349 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001350 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001351
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001352 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001353 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
Tejun Heoe525fd82010-11-13 11:55:17 +01001354 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001355 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001356 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001357 retcode = ERR_OPEN_DISK;
1358 goto fail;
1359 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001360 nbc->backing_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001361
Tejun Heoe525fd82010-11-13 11:55:17 +01001362 /*
1363 * meta_dev_idx >= 0: external fixed size, possibly multiple
1364 * drbd sharing one meta device. TODO in that case, paranoia
1365 * check that [md_bdev, meta_dev_idx] is not yet used by some
1366 * other drbd minor! (if you use drbd.conf + drbdadm, that
1367 * should check it for you already; but if you don't, or
1368 * someone fooled it, we need to double check here)
1369 */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001370 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001371 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001372 (new_disk_conf->meta_dev_idx < 0) ?
Tejun Heod4d77622010-11-13 11:55:18 +01001373 (void *)mdev : (void *)drbd_m_holder);
Tejun Heoe525fd82010-11-13 11:55:17 +01001374 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001375 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001376 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001377 retcode = ERR_OPEN_MD_DISK;
1378 goto fail;
1379 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001380 nbc->md_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001381
Tejun Heoe525fd82010-11-13 11:55:17 +01001382 if ((nbc->backing_bdev == nbc->md_bdev) !=
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001383 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1384 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001385 retcode = ERR_MD_IDX_INVALID;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001386 goto fail;
1387 }
1388
1389 resync_lru = lc_create("resync", drbd_bm_ext_cache,
Lars Ellenberg46a15bc2011-02-21 13:21:01 +01001390 1, 61, sizeof(struct bm_extent),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001391 offsetof(struct bm_extent, lce));
1392 if (!resync_lru) {
1393 retcode = ERR_NOMEM;
Tejun Heoe525fd82010-11-13 11:55:17 +01001394 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001395 }
1396
1397 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1398 drbd_md_set_sector_offsets(mdev, nbc);
1399
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001400 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001401 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1402 (unsigned long long) drbd_get_max_capacity(nbc),
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001403 (unsigned long long) new_disk_conf->disk_size);
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001404 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001405 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001406 }
1407
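	/*
	 * Internal or flexible external meta data (index < 0) grows its
	 * bitmap with the device, so the cap is DRBD_MAX_SECTORS_FLEX.
	 * A fixed external index uses a fixed-size meta data slot and is
	 * therefore capped at the smaller DRBD_MAX_SECTORS; see the
	 * "truncating very big lower level device" warning below.
	 */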
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001408 if (new_disk_conf->meta_dev_idx < 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001409 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1410 /* at least one MB, otherwise it does not make sense */
1411 min_md_device_sectors = (2<<10);
1412 } else {
1413 max_possible_sectors = DRBD_MAX_SECTORS;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001414 min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001415 }
1416
Philipp Reisnerb411b362009-09-25 16:07:19 -07001417 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001418 retcode = ERR_MD_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001419 dev_warn(DEV, "refusing attach: md-device too small, "
1420 "at least %llu sectors needed for this meta-disk type\n",
1421 (unsigned long long) min_md_device_sectors);
Tejun Heoe525fd82010-11-13 11:55:17 +01001422 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001423 }
1424
1425 /* Make sure the new disk is big enough
1426 * (we may currently be R_PRIMARY with no local disk...) */
1427 if (drbd_get_max_capacity(nbc) <
1428 drbd_get_capacity(mdev->this_bdev)) {
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001429 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001430 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001431 }
1432
1433 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1434
Lars Ellenberg13529942009-10-12 19:07:49 +02001435 if (nbc->known_size > max_possible_sectors) {
1436 dev_warn(DEV, "==> truncating very big lower level device "
1437		"to the current maximum of %llu sectors <==\n",
1438 (unsigned long long) max_possible_sectors);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001439 if (new_disk_conf->meta_dev_idx >= 0)
Lars Ellenberg13529942009-10-12 19:07:49 +02001440 dev_warn(DEV, "==>> using internal or flexible "
1441 "meta data may help <<==\n");
1442 }
1443
Philipp Reisnerb411b362009-09-25 16:07:19 -07001444 drbd_suspend_io(mdev);
1445 /* also wait for the last barrier ack. */
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01001446 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1447 * We need a way to either ignore barrier acks for barriers sent before a device
1448 * was attached, or a way to wait for all pending barrier acks to come in.
1449 * As barriers are counted per resource,
1450 * we'd need to suspend io on all devices of a resource.
1451 */
Philipp Reisner2aebfab2011-03-28 16:48:11 +02001452 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001453 /* and for any other previously queued work */
Philipp Reisnera21e9292011-02-08 15:08:49 +01001454 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001455
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001456 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1457 retcode = rv; /* FIXME: Type mismatch. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001458 drbd_resume_io(mdev);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001459 if (rv < SS_SUCCESS)
Tejun Heoe525fd82010-11-13 11:55:17 +01001460 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001461
1462 if (!get_ldev_if_state(mdev, D_ATTACHING))
1463 goto force_diskless;
1464
1465 drbd_md_set_sector_offsets(mdev, nbc);
1466
1467 if (!mdev->bitmap) {
1468 if (drbd_bm_init(mdev)) {
1469 retcode = ERR_NOMEM;
1470 goto force_diskless_dec;
1471 }
1472 }
1473
1474 retcode = drbd_md_read(mdev, nbc);
1475 if (retcode != NO_ERROR)
1476 goto force_diskless_dec;
1477
1478 if (mdev->state.conn < C_CONNECTED &&
1479 mdev->state.role == R_PRIMARY &&
1480 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1481 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1482 (unsigned long long)mdev->ed_uuid);
1483 retcode = ERR_DATA_NOT_CURRENT;
1484 goto force_diskless_dec;
1485 }
1486
1487 /* Since we are diskless, fix the activity log first... */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001488 if (drbd_check_al_size(mdev, new_disk_conf)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001489 retcode = ERR_NOMEM;
1490 goto force_diskless_dec;
1491 }
1492
1493 /* Prevent shrinking of consistent devices ! */
1494 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001495 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001496 dev_warn(DEV, "refusing to truncate a consistent device\n");
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001497 retcode = ERR_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001498 goto force_diskless_dec;
1499 }
1500
Philipp Reisnerb411b362009-09-25 16:07:19 -07001501 /* Reset the "barriers don't work" bits here, then force meta data to
1502 * be written, to ensure we determine if barriers are supported. */
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001503 if (new_disk_conf->md_flushes)
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001504 clear_bit(MD_NO_FUA, &mdev->flags);
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001505 else
1506 set_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001507
1508 /* Point of no return reached.
1509 * Devices and memory are no longer released by error cleanup below.
1510 * now mdev takes over responsibility, and the state engine should
1511 * clean it up somewhere. */
1512 D_ASSERT(mdev->ldev == NULL);
1513 mdev->ldev = nbc;
1514 mdev->resync = resync_lru;
Philipp Reisner9958c852011-05-03 16:19:31 +02001515 mdev->rs_plan_s = new_plan;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001516 nbc = NULL;
1517 resync_lru = NULL;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001518 new_disk_conf = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001519 new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001520
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001521 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001522
1523 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1524 set_bit(CRASHED_PRIMARY, &mdev->flags);
1525 else
1526 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1527
Philipp Reisner894c6a92010-06-18 16:03:20 +02001528 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02001529 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001530 set_bit(CRASHED_PRIMARY, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001531
1532 mdev->send_cnt = 0;
1533 mdev->recv_cnt = 0;
1534 mdev->read_cnt = 0;
1535 mdev->writ_cnt = 0;
1536
Philipp Reisner99432fc2011-05-20 16:39:13 +02001537 drbd_reconsider_max_bio_size(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001538
1539 /* If I am currently not R_PRIMARY,
1540 * but meta data primary indicator is set,
1541 * I just now recover from a hard crash,
1542 * and have been R_PRIMARY before that crash.
1543 *
1544 * Now, if I had no connection before that crash
1545 * (have been degraded R_PRIMARY), chances are that
1546 * I won't find my peer now either.
1547 *
1548 * In that case, and _only_ in that case,
1549 * we use the degr-wfc-timeout instead of the default,
1550 * so we can automatically recover from a crash of a
1551 * degraded but active "cluster" after a certain timeout.
1552 */
1553 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1554 if (mdev->state.role != R_PRIMARY &&
1555 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1556 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1557 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1558
Bart Van Assche24c48302011-05-21 18:32:29 +02001559 dd = drbd_determine_dev_size(mdev, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001560 if (dd == dev_size_error) {
1561 retcode = ERR_NOMEM_BITMAP;
1562 goto force_diskless_dec;
1563 } else if (dd == grew)
1564 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1565
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001566 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
1567 (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
1568 drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001569 dev_info(DEV, "Assuming that all blocks are out of sync "
1570 "(aka FullSync)\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001571 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1572 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001573 retcode = ERR_IO_MD_DISK;
1574 goto force_diskless_dec;
1575 }
1576 } else {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001577 if (drbd_bitmap_io(mdev, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01001578 "read from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001579 retcode = ERR_IO_MD_DISK;
1580 goto force_diskless_dec;
1581 }
1582 }
1583
Philipp Reisner07782862010-08-31 12:00:50 +02001584 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1585 drbd_suspend_al(mdev); /* IO is still suspended here... */
1586
Philipp Reisner87eeee42011-01-19 14:16:30 +01001587 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02001588 os = drbd_read_state(mdev);
1589 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001590 /* If MDF_CONSISTENT is not set go into inconsistent state,
1591 otherwise investigate MDF_WasUpToDate...
1592 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1593 otherwise into D_CONSISTENT state.
1594 */
1595 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1596 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1597 ns.disk = D_CONSISTENT;
1598 else
1599 ns.disk = D_OUTDATED;
1600 } else {
1601 ns.disk = D_INCONSISTENT;
1602 }
1603
1604 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1605 ns.pdsk = D_OUTDATED;
1606
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001607 rcu_read_lock();
1608 if (ns.disk == D_CONSISTENT &&
1609 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001610 ns.disk = D_UP_TO_DATE;
1611
1612 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1613 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1614 this point, because drbd_request_state() modifies these
1615 flags. */
1616
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001617 if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
Philipp Reisner4035e4c2012-10-01 18:04:12 +02001618 mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001619 else
1620 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1621
1622 rcu_read_unlock();
1623
Philipp Reisnerb411b362009-09-25 16:07:19 -07001624	/* In case we are C_CONNECTED, postpone any decision on the new disk
1625	 state until after the negotiation phase. */
1626 if (mdev->state.conn == C_CONNECTED) {
1627 mdev->new_state_tmp.i = ns.i;
1628 ns.i = os.i;
1629 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02001630
1631 /* We expect to receive up-to-date UUIDs soon.
1632 To avoid a race in receive_state, free p_uuid while
1633 holding req_lock. I.e. atomic with the state change */
1634 kfree(mdev->p_uuid);
1635 mdev->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001636 }
1637
1638 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001639 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001640
1641 if (rv < SS_SUCCESS)
1642 goto force_diskless_dec;
1643
Philipp Reisnercdfda632011-07-05 15:38:59 +02001644 mod_timer(&mdev->request_timer, jiffies + HZ);
1645
Philipp Reisnerb411b362009-09-25 16:07:19 -07001646 if (mdev->state.role == R_PRIMARY)
1647 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1648 else
1649 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1650
1651 drbd_md_mark_dirty(mdev);
1652 drbd_md_sync(mdev);
1653
1654 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1655 put_ldev(mdev);
Philipp Reisner0e29d162011-02-18 14:23:11 +01001656 conn_reconfig_done(mdev->tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001657 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001658 return 0;
1659
1660 force_diskless_dec:
1661 put_ldev(mdev);
1662 force_diskless:
Philipp Reisner9510b242011-07-01 17:00:57 +02001663 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001664 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001665 fail:
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001666 conn_reconfig_done(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001667 if (nbc) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001668 if (nbc->backing_bdev)
1669 blkdev_put(nbc->backing_bdev,
1670 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1671 if (nbc->md_bdev)
1672 blkdev_put(nbc->md_bdev,
1673 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001674 kfree(nbc);
1675 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001676 kfree(new_disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001677 lc_destroy(resync_lru);
Philipp Reisner9958c852011-05-03 16:19:31 +02001678 kfree(new_plan);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001679
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001680 finish:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001681 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001682 return 0;
1683}
1684
Philipp Reisnercdfda632011-07-05 15:38:59 +02001685static int adm_detach(struct drbd_conf *mdev, int force)
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001686{
Philipp Reisner19f83c72011-03-29 14:21:03 +02001687 enum drbd_state_rv retcode;
Lars Ellenberg009ba892011-05-02 11:51:31 +02001688 int ret;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001689
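	/*
	 * A forced detach does not drain application or meta data IO first;
	 * it only marks the detach as forced and pushes the disk straight
	 * to D_FAILED.
	 */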
1690 if (force) {
Lars Ellenberg0c849662012-07-30 09:07:28 +02001691 set_bit(FORCE_DETACH, &mdev->flags);
Philipp Reisnercdfda632011-07-05 15:38:59 +02001692 drbd_force_state(mdev, NS(disk, D_FAILED));
1693 retcode = SS_SUCCESS;
1694 goto out;
1695 }
1696
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001697 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
Philipp Reisner0cfac5d2011-11-10 12:12:52 +01001698 drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
Lars Ellenberg009ba892011-05-02 11:51:31 +02001699 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
Philipp Reisner0cfac5d2011-11-10 12:12:52 +01001700 drbd_md_put_buffer(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001701 /* D_FAILED will transition to DISKLESS. */
1702 ret = wait_event_interruptible(mdev->misc_wait,
1703 mdev->state.disk != D_FAILED);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001704 drbd_resume_io(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001705 if ((int)retcode == (int)SS_IS_DISKLESS)
1706 retcode = SS_NOTHING_TO_DO;
1707 if (ret)
1708 retcode = ERR_INTR;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001709out:
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001710 return retcode;
1711}
1712
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001713/* Detaching the disk is a process in multiple stages. First we need to lock
1714 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1715 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1716 * internal references as well.
1717	 * Only then have we finally detached. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001718int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001719{
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001720 enum drbd_ret_code retcode;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001721 struct detach_parms parms = { };
1722 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001723
1724 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1725 if (!adm_ctx.reply_skb)
1726 return retcode;
1727 if (retcode != NO_ERROR)
1728 goto out;
1729
Philipp Reisnercdfda632011-07-05 15:38:59 +02001730 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1731 err = detach_parms_from_attrs(&parms, info);
1732 if (err) {
1733 retcode = ERR_MANDATORY_TAG;
1734 drbd_msg_put_info(from_attrs_err_to_txt(err));
1735 goto out;
1736 }
1737 }
1738
1739 retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001740out:
1741 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001742 return 0;
1743}
1744
Lars Ellenbergf3990022011-03-23 14:31:09 +01001745static bool conn_resync_running(struct drbd_tconn *tconn)
1746{
1747 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001748 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001749 int vnr;
1750
Philipp Reisner695d08f2011-04-11 22:53:32 -07001751 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001752 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1753 if (mdev->state.conn == C_SYNC_SOURCE ||
1754 mdev->state.conn == C_SYNC_TARGET ||
1755 mdev->state.conn == C_PAUSED_SYNC_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001756 mdev->state.conn == C_PAUSED_SYNC_T) {
1757 rv = true;
1758 break;
1759 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001760 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001761 rcu_read_unlock();
1762
1763 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001764}
1765
1766static bool conn_ov_running(struct drbd_tconn *tconn)
1767{
1768 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001769 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001770 int vnr;
1771
Philipp Reisner695d08f2011-04-11 22:53:32 -07001772 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001773 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1774 if (mdev->state.conn == C_VERIFY_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001775 mdev->state.conn == C_VERIFY_T) {
1776 rv = true;
1777 break;
1778 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001779 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001780 rcu_read_unlock();
1781
1782 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001783}
1784
Philipp Reisnercd643972011-04-13 18:00:59 -07001785static enum drbd_ret_code
Philipp Reisner44ed1672011-04-19 17:10:19 +02001786_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
Philipp Reisnercd643972011-04-13 18:00:59 -07001787{
1788 struct drbd_conf *mdev;
1789 int i;
1790
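	/*
	 * While connected to a peer speaking a protocol version below 100,
	 * the wire protocol, the two-primaries setting and the integrity
	 * algorithm cannot be changed on the fly.
	 */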
Philipp Reisnerdcb20d12011-05-16 14:30:24 +02001791 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1792 if (new_conf->wire_protocol != old_conf->wire_protocol)
1793 return ERR_NEED_APV_100;
1794
1795 if (new_conf->two_primaries != old_conf->two_primaries)
1796 return ERR_NEED_APV_100;
1797
1798 if (!new_conf->integrity_alg != !old_conf->integrity_alg)
1799 return ERR_NEED_APV_100;
1800
1801 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1802 return ERR_NEED_APV_100;
1803 }
1804
1805 if (!new_conf->two_primaries &&
1806 conn_highest_role(tconn) == R_PRIMARY &&
1807 conn_highest_peer(tconn) == R_PRIMARY)
1808 return ERR_NEED_ALLOW_TWO_PRI;
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001809
Philipp Reisnercd643972011-04-13 18:00:59 -07001810 if (new_conf->two_primaries &&
1811 (new_conf->wire_protocol != DRBD_PROT_C))
1812 return ERR_NOT_PROTO_C;
1813
Philipp Reisnercd643972011-04-13 18:00:59 -07001814 idr_for_each_entry(&tconn->volumes, mdev, i) {
1815 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001816 enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
Philipp Reisnercd643972011-04-13 18:00:59 -07001817 put_ldev(mdev);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001818 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
Philipp Reisnercd643972011-04-13 18:00:59 -07001819 return ERR_STONITH_AND_PROT_A;
Philipp Reisnercd643972011-04-13 18:00:59 -07001820 }
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02001821 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
Lars Ellenbergeb120102012-08-01 12:46:20 +02001822 return ERR_DISCARD_IMPOSSIBLE;
Philipp Reisnercd643972011-04-13 18:00:59 -07001823 }
Philipp Reisnercd643972011-04-13 18:00:59 -07001824
1825 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1826 return ERR_CONG_NOT_PROTO_A;
1827
1828 return NO_ERROR;
1829}
1830
Philipp Reisner44ed1672011-04-19 17:10:19 +02001831static enum drbd_ret_code
1832check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1833{
1834	enum drbd_ret_code rv;
1835 struct drbd_conf *mdev;
1836 int i;
1837
1838 rcu_read_lock();
1839 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1840 rcu_read_unlock();
1841
1842 /* tconn->volumes protected by genl_lock() here */
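	/* Make sure every volume has its bitmap allocated up front, so a
	 * failed allocation can still be reported cleanly as ERR_NOMEM. */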
1843 idr_for_each_entry(&tconn->volumes, mdev, i) {
1844 if (!mdev->bitmap) {
1845			if (drbd_bm_init(mdev))
1846 return ERR_NOMEM;
1847 }
1848 }
1849
1850 return rv;
1851}
1852
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001853struct crypto {
1854 struct crypto_hash *verify_tfm;
1855 struct crypto_hash *csums_tfm;
1856 struct crypto_hash *cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001857 struct crypto_hash *integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001858};
1859
1860static int
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001861alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001862{
1863 if (!tfm_name[0])
1864 return NO_ERROR;
1865
1866 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1867 if (IS_ERR(*tfm)) {
1868 *tfm = NULL;
1869 return err_alg;
1870 }
1871
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001872 return NO_ERROR;
1873}
1874
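/*
 * Allocate all hash transforms named in new_conf.  On failure the ERR_*_ALG
 * code of the offending algorithm is returned; callers clean up whatever was
 * already allocated via free_crypto().
 */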
1875static enum drbd_ret_code
1876alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1877{
1878 char hmac_name[CRYPTO_MAX_ALG_NAME];
1879 enum drbd_ret_code rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001880
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001881 rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1882 ERR_CSUMS_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001883 if (rv != NO_ERROR)
1884 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001885 rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1886 ERR_VERIFY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001887 if (rv != NO_ERROR)
1888 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001889 rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1890 ERR_INTEGRITY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001891 if (rv != NO_ERROR)
1892 return rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001893 if (new_conf->cram_hmac_alg[0] != 0) {
1894 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1895 new_conf->cram_hmac_alg);
1896
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001897 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1898 ERR_AUTH_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001899 }
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001900
1901 return rv;
1902}
1903
1904static void free_crypto(struct crypto *crypto)
1905{
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001906 crypto_free_hash(crypto->cram_hmac_tfm);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001907 crypto_free_hash(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001908 crypto_free_hash(crypto->csums_tfm);
1909 crypto_free_hash(crypto->verify_tfm);
1910}
1911
Lars Ellenbergf3990022011-03-23 14:31:09 +01001912int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1913{
1914 enum drbd_ret_code retcode;
1915 struct drbd_tconn *tconn;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001916 struct net_conf *old_conf, *new_conf = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001917 int err;
1918 int ovr; /* online verify running */
1919 int rsr; /* re-sync running */
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001920 struct crypto crypto = { };
Lars Ellenbergf3990022011-03-23 14:31:09 +01001921
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02001922 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001923 if (!adm_ctx.reply_skb)
1924 return retcode;
1925 if (retcode != NO_ERROR)
1926 goto out;
1927
1928 tconn = adm_ctx.tconn;
1929
1930 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1931 if (!new_conf) {
1932 retcode = ERR_NOMEM;
1933 goto out;
1934 }
1935
Lars Ellenbergf3990022011-03-23 14:31:09 +01001936 conn_reconfig_start(tconn);
1937
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001938 mutex_lock(&tconn->data.mutex);
Philipp Reisnera0095502011-05-03 13:14:15 +02001939 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001940 old_conf = tconn->net_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001941
1942 if (!old_conf) {
1943 drbd_msg_put_info("net conf missing, try connect");
1944 retcode = ERR_INVALID_REQUEST;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001945 goto fail;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001946 }
1947
1948 *new_conf = *old_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001949 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001950 set_net_conf_defaults(new_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001951
Lars Ellenbergf3990022011-03-23 14:31:09 +01001952 err = net_conf_from_attrs_for_change(new_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02001953 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001954 retcode = ERR_MANDATORY_TAG;
1955 drbd_msg_put_info(from_attrs_err_to_txt(err));
1956 goto fail;
1957 }
1958
Philipp Reisnercd643972011-04-13 18:00:59 -07001959 retcode = check_net_options(tconn, new_conf);
1960 if (retcode != NO_ERROR)
1961 goto fail;
1962
Lars Ellenbergf3990022011-03-23 14:31:09 +01001963 /* re-sync running */
1964 rsr = conn_resync_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001965 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001966 retcode = ERR_CSUMS_RESYNC_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001967 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001968 }
1969
Lars Ellenbergf3990022011-03-23 14:31:09 +01001970 /* online verify running */
1971 ovr = conn_ov_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001972 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1973 retcode = ERR_VERIFY_RUNNING;
1974 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001975 }
1976
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001977 retcode = alloc_crypto(&crypto, new_conf);
1978 if (retcode != NO_ERROR)
1979 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001980
Philipp Reisner44ed1672011-04-19 17:10:19 +02001981 rcu_assign_pointer(tconn->net_conf, new_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001982
1983 if (!rsr) {
1984 crypto_free_hash(tconn->csums_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001985 tconn->csums_tfm = crypto.csums_tfm;
1986 crypto.csums_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001987 }
1988 if (!ovr) {
1989 crypto_free_hash(tconn->verify_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001990 tconn->verify_tfm = crypto.verify_tfm;
1991 crypto.verify_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001992 }
1993
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001994 crypto_free_hash(tconn->integrity_tfm);
1995 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02001996 if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001997 /* Do this without trying to take tconn->data.mutex again. */
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02001998 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001999
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002000 crypto_free_hash(tconn->cram_hmac_tfm);
2001 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2002
Philipp Reisnera0095502011-05-03 13:14:15 +02002003 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002004 mutex_unlock(&tconn->data.mutex);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002005 synchronize_rcu();
2006 kfree(old_conf);
2007
Lars Ellenbergf3990022011-03-23 14:31:09 +01002008 if (tconn->cstate >= C_WF_REPORT_PARAMS)
2009 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
2010
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002011 goto done;
2012
Lars Ellenbergf3990022011-03-23 14:31:09 +01002013 fail:
Philipp Reisnera0095502011-05-03 13:14:15 +02002014 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002015 mutex_unlock(&tconn->data.mutex);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002016 free_crypto(&crypto);
Lars Ellenbergf3990022011-03-23 14:31:09 +01002017 kfree(new_conf);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002018 done:
Lars Ellenbergf3990022011-03-23 14:31:09 +01002019 conn_reconfig_done(tconn);
2020 out:
2021 drbd_adm_finish(info, retcode);
2022 return 0;
2023}
2024
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002025int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002026{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002027 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002028 struct net_conf *old_conf, *new_conf = NULL;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002029 struct crypto crypto = { };
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002030 struct drbd_tconn *tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002031 enum drbd_ret_code retcode;
2032 int i;
2033 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002034
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002035 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002036
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002037 if (!adm_ctx.reply_skb)
2038 return retcode;
2039 if (retcode != NO_ERROR)
2040 goto out;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002041 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2042 drbd_msg_put_info("connection endpoint(s) missing");
2043 retcode = ERR_INVALID_REQUEST;
2044 goto out;
2045 }
2046
2047 /* No need for _rcu here. All reconfiguration is
2048 * strictly serialized on genl_lock(). We are protected against
2049 * concurrent reconfiguration/addition/deletion */
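	/* Refuse the new connection if either the local or the peer address
	 * is already in use by some other connection. */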
2050 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2051 if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
2052 !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
2053 retcode = ERR_LOCAL_ADDR;
2054 goto out;
2055 }
2056
2057 if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
2058 !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
2059 retcode = ERR_PEER_ADDR;
2060 goto out;
2061 }
2062 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002063
2064 tconn = adm_ctx.tconn;
Philipp Reisner80883192011-02-18 14:56:45 +01002065 conn_reconfig_start(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002066
Philipp Reisner80883192011-02-18 14:56:45 +01002067 if (tconn->cstate > C_STANDALONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002068 retcode = ERR_NET_CONFIGURED;
2069 goto fail;
2070 }
2071
Andreas Gruenbachera209b4a2011-08-17 12:43:25 +02002072 /* allocation not in the IO path, drbdsetup / netlink process context */
Lars Ellenberg5979e362011-04-27 21:09:55 +02002073 new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002074 if (!new_conf) {
2075 retcode = ERR_NOMEM;
2076 goto fail;
2077 }
2078
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002079 set_net_conf_defaults(new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002080
Lars Ellenbergf3990022011-03-23 14:31:09 +01002081 err = net_conf_from_attrs(new_conf, info);
Lars Ellenberg25e40932011-08-19 10:39:00 +02002082 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002083 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002084 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002085 goto fail;
2086 }
2087
Philipp Reisnercd643972011-04-13 18:00:59 -07002088 retcode = check_net_options(tconn, new_conf);
2089 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002090 goto fail;
Philipp Reisner422028b2010-10-27 11:12:07 +02002091
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002092 retcode = alloc_crypto(&crypto, new_conf);
2093 if (retcode != NO_ERROR)
2094 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002095
Philipp Reisnerb411b362009-09-25 16:07:19 -07002096 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2097
Philipp Reisner80883192011-02-18 14:56:45 +01002098 conn_flush_workqueue(tconn);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002099
Philipp Reisnera0095502011-05-03 13:14:15 +02002100 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002101 old_conf = tconn->net_conf;
2102 if (old_conf) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002103 retcode = ERR_NET_CONFIGURED;
Philipp Reisnera0095502011-05-03 13:14:15 +02002104 mutex_unlock(&tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002105 goto fail;
2106 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002107 rcu_assign_pointer(tconn->net_conf, new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002108
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002109 conn_free_crypto(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002110 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002111 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002112 tconn->csums_tfm = crypto.csums_tfm;
2113 tconn->verify_tfm = crypto.verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002114
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002115 tconn->my_addr_len = nla_len(adm_ctx.my_addr);
2116 memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
2117 tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
2118 memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
2119
Philipp Reisnera0095502011-05-03 13:14:15 +02002120 mutex_unlock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002121
Philipp Reisner695d08f2011-04-11 22:53:32 -07002122 rcu_read_lock();
Philipp Reisner80883192011-02-18 14:56:45 +01002123 idr_for_each_entry(&tconn->volumes, mdev, i) {
2124 mdev->send_cnt = 0;
2125 mdev->recv_cnt = 0;
Philipp Reisner80883192011-02-18 14:56:45 +01002126 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07002127 rcu_read_unlock();
Lars Ellenberg5ee743e2011-04-26 16:22:25 +02002128
2129 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2130
Philipp Reisner80883192011-02-18 14:56:45 +01002131 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002132 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002133 return 0;
2134
2135fail:
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002136 free_crypto(&crypto);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002137 kfree(new_conf);
2138
Philipp Reisner80883192011-02-18 14:56:45 +01002139 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002140out:
2141 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002142 return 0;
2143}
2144
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002145static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2146{
2147 enum drbd_state_rv rv;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002148
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002149 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2150 force ? CS_HARD : 0);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002151
2152 switch (rv) {
2153 case SS_NOTHING_TO_DO:
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002154 break;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002155 case SS_ALREADY_STANDALONE:
2156 return SS_SUCCESS;
2157 case SS_PRIMARY_NOP:
2158 /* Our state checking code wants to see the peer outdated. */
2159 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002160 pdsk, D_OUTDATED), CS_VERBOSE);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002161 break;
2162 case SS_CW_FAILED_BY_PEER:
2163 /* The peer probably wants to see us outdated. */
2164 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2165 disk, D_OUTDATED), 0);
2166 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002167 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2168 CS_HARD);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002169 }
2170 break;
2171 default:;
2172 /* no special handling necessary */
2173 }
2174
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002175 if (rv >= SS_SUCCESS) {
2176 enum drbd_state_rv rv2;
2177 /* No one else can reconfigure the network while I am here.
2178	 * The state handling only uses drbd_thread_stop_nowait();
2179	 * here we really want to wait until the receiver is gone.
2180 */
2181 drbd_thread_stop(&adm_ctx.tconn->receiver);
2182
2183 /* Race breaker. This additional state change request may be
2184 * necessary, if this was a forced disconnect during a receiver
2185 * restart. We may have "killed" the receiver thread just
2186 * after drbdd_init() returned. Typically, we should be
2187 * C_STANDALONE already, now, and this becomes a no-op.
2188	 * C_STANDALONE already by now, and this becomes a no-op.
2189 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2190 CS_VERBOSE | CS_HARD);
2191 if (rv2 < SS_SUCCESS)
2192 conn_err(tconn,
2193 "unexpected rv2=%d in conn_try_disconnect()\n",
2194 rv2);
2195 }
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002196 return rv;
2197}
2198
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002199int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002200{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002201 struct disconnect_parms parms;
2202 struct drbd_tconn *tconn;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002203 enum drbd_state_rv rv;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002204 enum drbd_ret_code retcode;
2205 int err;
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002206
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002207 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002208 if (!adm_ctx.reply_skb)
2209 return retcode;
2210 if (retcode != NO_ERROR)
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002211 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002212
2213 tconn = adm_ctx.tconn;
2214 memset(&parms, 0, sizeof(parms));
2215 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002216 err = disconnect_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002217 if (err) {
2218 retcode = ERR_MANDATORY_TAG;
2219 drbd_msg_put_info(from_attrs_err_to_txt(err));
2220 goto fail;
2221 }
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002222 }
2223
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002224 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2225 if (rv < SS_SUCCESS)
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002226 retcode = rv; /* FIXME: Type mismatch. */
2227 else
2228 retcode = NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002229 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002230 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002231 return 0;
2232}
2233
2234void resync_after_online_grow(struct drbd_conf *mdev)
2235{
2236 int iass; /* I am sync source */
2237
2238 dev_info(DEV, "Resync of new storage after online grow\n");
2239 if (mdev->state.role != mdev->state.peer)
2240 iass = (mdev->state.role == R_PRIMARY);
2241 else
Lars Ellenberg427c0432012-08-01 12:43:01 +02002242 iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002243
2244 if (iass)
2245 drbd_start_resync(mdev, C_SYNC_SOURCE);
2246 else
2247 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2248}
2249
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002250int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002251{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002252 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002253 struct resize_parms rs;
2254 struct drbd_conf *mdev;
2255 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002256 enum determine_dev_size dd;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002257 enum dds_flags ddsf;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002258 sector_t u_size;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002259 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002260
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002261 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2262 if (!adm_ctx.reply_skb)
2263 return retcode;
2264 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002265 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002266
2267 memset(&rs, 0, sizeof(struct resize_parms));
2268 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002269 err = resize_parms_from_attrs(&rs, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002270 if (err) {
2271 retcode = ERR_MANDATORY_TAG;
2272 drbd_msg_put_info(from_attrs_err_to_txt(err));
2273 goto fail;
2274 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002275 }
2276
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002277 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002278 if (mdev->state.conn > C_CONNECTED) {
2279 retcode = ERR_RESIZE_RESYNC;
2280 goto fail;
2281 }
2282
2283 if (mdev->state.role == R_SECONDARY &&
2284 mdev->state.peer == R_SECONDARY) {
2285 retcode = ERR_NO_PRIMARY;
2286 goto fail;
2287 }
2288
2289 if (!get_ldev(mdev)) {
2290 retcode = ERR_NO_DISK;
2291 goto fail;
2292 }
2293
Philipp Reisner31890f42011-01-19 14:12:51 +01002294 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002295 retcode = ERR_NEED_APV_93;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002296 goto fail_ldev;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002297 }
2298
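	/*
	 * If the requested size differs from the configured disk_size,
	 * prepare a new disk_conf up front (the allocation may fail) and
	 * swap it in under conf_update further down.
	 */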
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002299 rcu_read_lock();
2300 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2301 rcu_read_unlock();
2302 if (u_size != (sector_t)rs.resize_size) {
2303 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2304 if (!new_disk_conf) {
2305 retcode = ERR_NOMEM;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002306 goto fail_ldev;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002307 }
2308 }
2309
Philipp Reisner087c2492010-03-26 13:49:56 +01002310 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002311 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002312
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002313 if (new_disk_conf) {
2314 mutex_lock(&mdev->tconn->conf_update);
2315 old_disk_conf = mdev->ldev->disk_conf;
2316 *new_disk_conf = *old_disk_conf;
2317 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2318 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2319 mutex_unlock(&mdev->tconn->conf_update);
2320 synchronize_rcu();
2321 kfree(old_disk_conf);
2322 }
2323
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002324 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
Bart Van Assche24c48302011-05-21 18:32:29 +02002325 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002326 drbd_md_sync(mdev);
2327 put_ldev(mdev);
2328 if (dd == dev_size_error) {
2329 retcode = ERR_NOMEM_BITMAP;
2330 goto fail;
2331 }
2332
Philipp Reisner087c2492010-03-26 13:49:56 +01002333 if (mdev->state.conn == C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002334 if (dd == grew)
2335 set_bit(RESIZE_PENDING, &mdev->flags);
2336
2337 drbd_send_uuids(mdev);
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002338 drbd_send_sizes(mdev, 1, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002339 }
2340
2341 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002342 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002343 return 0;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002344
2345 fail_ldev:
2346 put_ldev(mdev);
2347 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002348}
2349
Lars Ellenbergf3990022011-03-23 14:31:09 +01002350int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002351{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002352 enum drbd_ret_code retcode;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002353 struct drbd_tconn *tconn;
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002354 struct res_opts res_opts;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002355 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002356
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002357 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002358 if (!adm_ctx.reply_skb)
2359 return retcode;
2360 if (retcode != NO_ERROR)
2361 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002362 tconn = adm_ctx.tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002363
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002364 res_opts = tconn->res_opts;
Lars Ellenberg5979e362011-04-27 21:09:55 +02002365 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002366 set_res_opts_defaults(&res_opts);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002367
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002368 err = res_opts_from_attrs(&res_opts, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02002369 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002371 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002372 goto fail;
2373 }
2374
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002375 err = set_resource_options(tconn, &res_opts);
2376 if (err) {
2377 retcode = ERR_INVALID_REQUEST;
2378 if (err == -ENOMEM)
2379 retcode = ERR_NOMEM;
Philipp Reisner778f2712010-07-06 11:14:00 +02002380 }
2381
Philipp Reisnerb411b362009-09-25 16:07:19 -07002382fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002383 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002384 return 0;
2385}
2386
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002387int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002388{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002389 struct drbd_conf *mdev;
 2390	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2391
2392 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2393 if (!adm_ctx.reply_skb)
2394 return retcode;
2395 if (retcode != NO_ERROR)
2396 goto out;
2397
2398 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002399
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002400 /* If there is still bitmap IO pending, probably because of a previous
Lars Ellenberg970fbde2012-07-30 09:11:38 +02002401 * resync just being finished, wait for it before requesting a new resync.
 2402	 * Also wait for its after_state_ch(). */
Lars Ellenberg5016b822012-05-07 12:00:56 +02002403 drbd_suspend_io(mdev);
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002404 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg970fbde2012-07-30 09:11:38 +02002405 drbd_flush_workqueue(mdev);
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002406
Philipp Reisnerb411b362009-09-25 16:07:19 -07002407 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2408
2409 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2410 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2411
2412 while (retcode == SS_NEED_CONNECTION) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01002413 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002414 if (mdev->state.conn < C_CONNECTED)
2415 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002416 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002417
2418 if (retcode != SS_NEED_CONNECTION)
2419 break;
2420
2421 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2422 }
Lars Ellenberg5016b822012-05-07 12:00:56 +02002423 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002424
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002425out:
2426 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002427 return 0;
2428}
2429
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002430static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2431 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002432{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002433 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002434
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002435 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2436 if (!adm_ctx.reply_skb)
2437 return retcode;
2438 if (retcode != NO_ERROR)
2439 goto out;
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002440
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002441 retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2442out:
2443 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002444 return 0;
2445}
2446
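/* Bitmap I/O callback used by invalidate-peer below: set all bits in the
 * on-disk bitmap via drbd_bmio_set_n_write() and suspend activity log
 * updates, so that the peer gets resynced in full. */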
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002447static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2448{
2449 int rv;
2450
2451 rv = drbd_bmio_set_n_write(mdev);
2452 drbd_suspend_al(mdev);
2453 return rv;
2454}
2455
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002456int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002457{
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002458 int retcode; /* drbd_ret_code, drbd_state_rv */
2459 struct drbd_conf *mdev;
2460
2461 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2462 if (!adm_ctx.reply_skb)
2463 return retcode;
2464 if (retcode != NO_ERROR)
2465 goto out;
2466
2467 mdev = adm_ctx.mdev;
2468
Lars Ellenberg5016b822012-05-07 12:00:56 +02002469 /* If there is still bitmap IO pending, probably because of a previous
Lars Ellenberg970fbde2012-07-30 09:11:38 +02002470 * resync just being finished, wait for it before requesting a new resync.
 2471	 * Also wait for its after_state_ch(). */
Lars Ellenberg5016b822012-05-07 12:00:56 +02002472 drbd_suspend_io(mdev);
2473 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg970fbde2012-07-30 09:11:38 +02002474 drbd_flush_workqueue(mdev);
Lars Ellenberg5016b822012-05-07 12:00:56 +02002475
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002476 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
2477 if (retcode < SS_SUCCESS) {
2478 if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
2479 /* The peer will get a resync upon connect anyways.
2480 * Just make that into a full resync. */
2481 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
2482 if (retcode >= SS_SUCCESS) {
2483 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
2484 "set_n_write from invalidate_peer",
2485 BM_LOCKED_SET_ALLOWED))
2486 retcode = ERR_IO_MD_DISK;
2487 }
2488 } else
2489 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
2490 }
Lars Ellenberg5016b822012-05-07 12:00:56 +02002491 drbd_resume_io(mdev);
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002492
2493out:
2494 drbd_adm_finish(info, retcode);
2495 return 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002496}
Philipp Reisnerb411b362009-09-25 16:07:19 -07002497
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002498int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2499{
2500 enum drbd_ret_code retcode;
2501
2502 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2503 if (!adm_ctx.reply_skb)
2504 return retcode;
2505 if (retcode != NO_ERROR)
2506 goto out;
2507
2508 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002509 retcode = ERR_PAUSE_IS_SET;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002510out:
2511 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002512 return 0;
2513}
2514
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002515int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002516{
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02002517 union drbd_dev_state s;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002518 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002519
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002520 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2521 if (!adm_ctx.reply_skb)
2522 return retcode;
2523 if (retcode != NO_ERROR)
2524 goto out;
2525
2526 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2527 s = adm_ctx.mdev->state;
Philipp Reisnercd88d032011-01-20 11:46:41 +01002528 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2529 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2530 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2531 } else {
2532 retcode = ERR_PAUSE_IS_CLEAR;
2533 }
2534 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002535
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002536out:
2537 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002538 return 0;
2539}
2540
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002541int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002542{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002543 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002544}
2545
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002546int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002547{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002548 struct drbd_conf *mdev;
 2549	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2550
2551 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2552 if (!adm_ctx.reply_skb)
2553 return retcode;
2554 if (retcode != NO_ERROR)
2555 goto out;
2556
2557 mdev = adm_ctx.mdev;
Philipp Reisner43a51822010-06-11 11:26:34 +02002558 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2559 drbd_uuid_new_current(mdev);
2560 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02002561 }
Philipp Reisner265be2d2010-05-31 10:14:17 +02002562 drbd_suspend_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002563 retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2564 if (retcode == SS_SUCCESS) {
Philipp Reisner265be2d2010-05-31 10:14:17 +02002565 if (mdev->state.conn < C_CONNECTED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002566 tl_clear(mdev->tconn);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002567 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002568 tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002569 }
2570 drbd_resume_io(mdev);
2571
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002572out:
2573 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002574 return 0;
2575}
2576
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002577int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002578{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002579 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002580}
2581
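/* Nest a DRBD_NLA_CFG_CONTEXT attribute into the skb: resource name,
 * the volume number (unless VOLUME_UNSPECIFIED) and, where configured,
 * our own and the peer's network address. */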
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002582int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002583{
2584 struct nlattr *nla;
2585 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2586 if (!nla)
2587 goto nla_put_failure;
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002588 if (vnr != VOLUME_UNSPECIFIED &&
2589 nla_put_u32(skb, T_ctx_volume, vnr))
2590 goto nla_put_failure;
2591 if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
2592 goto nla_put_failure;
2593 if (tconn->my_addr_len &&
2594 nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
2595 goto nla_put_failure;
2596 if (tconn->peer_addr_len &&
2597 nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
2598 goto nla_put_failure;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002599 nla_nest_end(skb, nla);
2600 return 0;
2601
2602nla_put_failure:
2603 if (nla)
2604 nla_nest_cancel(skb, nla);
2605 return -EMSGSIZE;
2606}
2607
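/* Serialize the full status of one device into the skb: configuration
 * context, resource/disk/net options, state and I/O counters, UUIDs,
 * and, for broadcasts (sib != NULL), the reason for the event. */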
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002608int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2609 const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002610{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002611 struct state_info *si = NULL; /* for sizeof(si->member); */
Philipp Reisner44ed1672011-04-19 17:10:19 +02002612 struct net_conf *nc;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002613 struct nlattr *nla;
2614 int got_ldev;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002615 int err = 0;
2616 int exclude_sensitive;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002617
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002618 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2619 * to. So we better exclude_sensitive information.
2620 *
2621 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2622 * in the context of the requesting user process. Exclude sensitive
2623 * information, unless current has superuser.
2624 *
2625 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2626 * relies on the current implementation of netlink_dump(), which
2627 * executes the dump callback successively from netlink_recvmsg(),
2628 * always in the context of the receiving process */
2629 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002630
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002631 got_ldev = get_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002632
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002633 /* We need to add connection name and volume number information still.
2634 * Minor number is in drbd_genlmsghdr. */
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002635 if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002636 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002637
Lars Ellenbergf3990022011-03-23 14:31:09 +01002638 if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2639 goto nla_put_failure;
2640
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002641 rcu_read_lock();
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002642 if (got_ldev)
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002643 if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002644 goto nla_put_failure;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002645
Philipp Reisner44ed1672011-04-19 17:10:19 +02002646 nc = rcu_dereference(mdev->tconn->net_conf);
2647 if (nc)
2648 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2649 rcu_read_unlock();
2650 if (err)
2651 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002652
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002653 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2654 if (!nla)
2655 goto nla_put_failure;
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002656 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
2657 nla_put_u32(skb, T_current_state, mdev->state.i) ||
2658 nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
Philipp Marek3174f8c2012-03-03 21:04:30 +01002659 nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
2660 nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
2661 nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
2662 nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
2663 nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
2664 nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
2665 nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
2666 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
2667 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
2668 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002669 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002670
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002671 if (got_ldev) {
Philipp Reisner39a1aa7f2012-08-08 21:19:09 +02002672 int err;
2673
2674 spin_lock_irq(&mdev->ldev->md.uuid_lock);
2675 err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2676 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2677
2678 if (err)
2679 goto nla_put_failure;
2680
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002681 if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002682 nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
2683 nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
2684 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002685 if (C_SYNC_SOURCE <= mdev->state.conn &&
2686 C_PAUSED_SYNC_T >= mdev->state.conn) {
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002687 if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
2688 nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
2689 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002690 }
2691 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002692
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002693 if (sib) {
2694 switch(sib->sib_reason) {
2695 case SIB_SYNC_PROGRESS:
2696 case SIB_GET_STATUS_REPLY:
2697 break;
2698 case SIB_STATE_CHANGE:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002699 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
2700 nla_put_u32(skb, T_new_state, sib->ns.i))
2701 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002702 break;
2703 case SIB_HELPER_POST:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002704 if (nla_put_u32(skb, T_helper_exit_code,
2705 sib->helper_exit_code))
2706 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002707 /* fall through */
2708 case SIB_HELPER_PRE:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002709 if (nla_put_string(skb, T_helper, sib->helper_name))
2710 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002711 break;
2712 }
2713 }
2714 nla_nest_end(skb, nla);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002715
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002716 if (0)
2717nla_put_failure:
2718 err = -EMSGSIZE;
2719 if (got_ldev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002720 put_ldev(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002721 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002722}
2723
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002724int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002725{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002726 enum drbd_ret_code retcode;
2727 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002728
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002729 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2730 if (!adm_ctx.reply_skb)
2731 return retcode;
2732 if (retcode != NO_ERROR)
2733 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002734
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002735 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2736 if (err) {
2737 nlmsg_free(adm_ctx.reply_skb);
2738 return err;
2739 }
2740out:
2741 drbd_adm_finish(info, retcode);
2742 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002743}
2744
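/* netlink dump callback: emits the status of one volume (or of one
 * volume-less tconn) per invocation; the iterator position is kept
 * in cb->args[], see the comment below. */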
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002745int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002746{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002747 struct drbd_conf *mdev;
2748 struct drbd_genlmsghdr *dh;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002749 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2750 struct drbd_tconn *tconn = NULL;
2751 struct drbd_tconn *tmp;
2752 unsigned volume = cb->args[1];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002753
Lars Ellenberg543cc102011-03-10 22:18:18 +01002754	/* Open coded, deferred iteration:
2755 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2756 * idr_for_each_entry(&tconn->volumes, mdev, i) {
2757 * ...
2758 * }
2759 * }
2760 * where tconn is cb->args[0];
2761 * and i is cb->args[1];
2762 *
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002763 * cb->args[2] indicates if we shall loop over all resources,
2764 * or just dump all volumes of a single resource.
2765 *
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002766 * This may miss entries inserted after this dump started,
2767 * or entries deleted before they are reached.
Lars Ellenberg543cc102011-03-10 22:18:18 +01002768 *
2769 * We need to make sure the mdev won't disappear while
2770 * we are looking at it, and revalidate our iterators
2771 * on each iteration.
2772 */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002773
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002774 /* synchronize with conn_create()/conn_destroy() */
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002775 rcu_read_lock();
Lars Ellenberg543cc102011-03-10 22:18:18 +01002776 /* revalidate iterator position */
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002777 list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
Lars Ellenberg543cc102011-03-10 22:18:18 +01002778 if (pos == NULL) {
2779 /* first iteration */
2780 pos = tmp;
2781 tconn = pos;
2782 break;
2783 }
2784 if (tmp == pos) {
2785 tconn = pos;
2786 break;
2787 }
2788 }
2789 if (tconn) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002790next_tconn:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002791 mdev = idr_get_next(&tconn->volumes, &volume);
2792 if (!mdev) {
2793 /* No more volumes to dump on this tconn.
2794 * Advance tconn iterator. */
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002795 pos = list_entry_rcu(tconn->all_tconn.next,
2796 struct drbd_tconn, all_tconn);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002797 /* Did we dump any volume on this tconn yet? */
Lars Ellenberg543cc102011-03-10 22:18:18 +01002798 if (volume != 0) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002799 /* If we reached the end of the list,
2800 * or only a single resource dump was requested,
2801 * we are done. */
2802 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2803 goto out;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002804 volume = 0;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002805 tconn = pos;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002806 goto next_tconn;
2807 }
2808 }
2809
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002810 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2811 cb->nlh->nlmsg_seq, &drbd_genl_family,
2812 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2813 if (!dh)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002814 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002815
Lars Ellenberg543cc102011-03-10 22:18:18 +01002816 if (!mdev) {
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002817 /* This is a tconn without a single volume.
 2818	 * Surprisingly enough, it may have a network
2819 * configuration. */
2820 struct net_conf *nc;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002821 dh->minor = -1U;
2822 dh->ret_code = NO_ERROR;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002823 if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002824 goto cancel;
2825 nc = rcu_dereference(tconn->net_conf);
2826 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
2827 goto cancel;
2828 goto done;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002829 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002830
Lars Ellenberg543cc102011-03-10 22:18:18 +01002831 D_ASSERT(mdev->vnr == volume);
2832 D_ASSERT(mdev->tconn == tconn);
2833
2834 dh->minor = mdev_to_minor(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002835 dh->ret_code = NO_ERROR;
2836
2837 if (nla_put_status_info(skb, mdev, NULL)) {
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002838cancel:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002839 genlmsg_cancel(skb, dh);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002840 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002841 }
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002842done:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002843 genlmsg_end(skb, dh);
2844 }
2845
Lars Ellenberg543cc102011-03-10 22:18:18 +01002846out:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002847 rcu_read_unlock();
Lars Ellenberg543cc102011-03-10 22:18:18 +01002848 /* where to start the next iteration */
2849 cb->args[0] = (long)pos;
2850 cb->args[1] = (pos == tconn) ? volume + 1 : 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002851
Lars Ellenberg543cc102011-03-10 22:18:18 +01002852 /* No more tconns/volumes/minors found results in an empty skb.
2853 * Which will terminate the dump. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002854 return skb->len;
2855}
2856
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002857/*
2858 * Request status of all resources, or of all volumes within a single resource.
2859 *
2860 * This is a dump, as the answer may not fit in a single reply skb otherwise.
2861 * Which means we cannot use the family->attrbuf or other such members, because
2862 * dump is NOT protected by the genl_lock(). During dump, we only have access
2863 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2864 *
2865 * Once things are setup properly, we call into get_one_status().
2866 */
2867int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2868{
2869 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2870 struct nlattr *nla;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002871 const char *resource_name;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002872 struct drbd_tconn *tconn;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002873 int maxtype;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002874
2875 /* Is this a followup call? */
2876 if (cb->args[0]) {
2877 /* ... of a single resource dump,
2878 * and the resource iterator has been advanced already? */
2879 if (cb->args[2] && cb->args[2] != cb->args[0])
2880 return 0; /* DONE. */
2881 goto dump;
2882 }
2883
2884 /* First call (from netlink_dump_start). We need to figure out
2885 * which resource(s) the user wants us to dump. */
2886 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2887 nlmsg_attrlen(cb->nlh, hdrlen),
2888 DRBD_NLA_CFG_CONTEXT);
2889
2890 /* No explicit context given. Dump all. */
2891 if (!nla)
2892 goto dump;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002893 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
2894 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
2895 if (IS_ERR(nla))
2896 return PTR_ERR(nla);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002897 /* context given, but no name present? */
2898 if (!nla)
2899 return -EINVAL;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002900 resource_name = nla_data(nla);
2901 tconn = conn_get_by_name(resource_name);
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002902
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002903 if (!tconn)
2904 return -ENODEV;
2905
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002906 kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2907
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002908 /* prime iterators, and set "filter" mode mark:
2909 * only dump this tconn. */
2910 cb->args[0] = (long)tconn;
2911 /* cb->args[1] = 0; passed in this way. */
2912 cb->args[2] = (long)tconn;
2913
2914dump:
2915 return get_one_status(skb, cb);
2916}
2917
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002918int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2919{
2920 enum drbd_ret_code retcode;
2921 struct timeout_parms tp;
2922 int err;
2923
2924 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2925 if (!adm_ctx.reply_skb)
2926 return retcode;
2927 if (retcode != NO_ERROR)
2928 goto out;
2929
2930 tp.timeout_type =
2931 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2932 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2933 UT_DEFAULT;
2934
2935 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2936 if (err) {
2937 nlmsg_free(adm_ctx.reply_skb);
2938 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002939 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002940out:
2941 drbd_adm_finish(info, retcode);
2942 return 0;
2943}
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002944
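/* Start online verify, acting as verify source. Unless an explicit
 * start sector is given, resume from the last known position; pending
 * bitmap I/O is waited for before the state change is requested. */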
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002945int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2946{
2947 struct drbd_conf *mdev;
2948 enum drbd_ret_code retcode;
Lars Ellenberg58ffa582012-07-26 14:09:49 +02002949 struct start_ov_parms parms;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002950
2951 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2952 if (!adm_ctx.reply_skb)
2953 return retcode;
2954 if (retcode != NO_ERROR)
2955 goto out;
2956
2957 mdev = adm_ctx.mdev;
Lars Ellenberg58ffa582012-07-26 14:09:49 +02002958
2959 /* resume from last known position, if possible */
2960 parms.ov_start_sector = mdev->ov_start_sector;
2961 parms.ov_stop_sector = ULLONG_MAX;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002962 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002963 int err = start_ov_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002964 if (err) {
2965 retcode = ERR_MANDATORY_TAG;
2966 drbd_msg_put_info(from_attrs_err_to_txt(err));
2967 goto out;
2968 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002969 }
Lars Ellenberg58ffa582012-07-26 14:09:49 +02002970 /* w_make_ov_request expects position to be aligned */
2971 mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
2972 mdev->ov_stop_sector = parms.ov_stop_sector;
2973
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002974 /* If there is still bitmap IO pending, e.g. previous resync or verify
2975 * just being finished, wait for it before requesting a new resync. */
Lars Ellenberg5016b822012-05-07 12:00:56 +02002976 drbd_suspend_io(mdev);
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002977 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002978 retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
Lars Ellenberg5016b822012-05-07 12:00:56 +02002979 drbd_resume_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002980out:
2981 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002982 return 0;
2983}
2984
2985
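/* Generate a new current UUID, rotating the bitmap UUID into the
 * history. With clear_bm set, the bitmap is cleared as well, which
 * skips the initial sync if peer and protocol allow it. */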
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002986int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002987{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002988 struct drbd_conf *mdev;
2989 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002990 int skip_initial_sync = 0;
2991 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002992 struct new_c_uuid_parms args;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002993
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002994 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2995 if (!adm_ctx.reply_skb)
2996 return retcode;
2997 if (retcode != NO_ERROR)
2998 goto out_nolock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002999
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003000 mdev = adm_ctx.mdev;
3001 memset(&args, 0, sizeof(args));
3002 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01003003 err = new_c_uuid_parms_from_attrs(&args, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003004 if (err) {
3005 retcode = ERR_MANDATORY_TAG;
3006 drbd_msg_put_info(from_attrs_err_to_txt(err));
3007 goto out_nolock;
3008 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003009 }
3010
Philipp Reisner8410da82011-02-11 20:11:10 +01003011 mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003012
3013 if (!get_ldev(mdev)) {
3014 retcode = ERR_NO_DISK;
3015 goto out;
3016 }
3017
3018 /* this is "skip initial sync", assume to be clean */
Philipp Reisner31890f42011-01-19 14:12:51 +01003019 if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003020 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3021 dev_info(DEV, "Preparing to skip initial sync\n");
3022 skip_initial_sync = 1;
3023 } else if (mdev->state.conn != C_STANDALONE) {
3024 retcode = ERR_CONNECTED;
3025 goto out_dec;
3026 }
3027
3028 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
3029 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
3030
3031 if (args.clear_bm) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003032 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3033 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034 if (err) {
3035 dev_err(DEV, "Writing bitmap failed with %d\n",err);
3036 retcode = ERR_IO_MD_DISK;
3037 }
3038 if (skip_initial_sync) {
3039 drbd_send_uuids_skip_initial_sync(mdev);
3040 _drbd_uuid_set(mdev, UI_BITMAP, 0);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003041 drbd_print_uuids(mdev, "cleared bitmap UUID");
Philipp Reisner87eeee42011-01-19 14:16:30 +01003042 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003043 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3044 CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003045 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003046 }
3047 }
3048
3049 drbd_md_sync(mdev);
3050out_dec:
3051 put_ldev(mdev);
3052out:
Philipp Reisner8410da82011-02-11 20:11:10 +01003053 mutex_unlock(mdev->state_mutex);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003054out_nolock:
3055 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003056 return 0;
3057}
3058
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003059static enum drbd_ret_code
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003060drbd_check_resource_name(const char *name)
Philipp Reisner774b3052011-02-22 02:07:03 -05003061{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003062 if (!name || !name[0]) {
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003063 drbd_msg_put_info("resource name missing");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003064 return ERR_MANDATORY_TAG;
3065 }
3066 /* if we want to use these in sysfs/configfs/debugfs some day,
3067 * we must not allow slashes */
3068 if (strchr(name, '/')) {
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003069 drbd_msg_put_info("invalid resource name");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003070 return ERR_INVALID_REQUEST;
3071 }
3072 return NO_ERROR;
3073}
Philipp Reisner774b3052011-02-22 02:07:03 -05003074
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003075int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003076{
3077 enum drbd_ret_code retcode;
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003078 struct res_opts res_opts;
3079 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003080
3081 retcode = drbd_adm_prepare(skb, info, 0);
3082 if (!adm_ctx.reply_skb)
3083 return retcode;
3084 if (retcode != NO_ERROR)
3085 goto out;
3086
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003087 set_res_opts_defaults(&res_opts);
3088 err = res_opts_from_attrs(&res_opts, info);
3089 if (err && err != -ENOMSG) {
3090 retcode = ERR_MANDATORY_TAG;
3091 drbd_msg_put_info(from_attrs_err_to_txt(err));
3092 goto out;
3093 }
3094
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003095 retcode = drbd_check_resource_name(adm_ctx.resource_name);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003096 if (retcode != NO_ERROR)
3097 goto out;
3098
3099 if (adm_ctx.tconn) {
Lars Ellenberg38f19612011-03-14 13:22:35 +01003100 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3101 retcode = ERR_INVALID_REQUEST;
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003102 drbd_msg_put_info("resource exists");
Lars Ellenberg38f19612011-03-14 13:22:35 +01003103 }
3104 /* else: still NO_ERROR */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003105 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003106 }
3107
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003108 if (!conn_create(adm_ctx.resource_name, &res_opts))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003109 retcode = ERR_NOMEM;
3110out:
3111 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003112 return 0;
3113}
3114
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003115int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003116{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003117 struct drbd_genlmsghdr *dh = info->userhdr;
3118 enum drbd_ret_code retcode;
Philipp Reisner774b3052011-02-22 02:07:03 -05003119
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02003120 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003121 if (!adm_ctx.reply_skb)
3122 return retcode;
3123 if (retcode != NO_ERROR)
3124 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003125
Andreas Gruenbacherf2257a52011-07-14 16:00:40 +02003126 if (dh->minor > MINORMASK) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003127 drbd_msg_put_info("requested minor out of range");
3128 retcode = ERR_INVALID_REQUEST;
3129 goto out;
3130 }
Andreas Gruenbacher0c8e36d2011-03-30 16:00:17 +02003131 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003132 drbd_msg_put_info("requested volume id out of range");
3133 retcode = ERR_INVALID_REQUEST;
3134 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003135 }
3136
Lars Ellenberg38f19612011-03-14 13:22:35 +01003137 /* drbd_adm_prepare made sure already
3138 * that mdev->tconn and mdev->vnr match the request. */
3139 if (adm_ctx.mdev) {
3140 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3141 retcode = ERR_MINOR_EXISTS;
3142 /* else: still NO_ERROR */
3143 goto out;
3144 }
3145
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003146 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
3147out:
3148 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003149 return 0;
3150}
3151
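/* A volume may only be deleted while it is diskless and Secondary.
 * It is then removed from the tconn's volume idr and the global
 * minors idr, and its gendisk is deleted. */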
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003152static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
3153{
3154 if (mdev->state.disk == D_DISKLESS &&
3155 /* no need to be mdev->state.conn == C_STANDALONE &&
3156 * we may want to delete a minor from a live replication group.
3157 */
3158 mdev->state.role == R_SECONDARY) {
Philipp Reisner369bea62011-07-06 23:04:44 +02003159 _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
3160 CS_VERBOSE + CS_WAIT_COMPLETE);
Philipp Reisner81fa2e62011-05-04 15:10:30 +02003161 idr_remove(&mdev->tconn->volumes, mdev->vnr);
3162 idr_remove(&minors, mdev_to_minor(mdev));
3163 del_gendisk(mdev->vdisk);
3164 synchronize_rcu();
3165 kref_put(&mdev->kref, &drbd_minor_destroy);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003166 return NO_ERROR;
3167 } else
3168 return ERR_MINOR_CONFIGURED;
3169}
3170
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003171int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003172{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003173 enum drbd_ret_code retcode;
3174
3175 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3176 if (!adm_ctx.reply_skb)
3177 return retcode;
3178 if (retcode != NO_ERROR)
3179 goto out;
3180
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003181 retcode = adm_delete_minor(adm_ctx.mdev);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003182out:
3183 drbd_adm_finish(info, retcode);
3184 return 0;
3185}
3186
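/* Take a whole resource down: demote all volumes to Secondary,
 * disconnect, detach all disks, delete all volumes and, finally,
 * the connection itself. */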
3187int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3188{
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003189	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003190 struct drbd_conf *mdev;
3191 unsigned i;
3192
3193 retcode = drbd_adm_prepare(skb, info, 0);
3194 if (!adm_ctx.reply_skb)
3195 return retcode;
3196 if (retcode != NO_ERROR)
3197 goto out;
3198
3199 if (!adm_ctx.tconn) {
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003200 retcode = ERR_RES_NOT_KNOWN;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003201 goto out;
3202 }
3203
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003204 /* demote */
3205 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3206 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3207 if (retcode < SS_SUCCESS) {
3208 drbd_msg_put_info("failed to demote");
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003209 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003210 }
3211 }
3212
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003213 retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3214 if (retcode < SS_SUCCESS) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003215 drbd_msg_put_info("failed to disconnect");
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003216 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003217 }
3218
3219 /* detach */
3220 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
Philipp Reisnercdfda632011-07-05 15:38:59 +02003221 retcode = adm_detach(mdev, 0);
Lars Ellenberg27012382012-07-24 10:13:55 +02003222 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003223 drbd_msg_put_info("failed to detach");
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003224 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003225 }
3226 }
3227
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003228 /* If we reach this, all volumes (of this tconn) are Secondary,
3229 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003230 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003231 drbd_thread_stop(&adm_ctx.tconn->worker);
3232
3233 /* Now, nothing can fail anymore */
3234
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003235 /* delete volumes */
3236 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3237 retcode = adm_delete_minor(mdev);
3238 if (retcode != NO_ERROR) {
3239 /* "can not happen" */
3240 drbd_msg_put_info("failed to delete volume");
Philipp Reisneref356262011-04-13 14:21:29 -07003241 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003242 }
3243 }
3244
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003245 /* delete connection */
3246 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02003247 list_del_rcu(&adm_ctx.tconn->all_tconn);
3248 synchronize_rcu();
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003249 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3250
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003251 retcode = NO_ERROR;
3252 } else {
3253 /* "can not happen" */
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003254 retcode = ERR_RES_IN_USE;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003255 drbd_msg_put_info("failed to delete connection");
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003256 }
Philipp Reisneref356262011-04-13 14:21:29 -07003257 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003258out:
3259 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003260 return 0;
3261}
3262
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003263int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003264{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003265 enum drbd_ret_code retcode;
3266
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02003267 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003268 if (!adm_ctx.reply_skb)
3269 return retcode;
3270 if (retcode != NO_ERROR)
3271 goto out;
3272
3273 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02003274 list_del_rcu(&adm_ctx.tconn->all_tconn);
3275 synchronize_rcu();
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003276 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3277
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003278 retcode = NO_ERROR;
Philipp Reisner774b3052011-02-22 02:07:03 -05003279 } else {
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003280 retcode = ERR_RES_IN_USE;
Philipp Reisner774b3052011-02-22 02:07:03 -05003281 }
3282
Lars Ellenberg992d6e92011-05-02 11:47:18 +02003283 if (retcode == NO_ERROR)
3284 drbd_thread_stop(&adm_ctx.tconn->worker);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003285out:
3286 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003287 return 0;
3288}
3289
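/* Broadcast a state-info event to the drbd events multicast group.
 * Sync progress is rate limited to at most one event per second;
 * other events are always sent. */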
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003290void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003291{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003292 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3293 struct sk_buff *msg;
3294 struct drbd_genlmsghdr *d_out;
3295 unsigned seq;
3296 int err = -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003297
Philipp Reisner328e0f122012-10-19 14:37:47 +02003298	if (sib->sib_reason == SIB_SYNC_PROGRESS) {
 3299		if (!time_after(jiffies, mdev->rs_last_bcast + HZ))
 3300			return;
 3301		mdev->rs_last_bcast = jiffies;
 3302	}
3303
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003304 seq = atomic_inc_return(&drbd_genl_seq);
3305 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3306 if (!msg)
3307 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003308
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003309 err = -EMSGSIZE;
3310 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3311 if (!d_out) /* cannot happen, but anyways. */
3312 goto nla_put_failure;
3313 d_out->minor = mdev_to_minor(mdev);
Andreas Gruenbacher6f9b5f82011-05-06 01:03:32 +02003314 d_out->ret_code = NO_ERROR;
Philipp Reisner9f5180e2009-10-06 09:30:14 +02003315
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003316 if (nla_put_status_info(msg, mdev, sib))
3317 goto nla_put_failure;
3318 genlmsg_end(msg, d_out);
3319 err = drbd_genl_multicast_events(msg, 0);
3320 /* msg has been consumed or freed in netlink_broadcast() */
3321 if (err && err != -ESRCH)
3322 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003323
Philipp Reisnerb411b362009-09-25 16:07:19 -07003324 return;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003325
3326nla_put_failure:
3327 nlmsg_free(msg);
3328failed:
3329 dev_err(DEV, "Error %d while broadcasting event. "
3330 "Event seq:%u sib_reason:%u\n",
3331 err, seq, sib->sib_reason);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003332}