/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

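/* Finalize the netlink message in @skb and send it as the reply to the
 * request described by @info.  A failure to send is only logged; there is
 * nobody left to report it to. */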
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
	       return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.resource_name,
				adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

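/* Fill the DRBD_PEER_AF and DRBD_PEER_ADDRESS entries of the environment
 * passed to the user mode helper, based on the connection's peer address. */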
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

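/* Run the configured user mode helper @cmd for a single minor, broadcasting
 * SIB_HELPER_PRE/SIB_HELPER_POST events around the call.  Returns the
 * helper's exit status; negative errno values are mapped to 0. */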
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

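/* Write out the meta data of all volumes of this connection.  The RCU read
 * lock protecting the volume list is dropped around each (blocking)
 * drbd_md_sync() call; a kref keeps the volume alive meanwhile. */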
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_md_sync(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

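/* Return the strictest fencing policy configured on any volume of this
 * connection that still has a consistent local disk, or FP_NOT_AVAIL if
 * there is no such volume. */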
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

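/* Run the fence-peer helper and translate its exit code into a disk state
 * change for the peer (or, for exit code 6, for ourselves).  Returns true
 * if the peer's disk state is known to be Outdated or worse afterwards. */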
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

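/* Change the role of this device to @new_role (Primary or Secondary).
 * The state change is retried up to max_tries times, outdating or fencing
 * the peer where needed; with @force, Inconsistent local data may be
 * promoted to UpToDate.  Returns an enum drbd_state_rv status code. */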
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage is already done in the after-state-change work */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

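/* Compute the usable device size (in sectors) from the local backing device
 * capacity, the size last reported by the peer, and an optional size
 * requested by the user (@u_size). */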
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

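/* Apply request queue limits derived from @max_bio_size and, while a backing
 * device is attached, from the backing device's queue (max segments,
 * stacked limits, read-ahead). */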
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

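/* Recompute the maximum bio size we announce on our request queue, as the
 * minimum of what the local backing device supports and what the connected
 * peer can handle (older peers are limited further, see below). */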
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   From 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (mdev->tconn->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn) &&
		tconn->cstate == C_STANDALONE;
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}

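/* Netlink handler for changing the disk options of an attached device:
 * build a new disk_conf under conf_update, resize the activity log and the
 * resync fifo if needed, and publish the new configuration via RCU. */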
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	enforce_disk_conf_limits(new_disk_conf);

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);

	if (new_disk_conf->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		mdev->ldev->md.flags |= MDF_AL_DISABLED;

	drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&mdev->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001265int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001266{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001267 struct drbd_conf *mdev;
1268 int err;
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01001269 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001270 enum determine_dev_size dd;
1271 sector_t max_possible_sectors;
1272 sector_t min_md_device_sectors;
1273 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001274 struct disk_conf *new_disk_conf = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001275 struct block_device *bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001276 struct lru_cache *resync_lru = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001277 struct fifo_buffer *new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001278 union drbd_state ns, os;
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001279 enum drbd_state_rv rv;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001280 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001281
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001282 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1283 if (!adm_ctx.reply_skb)
1284 return retcode;
1285 if (retcode != NO_ERROR)
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001286 goto finish;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001287
1288 mdev = adm_ctx.mdev;
Philipp Reisner0e29d162011-02-18 14:23:11 +01001289 conn_reconfig_start(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001290
1291 /* if you want to reconfigure, please tear down first */
1292 if (mdev->state.disk > D_DISKLESS) {
1293 retcode = ERR_DISK_CONFIGURED;
1294 goto fail;
1295 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001296 /* It may just now have detached because of IO error. Make sure
1297 * drbd_ldev_destroy is done already, we may end up here very fast,
1298 * e.g. if someone calls attach from the on-io-error handler,
1299 * to realize a "hot spare" feature (not that I'd recommend that) */
1300 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001301
Lars Ellenberg0c849662012-07-30 09:07:28 +02001302 /* make sure there is no leftover from previous force-detach attempts */
1303 clear_bit(FORCE_DETACH, &mdev->flags);
1304
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001305 /* allocation not in the IO path, drbdsetup context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001306 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1307 if (!nbc) {
1308 retcode = ERR_NOMEM;
1309 goto fail;
1310 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001311 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1312 if (!new_disk_conf) {
1313 retcode = ERR_NOMEM;
1314 goto fail;
1315 }
1316 nbc->disk_conf = new_disk_conf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001317
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001318 set_disk_conf_defaults(new_disk_conf);
1319 err = disk_conf_from_attrs(new_disk_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001320 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001321 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001322 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001323 goto fail;
1324 }
1325
Philipp Reisnerd589a212011-05-04 10:06:52 +02001326 enforce_disk_conf_limits(new_disk_conf);
1327
Philipp Reisner9958c852011-05-03 16:19:31 +02001328 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1329 if (!new_plan) {
1330 retcode = ERR_NOMEM;
1331 goto fail;
1332 }
1333
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001334 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001335 retcode = ERR_MD_IDX_INVALID;
1336 goto fail;
1337 }
1338
Philipp Reisner44ed1672011-04-19 17:10:19 +02001339 rcu_read_lock();
1340 nc = rcu_dereference(mdev->tconn->net_conf);
1341 if (nc) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001342 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001343 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001344 retcode = ERR_STONITH_AND_PROT_A;
1345 goto fail;
1346 }
1347 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001348 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001349
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001350 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001351 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
Tejun Heoe525fd82010-11-13 11:55:17 +01001352 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001353 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001354 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001355 retcode = ERR_OPEN_DISK;
1356 goto fail;
1357 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001358 nbc->backing_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001359
Tejun Heoe525fd82010-11-13 11:55:17 +01001360 /*
1361 * meta_dev_idx >= 0: external fixed size, possibly multiple
1362 * drbd sharing one meta device. TODO in that case, paranoia
1363 * check that [md_bdev, meta_dev_idx] is not yet used by some
1364 * other drbd minor! (if you use drbd.conf + drbdadm, that
1365 * should check it for you already; but if you don't, or
1366 * someone fooled it, we need to double check here)
1367 */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001368 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001369 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001370 (new_disk_conf->meta_dev_idx < 0) ?
Tejun Heod4d77622010-11-13 11:55:18 +01001371 (void *)mdev : (void *)drbd_m_holder);
Tejun Heoe525fd82010-11-13 11:55:17 +01001372 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001373 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001374 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001375 retcode = ERR_OPEN_MD_DISK;
1376 goto fail;
1377 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001378 nbc->md_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001379
Tejun Heoe525fd82010-11-13 11:55:17 +01001380 if ((nbc->backing_bdev == nbc->md_bdev) !=
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001381 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1382 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001383 retcode = ERR_MD_IDX_INVALID;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001384 goto fail;
1385 }
1386
1387 resync_lru = lc_create("resync", drbd_bm_ext_cache,
Lars Ellenberg46a15bc2011-02-21 13:21:01 +01001388 1, 61, sizeof(struct bm_extent),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001389 offsetof(struct bm_extent, lce));
1390 if (!resync_lru) {
1391 retcode = ERR_NOMEM;
Tejun Heoe525fd82010-11-13 11:55:17 +01001392 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001393 }
1394
1395 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1396 drbd_md_set_sector_offsets(mdev, nbc);
1397
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001398 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001399 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1400 (unsigned long long) drbd_get_max_capacity(nbc),
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001401 (unsigned long long) new_disk_conf->disk_size);
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001402 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001403 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001404 }
1405
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001406 if (new_disk_conf->meta_dev_idx < 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001407 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1408 /* at least one MB, otherwise it does not make sense */
1409 min_md_device_sectors = (2<<10);
1410 } else {
1411 max_possible_sectors = DRBD_MAX_SECTORS;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001412 min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001413 }
1414
Philipp Reisnerb411b362009-09-25 16:07:19 -07001415 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001416 retcode = ERR_MD_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001417 dev_warn(DEV, "refusing attach: md-device too small, "
1418 "at least %llu sectors needed for this meta-disk type\n",
1419 (unsigned long long) min_md_device_sectors);
Tejun Heoe525fd82010-11-13 11:55:17 +01001420 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001421 }
1422
1423 /* Make sure the new disk is big enough
1424 * (we may currently be R_PRIMARY with no local disk...) */
1425 if (drbd_get_max_capacity(nbc) <
1426 drbd_get_capacity(mdev->this_bdev)) {
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001427 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001428 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001429 }
1430
1431 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1432
Lars Ellenberg13529942009-10-12 19:07:49 +02001433 if (nbc->known_size > max_possible_sectors) {
1434 dev_warn(DEV, "==> truncating very big lower level device "
1435 "to currently maximum possible %llu sectors <==\n",
1436 (unsigned long long) max_possible_sectors);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001437 if (new_disk_conf->meta_dev_idx >= 0)
Lars Ellenberg13529942009-10-12 19:07:49 +02001438 dev_warn(DEV, "==>> using internal or flexible "
1439 "meta data may help <<==\n");
1440 }
1441
Philipp Reisnerb411b362009-09-25 16:07:19 -07001442 drbd_suspend_io(mdev);
1443 /* also wait for the last barrier ack. */
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01001444 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1445 * We need a way to either ignore barrier acks for barriers sent before a device
1446 * was attached, or a way to wait for all pending barrier acks to come in.
1447 * As barriers are counted per resource,
1448 * we'd need to suspend io on all devices of a resource.
1449 */
Philipp Reisner2aebfab2011-03-28 16:48:11 +02001450 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001451 /* and for any other previously queued work */
Philipp Reisnera21e9292011-02-08 15:08:49 +01001452 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001453
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001454 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1455 retcode = rv; /* FIXME: Type mismatch. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001456 drbd_resume_io(mdev);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001457 if (rv < SS_SUCCESS)
Tejun Heoe525fd82010-11-13 11:55:17 +01001458 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001459
1460 if (!get_ldev_if_state(mdev, D_ATTACHING))
1461 goto force_diskless;
1462
1463 drbd_md_set_sector_offsets(mdev, nbc);
1464
1465 if (!mdev->bitmap) {
1466 if (drbd_bm_init(mdev)) {
1467 retcode = ERR_NOMEM;
1468 goto force_diskless_dec;
1469 }
1470 }
1471
1472 retcode = drbd_md_read(mdev, nbc);
1473 if (retcode != NO_ERROR)
1474 goto force_diskless_dec;
1475
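	/* A disconnected Primary may only attach a backing device whose
	 * current UUID matches the data generation it last exposed
	 * (low bit ignored). */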
1476 if (mdev->state.conn < C_CONNECTED &&
1477 mdev->state.role == R_PRIMARY &&
1478 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1479 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1480 (unsigned long long)mdev->ed_uuid);
1481 retcode = ERR_DATA_NOT_CURRENT;
1482 goto force_diskless_dec;
1483 }
1484
1485 /* Since we are diskless, fix the activity log first... */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001486 if (drbd_check_al_size(mdev, new_disk_conf)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001487 retcode = ERR_NOMEM;
1488 goto force_diskless_dec;
1489 }
1490
1491 /* Prevent shrinking of consistent devices ! */
1492 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001493 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001494 dev_warn(DEV, "refusing to truncate a consistent device\n");
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001495 retcode = ERR_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001496 goto force_diskless_dec;
1497 }
1498
Philipp Reisnerb411b362009-09-25 16:07:19 -07001499 /* Reset the "barriers don't work" bits here, then force meta data to
1500 * be written, to ensure we determine if barriers are supported. */
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001501 if (new_disk_conf->md_flushes)
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001502 clear_bit(MD_NO_FUA, &mdev->flags);
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001503 else
1504 set_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001505
1506 /* Point of no return reached.
1507 * Devices and memory are no longer released by error cleanup below.
1508 * now mdev takes over responsibility, and the state engine should
1509 * clean it up somewhere. */
1510 D_ASSERT(mdev->ldev == NULL);
1511 mdev->ldev = nbc;
1512 mdev->resync = resync_lru;
Philipp Reisner9958c852011-05-03 16:19:31 +02001513 mdev->rs_plan_s = new_plan;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001514 nbc = NULL;
1515 resync_lru = NULL;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001516 new_disk_conf = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001517 new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001518
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001519 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001520
1521 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1522 set_bit(CRASHED_PRIMARY, &mdev->flags);
1523 else
1524 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1525
Philipp Reisner894c6a92010-06-18 16:03:20 +02001526 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02001527 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001528 set_bit(CRASHED_PRIMARY, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001529
1530 mdev->send_cnt = 0;
1531 mdev->recv_cnt = 0;
1532 mdev->read_cnt = 0;
1533 mdev->writ_cnt = 0;
1534
Philipp Reisner99432fc2011-05-20 16:39:13 +02001535 drbd_reconsider_max_bio_size(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001536
1537 /* If I am currently not R_PRIMARY,
1538 * but meta data primary indicator is set,
1539 * I just now recover from a hard crash,
1540 * and have been R_PRIMARY before that crash.
1541 *
1542 * Now, if I had no connection before that crash
1543 * (have been degraded R_PRIMARY), chances are that
1544 * I won't find my peer now either.
1545 *
1546 * In that case, and _only_ in that case,
1547 * we use the degr-wfc-timeout instead of the default,
1548 * so we can automatically recover from a crash of a
1549 * degraded but active "cluster" after a certain timeout.
1550 */
1551 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1552 if (mdev->state.role != R_PRIMARY &&
1553 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1554 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1555 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1556
Bart Van Assche24c48302011-05-21 18:32:29 +02001557 dd = drbd_determine_dev_size(mdev, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001558 if (dd == dev_size_error) {
1559 retcode = ERR_NOMEM_BITMAP;
1560 goto force_diskless_dec;
1561 } else if (dd == grew)
1562 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1563
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001564 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
1565 (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
1566 drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001567 dev_info(DEV, "Assuming that all blocks are out of sync "
1568 "(aka FullSync)\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001569 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1570 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001571 retcode = ERR_IO_MD_DISK;
1572 goto force_diskless_dec;
1573 }
1574 } else {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001575 if (drbd_bitmap_io(mdev, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01001576 "read from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001577 retcode = ERR_IO_MD_DISK;
1578 goto force_diskless_dec;
1579 }
1580 }
1581
Philipp Reisner07782862010-08-31 12:00:50 +02001582 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1583 drbd_suspend_al(mdev); /* IO is still suspended here... */
1584
Philipp Reisner87eeee42011-01-19 14:16:30 +01001585 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02001586 os = drbd_read_state(mdev);
1587 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001588 /* If MDF_CONSISTENT is not set go into inconsistent state,
1589 otherwise investigate MDF_WasUpToDate...
1590 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1591 otherwise into D_CONSISTENT state.
1592 */
1593 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1594 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1595 ns.disk = D_CONSISTENT;
1596 else
1597 ns.disk = D_OUTDATED;
1598 } else {
1599 ns.disk = D_INCONSISTENT;
1600 }
1601
1602 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1603 ns.pdsk = D_OUTDATED;
1604
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001605 rcu_read_lock();
1606 if (ns.disk == D_CONSISTENT &&
1607 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001608 ns.disk = D_UP_TO_DATE;
1609
1610 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1611 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1612 this point, because drbd_request_state() modifies these
1613 flags. */
1614
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001615 if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
1616 		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
1617 else
1618 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1619
1620 rcu_read_unlock();
1621
Philipp Reisnerb411b362009-09-25 16:07:19 -07001622 /* In case we are C_CONNECTED postpone any decision on the new disk
1623 state after the negotiation phase. */
1624 if (mdev->state.conn == C_CONNECTED) {
1625 mdev->new_state_tmp.i = ns.i;
1626 ns.i = os.i;
1627 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02001628
1629 /* We expect to receive up-to-date UUIDs soon.
1630 To avoid a race in receive_state, free p_uuid while
1631 holding req_lock. I.e. atomic with the state change */
1632 kfree(mdev->p_uuid);
1633 mdev->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001634 }
1635
1636 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001637 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001638
1639 if (rv < SS_SUCCESS)
1640 goto force_diskless_dec;
1641
Philipp Reisnercdfda632011-07-05 15:38:59 +02001642 mod_timer(&mdev->request_timer, jiffies + HZ);
1643
Philipp Reisnerb411b362009-09-25 16:07:19 -07001644 if (mdev->state.role == R_PRIMARY)
1645 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1646 else
1647 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1648
1649 drbd_md_mark_dirty(mdev);
1650 drbd_md_sync(mdev);
1651
1652 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1653 put_ldev(mdev);
Philipp Reisner0e29d162011-02-18 14:23:11 +01001654 conn_reconfig_done(mdev->tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001655 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001656 return 0;
1657
1658 force_diskless_dec:
1659 put_ldev(mdev);
1660 force_diskless:
Philipp Reisner9510b242011-07-01 17:00:57 +02001661 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001662 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001663 fail:
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001664 conn_reconfig_done(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001665 if (nbc) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001666 if (nbc->backing_bdev)
1667 blkdev_put(nbc->backing_bdev,
1668 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1669 if (nbc->md_bdev)
1670 blkdev_put(nbc->md_bdev,
1671 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001672 kfree(nbc);
1673 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001674 kfree(new_disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001675 lc_destroy(resync_lru);
Philipp Reisner9958c852011-05-03 16:19:31 +02001676 kfree(new_plan);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001677
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001678 finish:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001679 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001680 return 0;
1681}
1682
Philipp Reisnercdfda632011-07-05 15:38:59 +02001683static int adm_detach(struct drbd_conf *mdev, int force)
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001684{
Philipp Reisner19f83c72011-03-29 14:21:03 +02001685 enum drbd_state_rv retcode;
Lars Ellenberg009ba892011-05-02 11:51:31 +02001686 int ret;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001687
1688 if (force) {
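		/* Forced detach: skip the graceful path and jump to D_FAILED
		 * immediately, for backing devices that no longer complete
		 * requests. */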
Lars Ellenberg0c849662012-07-30 09:07:28 +02001689 set_bit(FORCE_DETACH, &mdev->flags);
Philipp Reisnercdfda632011-07-05 15:38:59 +02001690 drbd_force_state(mdev, NS(disk, D_FAILED));
1691 retcode = SS_SUCCESS;
1692 goto out;
1693 }
1694
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001695 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
Philipp Reisner0cfac5d2011-11-10 12:12:52 +01001696 drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
Lars Ellenberg009ba892011-05-02 11:51:31 +02001697 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
Philipp Reisner0cfac5d2011-11-10 12:12:52 +01001698 drbd_md_put_buffer(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001699 /* D_FAILED will transition to DISKLESS. */
1700 ret = wait_event_interruptible(mdev->misc_wait,
1701 mdev->state.disk != D_FAILED);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001702 drbd_resume_io(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001703 if ((int)retcode == (int)SS_IS_DISKLESS)
1704 retcode = SS_NOTHING_TO_DO;
1705 if (ret)
1706 retcode = ERR_INTR;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001707out:
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001708 return retcode;
1709}
1710
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001711/* Detaching the disk is a process in multiple stages. First we need to lock
1712 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1713 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1714 * internal references as well.
1715 * Only then we have finally detached. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001716int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001717{
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001718 enum drbd_ret_code retcode;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001719 struct detach_parms parms = { };
1720 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001721
1722 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1723 if (!adm_ctx.reply_skb)
1724 return retcode;
1725 if (retcode != NO_ERROR)
1726 goto out;
1727
Philipp Reisnercdfda632011-07-05 15:38:59 +02001728 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1729 err = detach_parms_from_attrs(&parms, info);
1730 if (err) {
1731 retcode = ERR_MANDATORY_TAG;
1732 drbd_msg_put_info(from_attrs_err_to_txt(err));
1733 goto out;
1734 }
1735 }
1736
1737 retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001738out:
1739 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001740 return 0;
1741}
1742
Lars Ellenbergf3990022011-03-23 14:31:09 +01001743static bool conn_resync_running(struct drbd_tconn *tconn)
1744{
1745 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001746 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001747 int vnr;
1748
Philipp Reisner695d08f2011-04-11 22:53:32 -07001749 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001750 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1751 if (mdev->state.conn == C_SYNC_SOURCE ||
1752 mdev->state.conn == C_SYNC_TARGET ||
1753 mdev->state.conn == C_PAUSED_SYNC_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001754 mdev->state.conn == C_PAUSED_SYNC_T) {
1755 rv = true;
1756 break;
1757 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001758 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001759 rcu_read_unlock();
1760
1761 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001762}
1763
1764static bool conn_ov_running(struct drbd_tconn *tconn)
1765{
1766 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001767 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001768 int vnr;
1769
Philipp Reisner695d08f2011-04-11 22:53:32 -07001770 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001771 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1772 if (mdev->state.conn == C_VERIFY_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001773 mdev->state.conn == C_VERIFY_T) {
1774 rv = true;
1775 break;
1776 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001777 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001778 rcu_read_unlock();
1779
1780 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001781}
1782
Philipp Reisnercd643972011-04-13 18:00:59 -07001783static enum drbd_ret_code
Philipp Reisner44ed1672011-04-19 17:10:19 +02001784_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
Philipp Reisnercd643972011-04-13 18:00:59 -07001785{
1786 struct drbd_conf *mdev;
1787 int i;
1788
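	/* Changing the wire protocol, two-primaries or the integrity
	 * algorithm on an established connection requires a peer speaking
	 * protocol version 100 or newer. */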
Philipp Reisnerdcb20d12011-05-16 14:30:24 +02001789 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1790 if (new_conf->wire_protocol != old_conf->wire_protocol)
1791 return ERR_NEED_APV_100;
1792
1793 if (new_conf->two_primaries != old_conf->two_primaries)
1794 return ERR_NEED_APV_100;
1795
1796 if (!new_conf->integrity_alg != !old_conf->integrity_alg)
1797 return ERR_NEED_APV_100;
1798
1799 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1800 return ERR_NEED_APV_100;
1801 }
1802
1803 if (!new_conf->two_primaries &&
1804 conn_highest_role(tconn) == R_PRIMARY &&
1805 conn_highest_peer(tconn) == R_PRIMARY)
1806 return ERR_NEED_ALLOW_TWO_PRI;
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001807
Philipp Reisnercd643972011-04-13 18:00:59 -07001808 if (new_conf->two_primaries &&
1809 (new_conf->wire_protocol != DRBD_PROT_C))
1810 return ERR_NOT_PROTO_C;
1811
Philipp Reisnercd643972011-04-13 18:00:59 -07001812 idr_for_each_entry(&tconn->volumes, mdev, i) {
1813 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001814 enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
Philipp Reisnercd643972011-04-13 18:00:59 -07001815 put_ldev(mdev);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001816 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
Philipp Reisnercd643972011-04-13 18:00:59 -07001817 return ERR_STONITH_AND_PROT_A;
Philipp Reisnercd643972011-04-13 18:00:59 -07001818 }
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02001819 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
Philipp Reisnercd643972011-04-13 18:00:59 -07001820 return ERR_DISCARD;
Philipp Reisnercd643972011-04-13 18:00:59 -07001821 }
Philipp Reisnercd643972011-04-13 18:00:59 -07001822
1823 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1824 return ERR_CONG_NOT_PROTO_A;
1825
1826 return NO_ERROR;
1827}
1828
Philipp Reisner44ed1672011-04-19 17:10:19 +02001829static enum drbd_ret_code
1830check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1831{
1832 static enum drbd_ret_code rv;
1833 struct drbd_conf *mdev;
1834 int i;
1835
1836 rcu_read_lock();
1837 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1838 rcu_read_unlock();
1839
1840 /* tconn->volumes protected by genl_lock() here */
1841 idr_for_each_entry(&tconn->volumes, mdev, i) {
1842 if (!mdev->bitmap) {
1843 if(drbd_bm_init(mdev))
1844 return ERR_NOMEM;
1845 }
1846 }
1847
1848 return rv;
1849}
1850
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001851struct crypto {
1852 struct crypto_hash *verify_tfm;
1853 struct crypto_hash *csums_tfm;
1854 struct crypto_hash *cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001855 struct crypto_hash *integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001856};
1857
1858static int
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001859alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001860{
1861 if (!tfm_name[0])
1862 return NO_ERROR;
1863
1864 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1865 if (IS_ERR(*tfm)) {
1866 *tfm = NULL;
1867 return err_alg;
1868 }
1869
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001870 return NO_ERROR;
1871}
1872
1873static enum drbd_ret_code
1874alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1875{
1876 char hmac_name[CRYPTO_MAX_ALG_NAME];
1877 enum drbd_ret_code rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001878
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001879 rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1880 ERR_CSUMS_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001881 if (rv != NO_ERROR)
1882 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001883 rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1884 ERR_VERIFY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001885 if (rv != NO_ERROR)
1886 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001887 rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1888 ERR_INTEGRITY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001889 if (rv != NO_ERROR)
1890 return rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001891 if (new_conf->cram_hmac_alg[0] != 0) {
1892 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1893 new_conf->cram_hmac_alg);
1894
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001895 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1896 ERR_AUTH_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001897 }
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001898
1899 return rv;
1900}
1901
1902static void free_crypto(struct crypto *crypto)
1903{
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001904 crypto_free_hash(crypto->cram_hmac_tfm);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001905 crypto_free_hash(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001906 crypto_free_hash(crypto->csums_tfm);
1907 crypto_free_hash(crypto->verify_tfm);
1908}
1909
Lars Ellenbergf3990022011-03-23 14:31:09 +01001910int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1911{
1912 enum drbd_ret_code retcode;
1913 struct drbd_tconn *tconn;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001914 struct net_conf *old_conf, *new_conf = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001915 int err;
1916 int ovr; /* online verify running */
1917 int rsr; /* re-sync running */
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001918 struct crypto crypto = { };
Lars Ellenbergf3990022011-03-23 14:31:09 +01001919
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02001920 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001921 if (!adm_ctx.reply_skb)
1922 return retcode;
1923 if (retcode != NO_ERROR)
1924 goto out;
1925
1926 tconn = adm_ctx.tconn;
1927
1928 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1929 if (!new_conf) {
1930 retcode = ERR_NOMEM;
1931 goto out;
1932 }
1933
Lars Ellenbergf3990022011-03-23 14:31:09 +01001934 conn_reconfig_start(tconn);
1935
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001936 mutex_lock(&tconn->data.mutex);
Philipp Reisnera0095502011-05-03 13:14:15 +02001937 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001938 old_conf = tconn->net_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001939
1940 if (!old_conf) {
1941 drbd_msg_put_info("net conf missing, try connect");
1942 retcode = ERR_INVALID_REQUEST;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001943 goto fail;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001944 }
1945
1946 *new_conf = *old_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001947 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001948 set_net_conf_defaults(new_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001949
Lars Ellenbergf3990022011-03-23 14:31:09 +01001950 err = net_conf_from_attrs_for_change(new_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02001951 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001952 retcode = ERR_MANDATORY_TAG;
1953 drbd_msg_put_info(from_attrs_err_to_txt(err));
1954 goto fail;
1955 }
1956
Philipp Reisnercd643972011-04-13 18:00:59 -07001957 retcode = check_net_options(tconn, new_conf);
1958 if (retcode != NO_ERROR)
1959 goto fail;
1960
Lars Ellenbergf3990022011-03-23 14:31:09 +01001961 /* re-sync running */
1962 rsr = conn_resync_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001963 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001964 retcode = ERR_CSUMS_RESYNC_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001965 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001966 }
1967
Lars Ellenbergf3990022011-03-23 14:31:09 +01001968 /* online verify running */
1969 ovr = conn_ov_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001970 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1971 retcode = ERR_VERIFY_RUNNING;
1972 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001973 }
1974
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001975 retcode = alloc_crypto(&crypto, new_conf);
1976 if (retcode != NO_ERROR)
1977 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001978
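	/* Publish the new net_conf; the old one is freed only after
	 * synchronize_rcu() below, so lockless readers stay safe. */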
Philipp Reisner44ed1672011-04-19 17:10:19 +02001979 rcu_assign_pointer(tconn->net_conf, new_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001980
1981 if (!rsr) {
1982 crypto_free_hash(tconn->csums_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001983 tconn->csums_tfm = crypto.csums_tfm;
1984 crypto.csums_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001985 }
1986 if (!ovr) {
1987 crypto_free_hash(tconn->verify_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001988 tconn->verify_tfm = crypto.verify_tfm;
1989 crypto.verify_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001990 }
1991
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001992 crypto_free_hash(tconn->integrity_tfm);
1993 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02001994 if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001995 /* Do this without trying to take tconn->data.mutex again. */
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02001996 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001997
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001998 crypto_free_hash(tconn->cram_hmac_tfm);
1999 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2000
Philipp Reisnera0095502011-05-03 13:14:15 +02002001 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002002 mutex_unlock(&tconn->data.mutex);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002003 synchronize_rcu();
2004 kfree(old_conf);
2005
Lars Ellenbergf3990022011-03-23 14:31:09 +01002006 if (tconn->cstate >= C_WF_REPORT_PARAMS)
2007 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
2008
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002009 goto done;
2010
Lars Ellenbergf3990022011-03-23 14:31:09 +01002011 fail:
Philipp Reisnera0095502011-05-03 13:14:15 +02002012 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002013 mutex_unlock(&tconn->data.mutex);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002014 free_crypto(&crypto);
Lars Ellenbergf3990022011-03-23 14:31:09 +01002015 kfree(new_conf);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002016 done:
Lars Ellenbergf3990022011-03-23 14:31:09 +01002017 conn_reconfig_done(tconn);
2018 out:
2019 drbd_adm_finish(info, retcode);
2020 return 0;
2021}
2022
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002023int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002024{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002025 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002026 struct net_conf *old_conf, *new_conf = NULL;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002027 struct crypto crypto = { };
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002028 struct drbd_tconn *tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002029 enum drbd_ret_code retcode;
2030 int i;
2031 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002032
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002033 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002034
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002035 if (!adm_ctx.reply_skb)
2036 return retcode;
2037 if (retcode != NO_ERROR)
2038 goto out;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002039 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2040 drbd_msg_put_info("connection endpoint(s) missing");
2041 retcode = ERR_INVALID_REQUEST;
2042 goto out;
2043 }
2044
2045 /* No need for _rcu here. All reconfiguration is
2046 * strictly serialized on genl_lock(). We are protected against
2047 * concurrent reconfiguration/addition/deletion */
2048 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2049 if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
2050 !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
2051 retcode = ERR_LOCAL_ADDR;
2052 goto out;
2053 }
2054
2055 if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
2056 !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
2057 retcode = ERR_PEER_ADDR;
2058 goto out;
2059 }
2060 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002061
2062 tconn = adm_ctx.tconn;
Philipp Reisner80883192011-02-18 14:56:45 +01002063 conn_reconfig_start(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002064
Philipp Reisner80883192011-02-18 14:56:45 +01002065 if (tconn->cstate > C_STANDALONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002066 retcode = ERR_NET_CONFIGURED;
2067 goto fail;
2068 }
2069
Andreas Gruenbachera209b4a2011-08-17 12:43:25 +02002070 /* allocation not in the IO path, drbdsetup / netlink process context */
Lars Ellenberg5979e362011-04-27 21:09:55 +02002071 new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002072 if (!new_conf) {
2073 retcode = ERR_NOMEM;
2074 goto fail;
2075 }
2076
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002077 set_net_conf_defaults(new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002078
Lars Ellenbergf3990022011-03-23 14:31:09 +01002079 err = net_conf_from_attrs(new_conf, info);
Lars Ellenberg25e40932011-08-19 10:39:00 +02002080 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002081 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002082 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002083 goto fail;
2084 }
2085
Philipp Reisnercd643972011-04-13 18:00:59 -07002086 retcode = check_net_options(tconn, new_conf);
2087 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002088 goto fail;
Philipp Reisner422028b2010-10-27 11:12:07 +02002089
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002090 retcode = alloc_crypto(&crypto, new_conf);
2091 if (retcode != NO_ERROR)
2092 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002093
Philipp Reisnerb411b362009-09-25 16:07:19 -07002094 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2095
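	/* Flush the worker, then install the new net_conf under conf_update;
	 * finding an existing configuration here fails with ERR_NET_CONFIGURED. */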
Philipp Reisner80883192011-02-18 14:56:45 +01002096 conn_flush_workqueue(tconn);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002097
Philipp Reisnera0095502011-05-03 13:14:15 +02002098 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002099 old_conf = tconn->net_conf;
2100 if (old_conf) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002101 retcode = ERR_NET_CONFIGURED;
Philipp Reisnera0095502011-05-03 13:14:15 +02002102 mutex_unlock(&tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002103 goto fail;
2104 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002105 rcu_assign_pointer(tconn->net_conf, new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002106
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002107 conn_free_crypto(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002108 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002109 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002110 tconn->csums_tfm = crypto.csums_tfm;
2111 tconn->verify_tfm = crypto.verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002112
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002113 tconn->my_addr_len = nla_len(adm_ctx.my_addr);
2114 memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
2115 tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
2116 memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
2117
Philipp Reisnera0095502011-05-03 13:14:15 +02002118 mutex_unlock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002119
Philipp Reisner695d08f2011-04-11 22:53:32 -07002120 rcu_read_lock();
Philipp Reisner80883192011-02-18 14:56:45 +01002121 idr_for_each_entry(&tconn->volumes, mdev, i) {
2122 mdev->send_cnt = 0;
2123 mdev->recv_cnt = 0;
Philipp Reisner80883192011-02-18 14:56:45 +01002124 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07002125 rcu_read_unlock();
Lars Ellenberg5ee743e2011-04-26 16:22:25 +02002126
2127 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2128
Philipp Reisner80883192011-02-18 14:56:45 +01002129 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002130 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002131 return 0;
2132
2133fail:
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002134 free_crypto(&crypto);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002135 kfree(new_conf);
2136
Philipp Reisner80883192011-02-18 14:56:45 +01002137 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002138out:
2139 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002140 return 0;
2141}
2142
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002143static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2144{
2145 enum drbd_state_rv rv;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002146
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002147 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2148 force ? CS_HARD : 0);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002149
2150 switch (rv) {
2151 case SS_NOTHING_TO_DO:
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002152 break;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002153 case SS_ALREADY_STANDALONE:
2154 return SS_SUCCESS;
2155 case SS_PRIMARY_NOP:
2156 /* Our state checking code wants to see the peer outdated. */
2157 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002158 pdsk, D_OUTDATED), CS_VERBOSE);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002159 break;
2160 case SS_CW_FAILED_BY_PEER:
2161 /* The peer probably wants to see us outdated. */
2162 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2163 disk, D_OUTDATED), 0);
2164 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002165 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2166 CS_HARD);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002167 }
2168 break;
2169 default:;
2170 /* no special handling necessary */
2171 }
2172
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002173 if (rv >= SS_SUCCESS) {
2174 enum drbd_state_rv rv2;
2175 /* No one else can reconfigure the network while I am here.
2176 * The state handling only uses drbd_thread_stop_nowait(),
2177 * we want to really wait here until the receiver is no more.
2178 */
2179 drbd_thread_stop(&adm_ctx.tconn->receiver);
2180
2181 /* Race breaker. This additional state change request may be
2182 * necessary, if this was a forced disconnect during a receiver
2183 * restart. We may have "killed" the receiver thread just
2184 * after drbdd_init() returned. Typically, we should be
2185 * C_STANDALONE already, now, and this becomes a no-op.
2186 */
2187 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2188 CS_VERBOSE | CS_HARD);
2189 if (rv2 < SS_SUCCESS)
2190 conn_err(tconn,
2191 "unexpected rv2=%d in conn_try_disconnect()\n",
2192 rv2);
2193 }
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002194 return rv;
2195}
2196
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002197int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002198{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002199 struct disconnect_parms parms;
2200 struct drbd_tconn *tconn;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002201 enum drbd_state_rv rv;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002202 enum drbd_ret_code retcode;
2203 int err;
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002204
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002205 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002206 if (!adm_ctx.reply_skb)
2207 return retcode;
2208 if (retcode != NO_ERROR)
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002209 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002210
2211 tconn = adm_ctx.tconn;
2212 memset(&parms, 0, sizeof(parms));
2213 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002214 err = disconnect_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002215 if (err) {
2216 retcode = ERR_MANDATORY_TAG;
2217 drbd_msg_put_info(from_attrs_err_to_txt(err));
2218 goto fail;
2219 }
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002220 }
2221
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002222 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2223 if (rv < SS_SUCCESS)
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002224 retcode = rv; /* FIXME: Type mismatch. */
2225 else
2226 retcode = NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002227 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002228 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002229 return 0;
2230}
2231
2232void resync_after_online_grow(struct drbd_conf *mdev)
2233{
2234 int iass; /* I am sync source */
2235
2236 dev_info(DEV, "Resync of new storage after online grow\n");
2237 if (mdev->state.role != mdev->state.peer)
2238 iass = (mdev->state.role == R_PRIMARY);
2239 else
Philipp Reisner25703f82011-02-07 14:35:25 +01002240 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002241
2242 if (iass)
2243 drbd_start_resync(mdev, C_SYNC_SOURCE);
2244 else
2245 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2246}
2247
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002248int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002249{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002250 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002251 struct resize_parms rs;
2252 struct drbd_conf *mdev;
2253 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002254 enum determine_dev_size dd;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002255 enum dds_flags ddsf;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002256 sector_t u_size;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002257 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002258
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002259 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2260 if (!adm_ctx.reply_skb)
2261 return retcode;
2262 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002263 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002264
2265 memset(&rs, 0, sizeof(struct resize_parms));
2266 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002267 err = resize_parms_from_attrs(&rs, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002268 if (err) {
2269 retcode = ERR_MANDATORY_TAG;
2270 drbd_msg_put_info(from_attrs_err_to_txt(err));
2271 goto fail;
2272 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002273 }
2274
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002275 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002276 if (mdev->state.conn > C_CONNECTED) {
2277 retcode = ERR_RESIZE_RESYNC;
2278 goto fail;
2279 }
2280
2281 if (mdev->state.role == R_SECONDARY &&
2282 mdev->state.peer == R_SECONDARY) {
2283 retcode = ERR_NO_PRIMARY;
2284 goto fail;
2285 }
2286
2287 if (!get_ldev(mdev)) {
2288 retcode = ERR_NO_DISK;
2289 goto fail;
2290 }
2291
Philipp Reisner31890f42011-01-19 14:12:51 +01002292 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002293 retcode = ERR_NEED_APV_93;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002294 goto fail_ldev;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002295 }
2296
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002297 rcu_read_lock();
2298 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2299 rcu_read_unlock();
2300 if (u_size != (sector_t)rs.resize_size) {
2301 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2302 if (!new_disk_conf) {
2303 retcode = ERR_NOMEM;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002304 goto fail_ldev;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002305 }
2306 }
2307
Philipp Reisner087c2492010-03-26 13:49:56 +01002308 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002309 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002310
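	/* An explicitly requested size is persisted in disk_conf: swap in the
	 * updated copy under conf_update and free the old one after an RCU
	 * grace period. */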
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002311 if (new_disk_conf) {
2312 mutex_lock(&mdev->tconn->conf_update);
2313 old_disk_conf = mdev->ldev->disk_conf;
2314 *new_disk_conf = *old_disk_conf;
2315 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2316 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2317 mutex_unlock(&mdev->tconn->conf_update);
2318 synchronize_rcu();
2319 kfree(old_disk_conf);
2320 }
2321
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002322 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
Bart Van Assche24c48302011-05-21 18:32:29 +02002323 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002324 drbd_md_sync(mdev);
2325 put_ldev(mdev);
2326 if (dd == dev_size_error) {
2327 retcode = ERR_NOMEM_BITMAP;
2328 goto fail;
2329 }
2330
Philipp Reisner087c2492010-03-26 13:49:56 +01002331 if (mdev->state.conn == C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002332 if (dd == grew)
2333 set_bit(RESIZE_PENDING, &mdev->flags);
2334
2335 drbd_send_uuids(mdev);
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002336 drbd_send_sizes(mdev, 1, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002337 }
2338
2339 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002340 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002341 return 0;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002342
2343 fail_ldev:
2344 put_ldev(mdev);
2345 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002346}
2347
Lars Ellenbergf3990022011-03-23 14:31:09 +01002348int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002349{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002350 enum drbd_ret_code retcode;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002351 struct drbd_tconn *tconn;
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002352 struct res_opts res_opts;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002353 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002354
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002355 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002356 if (!adm_ctx.reply_skb)
2357 return retcode;
2358 if (retcode != NO_ERROR)
2359 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002360 tconn = adm_ctx.tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002361
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002362 res_opts = tconn->res_opts;
Lars Ellenberg5979e362011-04-27 21:09:55 +02002363 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002364 set_res_opts_defaults(&res_opts);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002365
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002366 err = res_opts_from_attrs(&res_opts, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02002367 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002368 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002369 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370 goto fail;
2371 }
2372
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002373 err = set_resource_options(tconn, &res_opts);
2374 if (err) {
2375 retcode = ERR_INVALID_REQUEST;
2376 if (err == -ENOMEM)
2377 retcode = ERR_NOMEM;
Philipp Reisner778f2712010-07-06 11:14:00 +02002378 }
2379
Philipp Reisnerb411b362009-09-25 16:07:19 -07002380fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002381 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002382 return 0;
2383}
2384
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002385int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002386{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002387 struct drbd_conf *mdev;
2388 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2389
2390 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2391 if (!adm_ctx.reply_skb)
2392 return retcode;
2393 if (retcode != NO_ERROR)
2394 goto out;
2395
2396 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002397
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002398 /* If there is still bitmap IO pending, probably because of a previous
2399 * resync just being finished, wait for it before requesting a new resync. */
Lars Ellenberg5016b822012-05-07 12:00:56 +02002400 drbd_suspend_io(mdev);
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002401 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2402
Philipp Reisnerb411b362009-09-25 16:07:19 -07002403 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2404
2405 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2406 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2407
2408 while (retcode == SS_NEED_CONNECTION) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01002409 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002410 if (mdev->state.conn < C_CONNECTED)
2411 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002412 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002413
2414 if (retcode != SS_NEED_CONNECTION)
2415 break;
2416
2417 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2418 }
Lars Ellenberg5016b822012-05-07 12:00:56 +02002419 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002420
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002421out:
2422 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002423 return 0;
2424}
2425
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002426static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2427 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002428{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002429 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002430
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002431 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2432 if (!adm_ctx.reply_skb)
2433 return retcode;
2434 if (retcode != NO_ERROR)
2435 goto out;
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002436
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002437 retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2438out:
2439 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002440 return 0;
2441}
2442
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002443static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2444{
2445 int rv;
2446
2447 rv = drbd_bmio_set_n_write(mdev);
2448 drbd_suspend_al(mdev);
2449 return rv;
2450}
2451
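/*
 * Invalidate the peer's disk: request C_STARTING_SYNC_S so this node
 * becomes the sync source.  If that fails only because no peer is
 * connected and we are Primary, mark the peer disk D_INCONSISTENT and set
 * all bits in the bitmap, turning the resync after the next connect into
 * a full resync.
 */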
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002452int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002453{
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002454 int retcode; /* drbd_ret_code, drbd_state_rv */
2455 struct drbd_conf *mdev;
2456
2457 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2458 if (!adm_ctx.reply_skb)
2459 return retcode;
2460 if (retcode != NO_ERROR)
2461 goto out;
2462
2463 mdev = adm_ctx.mdev;
2464
Lars Ellenberg5016b822012-05-07 12:00:56 +02002465 /* If there is still bitmap IO pending, probably because of a previous
2466 * resync just being finished, wait for it before requesting a new resync. */
2467 drbd_suspend_io(mdev);
2468 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2469
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002470 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
2471 if (retcode < SS_SUCCESS) {
2472 if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
2473 /* The peer will get a resync upon connect anyways.
2474 * Just make that into a full resync. */
2475 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
2476 if (retcode >= SS_SUCCESS) {
2477 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
2478 "set_n_write from invalidate_peer",
2479 BM_LOCKED_SET_ALLOWED))
2480 retcode = ERR_IO_MD_DISK;
2481 }
2482 } else
2483 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
2484 }
Lars Ellenberg5016b822012-05-07 12:00:56 +02002485 drbd_resume_io(mdev);
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002486
2487out:
2488 drbd_adm_finish(info, retcode);
2489 return 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002490}
Philipp Reisnerb411b362009-09-25 16:07:19 -07002491
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002492int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2493{
2494 enum drbd_ret_code retcode;
2495
2496 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2497 if (!adm_ctx.reply_skb)
2498 return retcode;
2499 if (retcode != NO_ERROR)
2500 goto out;
2501
2502 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002503 retcode = ERR_PAUSE_IS_SET;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002504out:
2505 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002506 return 0;
2507}
2508
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002509int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002510{
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02002511 union drbd_dev_state s;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002512 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002513
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002514 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2515 if (!adm_ctx.reply_skb)
2516 return retcode;
2517 if (retcode != NO_ERROR)
2518 goto out;
2519
2520 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2521 s = adm_ctx.mdev->state;
Philipp Reisnercd88d032011-01-20 11:46:41 +01002522 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2523 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2524 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2525 } else {
2526 retcode = ERR_PAUSE_IS_CLEAR;
2527 }
2528 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002529
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002530out:
2531 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002532 return 0;
2533}
2534
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002535int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002536{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002537 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002538}
2539
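/*
 * Resume suspended IO: clear all suspension reasons (user, no data
 * accessible, fencing).  A new current UUID that was deferred while IO
 * was frozen is generated first; afterwards, requests stuck in the
 * transfer log are cleaned out (if not connected) or restarted/failed
 * (if the disk is gone).
 */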
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002540int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002541{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002542 struct drbd_conf *mdev;
 2543	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2544
2545 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2546 if (!adm_ctx.reply_skb)
2547 return retcode;
2548 if (retcode != NO_ERROR)
2549 goto out;
2550
2551 mdev = adm_ctx.mdev;
Philipp Reisner43a51822010-06-11 11:26:34 +02002552 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2553 drbd_uuid_new_current(mdev);
2554 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02002555 }
Philipp Reisner265be2d2010-05-31 10:14:17 +02002556 drbd_suspend_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002557 retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2558 if (retcode == SS_SUCCESS) {
Philipp Reisner265be2d2010-05-31 10:14:17 +02002559 if (mdev->state.conn < C_CONNECTED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002560 tl_clear(mdev->tconn);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002561 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002562 tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002563 }
2564 drbd_resume_io(mdev);
2565
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002566out:
2567 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002568 return 0;
2569}
2570
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002571int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002572{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002573 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002574}
2575
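/*
 * Emit the configuration context (resource name, volume number, and local
 * and peer addresses, where known) as a nested DRBD_NLA_CFG_CONTEXT
 * attribute.  Returns -EMSGSIZE if the skb has no room left.
 */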
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002576int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002577{
2578 struct nlattr *nla;
2579 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2580 if (!nla)
2581 goto nla_put_failure;
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002582 if (vnr != VOLUME_UNSPECIFIED &&
2583 nla_put_u32(skb, T_ctx_volume, vnr))
2584 goto nla_put_failure;
2585 if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
2586 goto nla_put_failure;
2587 if (tconn->my_addr_len &&
2588 nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
2589 goto nla_put_failure;
2590 if (tconn->peer_addr_len &&
2591 nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
2592 goto nla_put_failure;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002593 nla_nest_end(skb, nla);
2594 return 0;
2595
2596nla_put_failure:
2597 if (nla)
2598 nla_nest_cancel(skb, nla);
2599 return -EMSGSIZE;
2600}
2601
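/*
 * Fill one complete status record for @mdev into @skb: configuration
 * context, resource, disk and net options, current state, UUIDs, capacity
 * and bitmap counters, plus the state-change or helper details carried in
 * @sib when this is a broadcast event.
 */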
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002602int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2603 const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002604{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002605 struct state_info *si = NULL; /* for sizeof(si->member); */
Philipp Reisner44ed1672011-04-19 17:10:19 +02002606 struct net_conf *nc;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002607 struct nlattr *nla;
2608 int got_ldev;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002609 int err = 0;
2610 int exclude_sensitive;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002611
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002612 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
 2613	 * to.  So we had better exclude sensitive information.
2614 *
2615 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2616 * in the context of the requesting user process. Exclude sensitive
 2617	 * information, unless current has CAP_SYS_ADMIN.
2618 *
2619 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2620 * relies on the current implementation of netlink_dump(), which
2621 * executes the dump callback successively from netlink_recvmsg(),
2622 * always in the context of the receiving process */
2623 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002624
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002625 got_ldev = get_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002626
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002627	/* We still need to add the connection name and volume number here;
 2628	 * the minor number is already in drbd_genlmsghdr. */
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002629 if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002630 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002631
Lars Ellenbergf3990022011-03-23 14:31:09 +01002632 if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2633 goto nla_put_failure;
2634
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002635 rcu_read_lock();
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002636 if (got_ldev)
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002637 if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002638 goto nla_put_failure;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002639
Philipp Reisner44ed1672011-04-19 17:10:19 +02002640 nc = rcu_dereference(mdev->tconn->net_conf);
2641 if (nc)
2642 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2643 rcu_read_unlock();
2644 if (err)
2645 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002646
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002647 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2648 if (!nla)
2649 goto nla_put_failure;
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002650 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
2651 nla_put_u32(skb, T_current_state, mdev->state.i) ||
2652 nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
2653 nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)))
2654 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002655
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002656 if (got_ldev) {
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002657 if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
2658 nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid) ||
2659 nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
2660 nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
2661 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002662 if (C_SYNC_SOURCE <= mdev->state.conn &&
2663 C_PAUSED_SYNC_T >= mdev->state.conn) {
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002664 if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
2665 nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
2666 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002667 }
2668 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002669
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002670 if (sib) {
 2671		switch (sib->sib_reason) {
2672 case SIB_SYNC_PROGRESS:
2673 case SIB_GET_STATUS_REPLY:
2674 break;
2675 case SIB_STATE_CHANGE:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002676 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
2677 nla_put_u32(skb, T_new_state, sib->ns.i))
2678 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002679 break;
2680 case SIB_HELPER_POST:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002681 if (nla_put_u32(skb, T_helper_exit_code,
2682 sib->helper_exit_code))
2683 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002684 /* fall through */
2685 case SIB_HELPER_PRE:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002686 if (nla_put_string(skb, T_helper, sib->helper_name))
2687 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002688 break;
2689 }
2690 }
2691 nla_nest_end(skb, nla);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002692
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002693 if (0)
2694nla_put_failure:
2695 err = -EMSGSIZE;
2696 if (got_ldev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002697 put_ldev(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002698 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002699}
2700
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002701int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002702{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002703 enum drbd_ret_code retcode;
2704 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002705
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002706 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2707 if (!adm_ctx.reply_skb)
2708 return retcode;
2709 if (retcode != NO_ERROR)
2710 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002711
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002712 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2713 if (err) {
2714 nlmsg_free(adm_ctx.reply_skb);
2715 return err;
2716 }
2717out:
2718 drbd_adm_finish(info, retcode);
2719 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002720}
2721
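/*
 * Netlink dump callback: emit one status record per invocation, iterating
 * over all resources (tconns) and their volumes.  The iterator state is
 * kept in cb->args[]; see the comment below.
 */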
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002722int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002723{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002724 struct drbd_conf *mdev;
2725 struct drbd_genlmsghdr *dh;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002726 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2727 struct drbd_tconn *tconn = NULL;
2728 struct drbd_tconn *tmp;
2729 unsigned volume = cb->args[1];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002730
Lars Ellenberg543cc102011-03-10 22:18:18 +01002731 /* Open coded, deferred, iteration:
2732 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2733 * idr_for_each_entry(&tconn->volumes, mdev, i) {
2734 * ...
2735 * }
2736 * }
2737 * where tconn is cb->args[0];
2738 * and i is cb->args[1];
2739 *
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002740 * cb->args[2] indicates if we shall loop over all resources,
2741 * or just dump all volumes of a single resource.
2742 *
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002743 * This may miss entries inserted after this dump started,
2744 * or entries deleted before they are reached.
Lars Ellenberg543cc102011-03-10 22:18:18 +01002745 *
2746 * We need to make sure the mdev won't disappear while
2747 * we are looking at it, and revalidate our iterators
2748 * on each iteration.
2749 */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002750
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002751 /* synchronize with conn_create()/conn_destroy() */
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002752 rcu_read_lock();
Lars Ellenberg543cc102011-03-10 22:18:18 +01002753 /* revalidate iterator position */
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002754 list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
Lars Ellenberg543cc102011-03-10 22:18:18 +01002755 if (pos == NULL) {
2756 /* first iteration */
2757 pos = tmp;
2758 tconn = pos;
2759 break;
2760 }
2761 if (tmp == pos) {
2762 tconn = pos;
2763 break;
2764 }
2765 }
2766 if (tconn) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002767next_tconn:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002768 mdev = idr_get_next(&tconn->volumes, &volume);
2769 if (!mdev) {
2770 /* No more volumes to dump on this tconn.
2771 * Advance tconn iterator. */
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002772 pos = list_entry_rcu(tconn->all_tconn.next,
2773 struct drbd_tconn, all_tconn);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002774 /* Did we dump any volume on this tconn yet? */
Lars Ellenberg543cc102011-03-10 22:18:18 +01002775 if (volume != 0) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002776 /* If we reached the end of the list,
2777 * or only a single resource dump was requested,
2778 * we are done. */
2779 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2780 goto out;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002781 volume = 0;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002782 tconn = pos;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002783 goto next_tconn;
2784 }
2785 }
2786
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002787 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2788 cb->nlh->nlmsg_seq, &drbd_genl_family,
2789 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2790 if (!dh)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002791 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002792
Lars Ellenberg543cc102011-03-10 22:18:18 +01002793 if (!mdev) {
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002794 /* This is a tconn without a single volume.
 2795			 * Surprisingly enough, it may have a network
2796 * configuration. */
2797 struct net_conf *nc;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002798 dh->minor = -1U;
2799 dh->ret_code = NO_ERROR;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002800 if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002801 goto cancel;
2802 nc = rcu_dereference(tconn->net_conf);
2803 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
2804 goto cancel;
2805 goto done;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002806 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002807
Lars Ellenberg543cc102011-03-10 22:18:18 +01002808 D_ASSERT(mdev->vnr == volume);
2809 D_ASSERT(mdev->tconn == tconn);
2810
2811 dh->minor = mdev_to_minor(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002812 dh->ret_code = NO_ERROR;
2813
2814 if (nla_put_status_info(skb, mdev, NULL)) {
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002815cancel:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002816 genlmsg_cancel(skb, dh);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002817 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002818 }
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002819done:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002820 genlmsg_end(skb, dh);
2821 }
2822
Lars Ellenberg543cc102011-03-10 22:18:18 +01002823out:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002824 rcu_read_unlock();
Lars Ellenberg543cc102011-03-10 22:18:18 +01002825 /* where to start the next iteration */
2826 cb->args[0] = (long)pos;
2827 cb->args[1] = (pos == tconn) ? volume + 1 : 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002828
Lars Ellenberg543cc102011-03-10 22:18:18 +01002829	/* Finding no more tconns/volumes/minors results in an empty skb,
 2830	 * which will terminate the dump. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002831 return skb->len;
2832}
2833
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002834/*
2835 * Request status of all resources, or of all volumes within a single resource.
2836 *
2837 * This is a dump, as the answer may not fit in a single reply skb otherwise.
2838 * Which means we cannot use the family->attrbuf or other such members, because
2839 * dump is NOT protected by the genl_lock(). During dump, we only have access
2840 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2841 *
2842 * Once things are setup properly, we call into get_one_status().
2843 */
2844int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2845{
2846 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2847 struct nlattr *nla;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002848 const char *resource_name;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002849 struct drbd_tconn *tconn;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002850 int maxtype;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002851
2852 /* Is this a followup call? */
2853 if (cb->args[0]) {
2854 /* ... of a single resource dump,
2855 * and the resource iterator has been advanced already? */
2856 if (cb->args[2] && cb->args[2] != cb->args[0])
2857 return 0; /* DONE. */
2858 goto dump;
2859 }
2860
2861 /* First call (from netlink_dump_start). We need to figure out
2862 * which resource(s) the user wants us to dump. */
2863 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2864 nlmsg_attrlen(cb->nlh, hdrlen),
2865 DRBD_NLA_CFG_CONTEXT);
2866
2867 /* No explicit context given. Dump all. */
2868 if (!nla)
2869 goto dump;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002870 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
2871 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
2872 if (IS_ERR(nla))
2873 return PTR_ERR(nla);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002874 /* context given, but no name present? */
2875 if (!nla)
2876 return -EINVAL;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002877 resource_name = nla_data(nla);
2878 tconn = conn_get_by_name(resource_name);
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002879
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002880 if (!tconn)
2881 return -ENODEV;
2882
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002883 kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2884
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002885 /* prime iterators, and set "filter" mode mark:
2886 * only dump this tconn. */
2887 cb->args[0] = (long)tconn;
2888 /* cb->args[1] = 0; passed in this way. */
2889 cb->args[2] = (long)tconn;
2890
2891dump:
2892 return get_one_status(skb, cb);
2893}
2894
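/*
 * Report which timeout currently applies to this minor: the peer-outdated
 * timeout, the degraded-node timeout, or the default timeout.
 */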
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002895int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2896{
2897 enum drbd_ret_code retcode;
2898 struct timeout_parms tp;
2899 int err;
2900
2901 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2902 if (!adm_ctx.reply_skb)
2903 return retcode;
2904 if (retcode != NO_ERROR)
2905 goto out;
2906
2907 tp.timeout_type =
2908 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2909 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2910 UT_DEFAULT;
2911
2912 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2913 if (err) {
2914 nlmsg_free(adm_ctx.reply_skb);
2915 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002916 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002917out:
2918 drbd_adm_finish(info, retcode);
2919 return 0;
2920}
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002921
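/*
 * Start online verify (request C_VERIFY_S).  An explicit start sector may
 * be passed in; by default we resume from the last known position.  The
 * position is rounded down to bitmap-bit granularity, since
 * w_make_ov_request expects it aligned.
 */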
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002922int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2923{
2924 struct drbd_conf *mdev;
2925 enum drbd_ret_code retcode;
2926
2927 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2928 if (!adm_ctx.reply_skb)
2929 return retcode;
2930 if (retcode != NO_ERROR)
2931 goto out;
2932
2933 mdev = adm_ctx.mdev;
2934 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2935 /* resume from last known position, if possible */
2936 struct start_ov_parms parms =
2937 { .ov_start_sector = mdev->ov_start_sector };
Lars Ellenbergf3990022011-03-23 14:31:09 +01002938 int err = start_ov_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002939 if (err) {
2940 retcode = ERR_MANDATORY_TAG;
2941 drbd_msg_put_info(from_attrs_err_to_txt(err));
2942 goto out;
2943 }
2944 /* w_make_ov_request expects position to be aligned */
 2945		mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
2946 }
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002947 /* If there is still bitmap IO pending, e.g. previous resync or verify
2948 * just being finished, wait for it before requesting a new resync. */
Lars Ellenberg5016b822012-05-07 12:00:56 +02002949 drbd_suspend_io(mdev);
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002950 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002951	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
Lars Ellenberg5016b822012-05-07 12:00:56 +02002952 drbd_resume_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002953out:
2954 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002955 return 0;
2956}
2957
2958
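/*
 * Generate a new current data generation UUID.  With clear_bm set, the
 * whole bitmap is cleared as well; if we are connected, the agreed
 * protocol version is >= 90, and the current UUID is still
 * UUID_JUST_CREATED, this amounts to skipping the initial sync and both
 * sides are marked D_UP_TO_DATE.
 */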
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002959int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002960{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002961 struct drbd_conf *mdev;
2962 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002963 int skip_initial_sync = 0;
2964 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002965 struct new_c_uuid_parms args;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002966
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002967 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2968 if (!adm_ctx.reply_skb)
2969 return retcode;
2970 if (retcode != NO_ERROR)
2971 goto out_nolock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002972
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002973 mdev = adm_ctx.mdev;
2974 memset(&args, 0, sizeof(args));
2975 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002976 err = new_c_uuid_parms_from_attrs(&args, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002977 if (err) {
2978 retcode = ERR_MANDATORY_TAG;
2979 drbd_msg_put_info(from_attrs_err_to_txt(err));
2980 goto out_nolock;
2981 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002982 }
2983
Philipp Reisner8410da82011-02-11 20:11:10 +01002984 mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002985
2986 if (!get_ldev(mdev)) {
2987 retcode = ERR_NO_DISK;
2988 goto out;
2989 }
2990
2991 /* this is "skip initial sync", assume to be clean */
Philipp Reisner31890f42011-01-19 14:12:51 +01002992 if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07002993 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2994 dev_info(DEV, "Preparing to skip initial sync\n");
2995 skip_initial_sync = 1;
2996 } else if (mdev->state.conn != C_STANDALONE) {
2997 retcode = ERR_CONNECTED;
2998 goto out_dec;
2999 }
3000
3001 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
3002 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
3003
3004 if (args.clear_bm) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003005 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3006 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003007 if (err) {
3008 dev_err(DEV, "Writing bitmap failed with %d\n",err);
3009 retcode = ERR_IO_MD_DISK;
3010 }
3011 if (skip_initial_sync) {
3012 drbd_send_uuids_skip_initial_sync(mdev);
3013 _drbd_uuid_set(mdev, UI_BITMAP, 0);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003014 drbd_print_uuids(mdev, "cleared bitmap UUID");
Philipp Reisner87eeee42011-01-19 14:16:30 +01003015 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003016 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3017 CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003018 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003019 }
3020 }
3021
3022 drbd_md_sync(mdev);
3023out_dec:
3024 put_ldev(mdev);
3025out:
Philipp Reisner8410da82011-02-11 20:11:10 +01003026 mutex_unlock(mdev->state_mutex);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003027out_nolock:
3028 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003029 return 0;
3030}
3031
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003032static enum drbd_ret_code
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003033drbd_check_resource_name(const char *name)
Philipp Reisner774b3052011-02-22 02:07:03 -05003034{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003035 if (!name || !name[0]) {
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003036 drbd_msg_put_info("resource name missing");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003037 return ERR_MANDATORY_TAG;
3038 }
3039 /* if we want to use these in sysfs/configfs/debugfs some day,
3040 * we must not allow slashes */
3041 if (strchr(name, '/')) {
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003042 drbd_msg_put_info("invalid resource name");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003043 return ERR_INVALID_REQUEST;
3044 }
3045 return NO_ERROR;
3046}
Philipp Reisner774b3052011-02-22 02:07:03 -05003047
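/*
 * Create a new, empty resource (tconn).  Resource options may be supplied
 * in the same request; a resource that already exists under that name is
 * only an error if NLM_F_EXCL was set.
 */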
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003048int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003049{
3050 enum drbd_ret_code retcode;
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003051 struct res_opts res_opts;
3052 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003053
3054 retcode = drbd_adm_prepare(skb, info, 0);
3055 if (!adm_ctx.reply_skb)
3056 return retcode;
3057 if (retcode != NO_ERROR)
3058 goto out;
3059
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003060 set_res_opts_defaults(&res_opts);
3061 err = res_opts_from_attrs(&res_opts, info);
3062 if (err && err != -ENOMSG) {
3063 retcode = ERR_MANDATORY_TAG;
3064 drbd_msg_put_info(from_attrs_err_to_txt(err));
3065 goto out;
3066 }
3067
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003068 retcode = drbd_check_resource_name(adm_ctx.resource_name);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003069 if (retcode != NO_ERROR)
3070 goto out;
3071
3072 if (adm_ctx.tconn) {
Lars Ellenberg38f19612011-03-14 13:22:35 +01003073 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3074 retcode = ERR_INVALID_REQUEST;
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003075 drbd_msg_put_info("resource exists");
Lars Ellenberg38f19612011-03-14 13:22:35 +01003076 }
3077 /* else: still NO_ERROR */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003078 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003079 }
3080
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003081 if (!conn_create(adm_ctx.resource_name, &res_opts))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003082 retcode = ERR_NOMEM;
3083out:
3084 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003085 return 0;
3086}
3087
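/*
 * Create a new minor (volume) within an existing resource, after checking
 * that the requested minor and volume numbers are in range.  An already
 * existing minor is only an error if NLM_F_EXCL was set.
 */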
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003088int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003089{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003090 struct drbd_genlmsghdr *dh = info->userhdr;
3091 enum drbd_ret_code retcode;
Philipp Reisner774b3052011-02-22 02:07:03 -05003092
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02003093 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003094 if (!adm_ctx.reply_skb)
3095 return retcode;
3096 if (retcode != NO_ERROR)
3097 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003098
Andreas Gruenbacherf2257a52011-07-14 16:00:40 +02003099 if (dh->minor > MINORMASK) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003100 drbd_msg_put_info("requested minor out of range");
3101 retcode = ERR_INVALID_REQUEST;
3102 goto out;
3103 }
Andreas Gruenbacher0c8e36d2011-03-30 16:00:17 +02003104 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003105 drbd_msg_put_info("requested volume id out of range");
3106 retcode = ERR_INVALID_REQUEST;
3107 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003108 }
3109
Lars Ellenberg38f19612011-03-14 13:22:35 +01003110 /* drbd_adm_prepare made sure already
3111 * that mdev->tconn and mdev->vnr match the request. */
3112 if (adm_ctx.mdev) {
3113 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3114 retcode = ERR_MINOR_EXISTS;
3115 /* else: still NO_ERROR */
3116 goto out;
3117 }
3118
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003119 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
3120out:
3121 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003122 return 0;
3123}
3124
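/*
 * A minor can only be deleted while its disk is Diskless and its role is
 * Secondary.  It is then removed from the volume and minor idrs, its
 * gendisk is deleted, and the final kref_put frees it after an RCU grace
 * period.
 */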
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003125static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
3126{
3127 if (mdev->state.disk == D_DISKLESS &&
3128 /* no need to be mdev->state.conn == C_STANDALONE &&
3129 * we may want to delete a minor from a live replication group.
3130 */
3131 mdev->state.role == R_SECONDARY) {
Philipp Reisner369bea62011-07-06 23:04:44 +02003132 _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
3133 CS_VERBOSE + CS_WAIT_COMPLETE);
Philipp Reisner81fa2e62011-05-04 15:10:30 +02003134 idr_remove(&mdev->tconn->volumes, mdev->vnr);
3135 idr_remove(&minors, mdev_to_minor(mdev));
3136 del_gendisk(mdev->vdisk);
3137 synchronize_rcu();
3138 kref_put(&mdev->kref, &drbd_minor_destroy);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003139 return NO_ERROR;
3140 } else
3141 return ERR_MINOR_CONFIGURED;
3142}
3143
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003144int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003145{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003146 enum drbd_ret_code retcode;
3147
3148 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3149 if (!adm_ctx.reply_skb)
3150 return retcode;
3151 if (retcode != NO_ERROR)
3152 goto out;
3153
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003154 retcode = adm_delete_minor(adm_ctx.mdev);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003155out:
3156 drbd_adm_finish(info, retcode);
3157 return 0;
3158}
3159
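/*
 * Tear down a whole resource in one go: demote every volume to Secondary,
 * disconnect, detach all disks, stop the worker thread, delete all
 * volumes, and finally remove the connection object itself.
 */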
3160int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3161{
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003162	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003163 struct drbd_conf *mdev;
3164 unsigned i;
3165
3166 retcode = drbd_adm_prepare(skb, info, 0);
3167 if (!adm_ctx.reply_skb)
3168 return retcode;
3169 if (retcode != NO_ERROR)
3170 goto out;
3171
3172 if (!adm_ctx.tconn) {
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003173 retcode = ERR_RES_NOT_KNOWN;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003174 goto out;
3175 }
3176
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003177 /* demote */
3178 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3179 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3180 if (retcode < SS_SUCCESS) {
3181 drbd_msg_put_info("failed to demote");
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003182 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003183 }
3184 }
3185
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003186 retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3187 if (retcode < SS_SUCCESS) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003188 drbd_msg_put_info("failed to disconnect");
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003189 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003190 }
3191
3192 /* detach */
3193 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
Philipp Reisnercdfda632011-07-05 15:38:59 +02003194 retcode = adm_detach(mdev, 0);
Lars Ellenberg27012382012-07-24 10:13:55 +02003195 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003196 drbd_msg_put_info("failed to detach");
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003197 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003198 }
3199 }
3200
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003201 /* If we reach this, all volumes (of this tconn) are Secondary,
3202 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003203	 * actually stopped; state handling only does drbd_thread_stop_nowait(). */
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003204 drbd_thread_stop(&adm_ctx.tconn->worker);
3205
3206 /* Now, nothing can fail anymore */
3207
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003208 /* delete volumes */
3209 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3210 retcode = adm_delete_minor(mdev);
3211 if (retcode != NO_ERROR) {
3212 /* "can not happen" */
3213 drbd_msg_put_info("failed to delete volume");
Philipp Reisneref356262011-04-13 14:21:29 -07003214 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003215 }
3216 }
3217
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003218 /* delete connection */
3219 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02003220 list_del_rcu(&adm_ctx.tconn->all_tconn);
3221 synchronize_rcu();
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003222 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3223
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003224 retcode = NO_ERROR;
3225 } else {
3226 /* "can not happen" */
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003227 retcode = ERR_RES_IN_USE;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003228 drbd_msg_put_info("failed to delete connection");
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003229 }
Philipp Reisneref356262011-04-13 14:21:29 -07003230 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003231out:
3232 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003233 return 0;
3234}
3235
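/*
 * Delete a resource (connection) that no longer has any minors; if minors
 * still exist, ERR_RES_IN_USE is returned instead.
 */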
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003236int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003237{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003238 enum drbd_ret_code retcode;
3239
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02003240 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003241 if (!adm_ctx.reply_skb)
3242 return retcode;
3243 if (retcode != NO_ERROR)
3244 goto out;
3245
3246 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02003247 list_del_rcu(&adm_ctx.tconn->all_tconn);
3248 synchronize_rcu();
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003249 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3250
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003251 retcode = NO_ERROR;
Philipp Reisner774b3052011-02-22 02:07:03 -05003252 } else {
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003253 retcode = ERR_RES_IN_USE;
Philipp Reisner774b3052011-02-22 02:07:03 -05003254 }
3255
Lars Ellenberg992d6e92011-05-02 11:47:18 +02003256 if (retcode == NO_ERROR)
3257 drbd_thread_stop(&adm_ctx.tconn->worker);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003258out:
3259 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003260 return 0;
3261}
3262
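/*
 * Broadcast a state info ("sib") event for @mdev to the drbd events
 * multicast group.  A missing listener (-ESRCH) is not treated as an
 * error.
 */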
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003263void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003264{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003265 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3266 struct sk_buff *msg;
3267 struct drbd_genlmsghdr *d_out;
3268 unsigned seq;
3269 int err = -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003270
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003271 seq = atomic_inc_return(&drbd_genl_seq);
3272 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3273 if (!msg)
3274 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003275
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003276 err = -EMSGSIZE;
3277 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3278 if (!d_out) /* cannot happen, but anyways. */
3279 goto nla_put_failure;
3280 d_out->minor = mdev_to_minor(mdev);
Andreas Gruenbacher6f9b5f82011-05-06 01:03:32 +02003281 d_out->ret_code = NO_ERROR;
Philipp Reisner9f5180e2009-10-06 09:30:14 +02003282
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003283 if (nla_put_status_info(msg, mdev, sib))
3284 goto nla_put_failure;
3285 genlmsg_end(msg, d_out);
3286 err = drbd_genl_multicast_events(msg, 0);
3287 /* msg has been consumed or freed in netlink_broadcast() */
3288 if (err && err != -ESRCH)
3289 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003290
Philipp Reisnerb411b362009-09-25 16:07:19 -07003291 return;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003292
3293nla_put_failure:
3294 nlmsg_free(msg);
3295failed:
3296 dev_err(DEV, "Error %d while broadcasting event. "
3297 "Event seq:%u sib_reason:%u\n",
3298 err, seq, sib->sib_reason);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003299}