/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>
/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED		(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
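/* drbd_adm_prepare() checks these flags against what the request resolved to:
 * NEED_MINOR requires a known minor, NEED_RESOURCE a known resource name, and
 * NEED_CONNECTION a connection identified by its address pair (in which case
 * a stray resource name or minor number in the request is rejected). */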
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS &&
	    security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always succeed,
	 * but check anyway */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.resource_name,
				adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
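/* For example (derived from the switch above): with an IPv4 peer at
 * 192.168.1.2, the helper environment ends up containing
 *   DRBD_PEER_AF=ipv4
 *   DRBD_PEER_ADDRESS=192.168.1.2
 */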

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_tconn *tconn = mdev->tconn;
	struct sib_info sib;
	int ret;

	if (current == tconn->worker.task)
		set_bit(CALLBACK_PENDING, &tconn->flags);

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (current == tconn->worker.task)
		clear_bit(CALLBACK_PENDING, &tconn->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
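/* With the default usermode_helper this typically runs something like
 *   /sbin/drbdadm before-resync-target minor-0
 * with DRBD_PEER_AF/DRBD_PEER_ADDRESS set up by setup_khelper_env();
 * (ret >> 8) & 0xff above extracts the helper's exit status. */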

static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_md_sync(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

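/* Exit codes of the fence-peer helper, as interpreted by the switch below:
 * 3 peer is inconsistent, 4 peer was fenced/outdated, 5 peer was down,
 * 6 peer is primary (so outdate ourselves), 7 peer was stonithed;
 * anything else is treated as a broken helper. */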
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection
	   in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

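/* drbd_set_role() below retries the role change a few times: it may first
 * have to outdate or fence the peer (see conn_try_outdate_peer()), or, when
 * "force" is given, lower its expectations about the local and peer disk
 * states until the state change is accepted. */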
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset  = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
				     "size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
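/* In short: with both sizes known the usable size is min(local, peer);
 * e.g. an 80 GiB local disk paired with a 100 GiB peer yields 80 GiB,
 * and an explicit user-requested size only wins if it is not larger. */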

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
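/* Note: the newly allocated LRU simply replaces the old one; this only works
 * while no AL extent is referenced, otherwise we back off with -EBUSY above. */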

static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (mdev->tconn->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn) &&
		       tconn->cstate == C_STANDALONE;
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}

Lars Ellenbergf3990022011-03-23 14:31:09 +01001157int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1158{
1159 enum drbd_ret_code retcode;
1160 struct drbd_conf *mdev;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001161 struct disk_conf *new_disk_conf, *old_disk_conf;
Philipp Reisner813472c2011-05-03 16:47:02 +02001162 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001163 int err, fifo_size;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001164
1165 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1166 if (!adm_ctx.reply_skb)
1167 return retcode;
1168 if (retcode != NO_ERROR)
1169 goto out;
1170
1171 mdev = adm_ctx.mdev;
1172
1173 /* we also need a disk
1174 * to change the options on */
1175 if (!get_ldev(mdev)) {
1176 retcode = ERR_NO_DISK;
1177 goto out;
1178 }
1179
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001180 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001181 if (!new_disk_conf) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001182 retcode = ERR_NOMEM;
1183 goto fail;
1184 }
1185
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001186 mutex_lock(&mdev->tconn->conf_update);
1187 old_disk_conf = mdev->ldev->disk_conf;
1188 *new_disk_conf = *old_disk_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001189 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001190 set_disk_conf_defaults(new_disk_conf);
Lars Ellenberg5979e362011-04-27 21:09:55 +02001191
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001192 err = disk_conf_from_attrs_for_change(new_disk_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02001193 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001194 retcode = ERR_MANDATORY_TAG;
1195 drbd_msg_put_info(from_attrs_err_to_txt(err));
1196 }
1197
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001198 if (!expect(new_disk_conf->resync_rate >= 1))
1199 new_disk_conf->resync_rate = 1;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001200
Philipp Reisnerd589a212011-05-04 10:06:52 +02001201 enforce_disk_conf_limits(new_disk_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001202
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001203 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
Philipp Reisner9958c852011-05-03 16:19:31 +02001204 if (fifo_size != mdev->rs_plan_s->size) {
Philipp Reisner813472c2011-05-03 16:47:02 +02001205 new_plan = fifo_alloc(fifo_size);
1206 if (!new_plan) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001207 dev_err(DEV, "kmalloc of fifo_buffer failed");
1208 retcode = ERR_NOMEM;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001209 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001210 }
1211 }
1212
Lars Ellenberg0ee98e22012-08-20 14:54:48 +02001213 drbd_suspend_io(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001214 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1215 drbd_al_shrink(mdev);
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001216 err = drbd_check_al_size(mdev, new_disk_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001217 lc_unlock(mdev->act_log);
1218 wake_up(&mdev->al_wait);
Lars Ellenberg0ee98e22012-08-20 14:54:48 +02001219 drbd_resume_io(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001220
1221 if (err) {
1222 retcode = ERR_NOMEM;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001223 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001224 }
1225
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001226 write_lock_irq(&global_state_lock);
Andreas Gruenbacher95f8efd2011-05-12 11:15:34 +02001227 retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001228 if (retcode == NO_ERROR) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001229 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
Andreas Gruenbacher95f8efd2011-05-12 11:15:34 +02001230 drbd_resync_after_changed(mdev);
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001231 }
1232 write_unlock_irq(&global_state_lock);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001233
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001234 if (retcode != NO_ERROR)
1235 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001236
Philipp Reisner813472c2011-05-03 16:47:02 +02001237 if (new_plan) {
1238 old_plan = mdev->rs_plan_s;
1239 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
Philipp Reisner9958c852011-05-03 16:19:31 +02001240 }
Philipp Reisner9958c852011-05-03 16:19:31 +02001241
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02001242 mutex_unlock(&mdev->tconn->conf_update);
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001243
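	/* Persist the al-updates choice in the on-disk meta-data flags. */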
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001244 if (new_disk_conf->al_updates)
1245		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
1246 else
1247 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1248
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001249 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
1250
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001251 drbd_md_sync(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001252
1253 if (mdev->state.conn >= C_CONNECTED)
1254 drbd_send_sync_param(mdev);
1255
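	/* Wait out concurrent RCU readers before freeing the replaced objects. */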
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001256 synchronize_rcu();
1257 kfree(old_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02001258 kfree(old_plan);
Philipp Reisnercdfda632011-07-05 15:38:59 +02001259 mod_timer(&mdev->request_timer, jiffies + HZ);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001260 goto success;
1261
1262fail_unlock:
1263 mutex_unlock(&mdev->tconn->conf_update);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001264 fail:
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001265 kfree(new_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02001266 kfree(new_plan);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001267success:
1268 put_ldev(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001269 out:
1270 drbd_adm_finish(info, retcode);
1271 return 0;
1272}
1273
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001274int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001275{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001276 struct drbd_conf *mdev;
1277 int err;
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01001278 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001279 enum determine_dev_size dd;
1280 sector_t max_possible_sectors;
1281 sector_t min_md_device_sectors;
1282 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001283 struct disk_conf *new_disk_conf = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001284 struct block_device *bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001285 struct lru_cache *resync_lru = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001286 struct fifo_buffer *new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001287 union drbd_state ns, os;
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001288 enum drbd_state_rv rv;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001289 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001290
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001291 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1292 if (!adm_ctx.reply_skb)
1293 return retcode;
1294 if (retcode != NO_ERROR)
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001295 goto finish;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001296
1297 mdev = adm_ctx.mdev;
Philipp Reisner0e29d162011-02-18 14:23:11 +01001298 conn_reconfig_start(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001299
1300 /* if you want to reconfigure, please tear down first */
1301 if (mdev->state.disk > D_DISKLESS) {
1302 retcode = ERR_DISK_CONFIGURED;
1303 goto fail;
1304 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001305 /* It may just now have detached because of IO error. Make sure
1306 * drbd_ldev_destroy is done already, we may end up here very fast,
1307 * e.g. if someone calls attach from the on-io-error handler,
1308 * to realize a "hot spare" feature (not that I'd recommend that) */
1309 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001310
Lars Ellenberg0c849662012-07-30 09:07:28 +02001311 /* make sure there is no leftover from previous force-detach attempts */
1312 clear_bit(FORCE_DETACH, &mdev->flags);
1313
Lars Ellenberga3248962012-07-30 09:10:41 +02001314 /* and no leftover from previously aborted resync or verify, either */
1315 mdev->rs_total = 0;
1316 mdev->rs_failed = 0;
1317 atomic_set(&mdev->rs_pending_cnt, 0);
1318
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001319 /* allocation not in the IO path, drbdsetup context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001320 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1321 if (!nbc) {
1322 retcode = ERR_NOMEM;
1323 goto fail;
1324 }
Philipp Reisner39a1aa7f2012-08-08 21:19:09 +02001325 spin_lock_init(&nbc->md.uuid_lock);
1326
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001327 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1328 if (!new_disk_conf) {
1329 retcode = ERR_NOMEM;
1330 goto fail;
1331 }
1332 nbc->disk_conf = new_disk_conf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001333
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001334 set_disk_conf_defaults(new_disk_conf);
1335 err = disk_conf_from_attrs(new_disk_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001336 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001337 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001338 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001339 goto fail;
1340 }
1341
Philipp Reisnerd589a212011-05-04 10:06:52 +02001342 enforce_disk_conf_limits(new_disk_conf);
1343
Philipp Reisner9958c852011-05-03 16:19:31 +02001344 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1345 if (!new_plan) {
1346 retcode = ERR_NOMEM;
1347 goto fail;
1348 }
1349
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001350 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001351 retcode = ERR_MD_IDX_INVALID;
1352 goto fail;
1353 }
1354
Philipp Reisner44ed1672011-04-19 17:10:19 +02001355 rcu_read_lock();
1356 nc = rcu_dereference(mdev->tconn->net_conf);
1357 if (nc) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001358 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001359 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001360 retcode = ERR_STONITH_AND_PROT_A;
1361 goto fail;
1362 }
1363 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001364 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001365
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001366 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001367 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
Tejun Heoe525fd82010-11-13 11:55:17 +01001368 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001369 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001370 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001371 retcode = ERR_OPEN_DISK;
1372 goto fail;
1373 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001374 nbc->backing_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001375
Tejun Heoe525fd82010-11-13 11:55:17 +01001376 /*
1377 * meta_dev_idx >= 0: external fixed size, possibly multiple
1378 * drbd sharing one meta device. TODO in that case, paranoia
1379 * check that [md_bdev, meta_dev_idx] is not yet used by some
1380 * other drbd minor! (if you use drbd.conf + drbdadm, that
1381 * should check it for you already; but if you don't, or
1382 * someone fooled it, we need to double check here)
1383 */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001384 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001385 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001386 (new_disk_conf->meta_dev_idx < 0) ?
Tejun Heod4d77622010-11-13 11:55:18 +01001387 (void *)mdev : (void *)drbd_m_holder);
Tejun Heoe525fd82010-11-13 11:55:17 +01001388 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001389 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001390 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001391 retcode = ERR_OPEN_MD_DISK;
1392 goto fail;
1393 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001394 nbc->md_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001395
Tejun Heoe525fd82010-11-13 11:55:17 +01001396 if ((nbc->backing_bdev == nbc->md_bdev) !=
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001397 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1398 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001399 retcode = ERR_MD_IDX_INVALID;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001400 goto fail;
1401 }
1402
1403 resync_lru = lc_create("resync", drbd_bm_ext_cache,
Lars Ellenberg46a15bc2011-02-21 13:21:01 +01001404 1, 61, sizeof(struct bm_extent),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001405 offsetof(struct bm_extent, lce));
1406 if (!resync_lru) {
1407 retcode = ERR_NOMEM;
Tejun Heoe525fd82010-11-13 11:55:17 +01001408 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001409 }
1410
1411 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1412 drbd_md_set_sector_offsets(mdev, nbc);
1413
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001414 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001415 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1416 (unsigned long long) drbd_get_max_capacity(nbc),
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001417 (unsigned long long) new_disk_conf->disk_size);
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001418 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001419 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001420 }
1421
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001422 if (new_disk_conf->meta_dev_idx < 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001423 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1424 /* at least one MB, otherwise it does not make sense */
1425 min_md_device_sectors = (2<<10);
1426 } else {
1427 max_possible_sectors = DRBD_MAX_SECTORS;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001428 min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001429 }
1430
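	/* The meta device must be large enough for the slot(s) implied by the
	 * configured index, or at least 1 MB for internal/flexible meta-data. */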
Philipp Reisnerb411b362009-09-25 16:07:19 -07001431 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001432 retcode = ERR_MD_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433 dev_warn(DEV, "refusing attach: md-device too small, "
1434 "at least %llu sectors needed for this meta-disk type\n",
1435 (unsigned long long) min_md_device_sectors);
Tejun Heoe525fd82010-11-13 11:55:17 +01001436 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001437 }
1438
1439 /* Make sure the new disk is big enough
1440 * (we may currently be R_PRIMARY with no local disk...) */
1441 if (drbd_get_max_capacity(nbc) <
1442 drbd_get_capacity(mdev->this_bdev)) {
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001443 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001444 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001445 }
1446
1447 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1448
Lars Ellenberg13529942009-10-12 19:07:49 +02001449 if (nbc->known_size > max_possible_sectors) {
1450 dev_warn(DEV, "==> truncating very big lower level device "
1451 "to currently maximum possible %llu sectors <==\n",
1452 (unsigned long long) max_possible_sectors);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001453 if (new_disk_conf->meta_dev_idx >= 0)
Lars Ellenberg13529942009-10-12 19:07:49 +02001454 dev_warn(DEV, "==>> using internal or flexible "
1455 "meta data may help <<==\n");
1456 }
1457
Philipp Reisnerb411b362009-09-25 16:07:19 -07001458 drbd_suspend_io(mdev);
1459 /* also wait for the last barrier ack. */
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01001460 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1461 * We need a way to either ignore barrier acks for barriers sent before a device
1462 * was attached, or a way to wait for all pending barrier acks to come in.
1463 * As barriers are counted per resource,
1464 * we'd need to suspend io on all devices of a resource.
1465 */
Philipp Reisner2aebfab2011-03-28 16:48:11 +02001466 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001467 /* and for any other previously queued work */
Philipp Reisnera21e9292011-02-08 15:08:49 +01001468 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001469
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001470 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1471 retcode = rv; /* FIXME: Type mismatch. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001472 drbd_resume_io(mdev);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001473 if (rv < SS_SUCCESS)
Tejun Heoe525fd82010-11-13 11:55:17 +01001474 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001475
1476 if (!get_ldev_if_state(mdev, D_ATTACHING))
1477 goto force_diskless;
1478
1479 drbd_md_set_sector_offsets(mdev, nbc);
1480
1481 if (!mdev->bitmap) {
1482 if (drbd_bm_init(mdev)) {
1483 retcode = ERR_NOMEM;
1484 goto force_diskless_dec;
1485 }
1486 }
1487
1488 retcode = drbd_md_read(mdev, nbc);
1489 if (retcode != NO_ERROR)
1490 goto force_diskless_dec;
1491
1492 if (mdev->state.conn < C_CONNECTED &&
1493 mdev->state.role == R_PRIMARY &&
1494 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1495 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1496 (unsigned long long)mdev->ed_uuid);
1497 retcode = ERR_DATA_NOT_CURRENT;
1498 goto force_diskless_dec;
1499 }
1500
1501 /* Since we are diskless, fix the activity log first... */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001502 if (drbd_check_al_size(mdev, new_disk_conf)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001503 retcode = ERR_NOMEM;
1504 goto force_diskless_dec;
1505 }
1506
1507 /* Prevent shrinking of consistent devices ! */
1508 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001509 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001510 dev_warn(DEV, "refusing to truncate a consistent device\n");
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001511 retcode = ERR_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512 goto force_diskless_dec;
1513 }
1514
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515 /* Reset the "barriers don't work" bits here, then force meta data to
1516 * be written, to ensure we determine if barriers are supported. */
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001517 if (new_disk_conf->md_flushes)
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001518 clear_bit(MD_NO_FUA, &mdev->flags);
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001519 else
1520 set_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001521
1522 /* Point of no return reached.
1523 * Devices and memory are no longer released by error cleanup below.
1524 * now mdev takes over responsibility, and the state engine should
1525 * clean it up somewhere. */
1526 D_ASSERT(mdev->ldev == NULL);
1527 mdev->ldev = nbc;
1528 mdev->resync = resync_lru;
Philipp Reisner9958c852011-05-03 16:19:31 +02001529 mdev->rs_plan_s = new_plan;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001530 nbc = NULL;
1531 resync_lru = NULL;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001532 new_disk_conf = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001533 new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001534
Philipp Reisner4b0007c2011-11-09 20:12:34 +01001535 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001536
1537 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1538 set_bit(CRASHED_PRIMARY, &mdev->flags);
1539 else
1540 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1541
Philipp Reisner894c6a92010-06-18 16:03:20 +02001542 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02001543 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001544 set_bit(CRASHED_PRIMARY, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001545
1546 mdev->send_cnt = 0;
1547 mdev->recv_cnt = 0;
1548 mdev->read_cnt = 0;
1549 mdev->writ_cnt = 0;
1550
Philipp Reisner99432fc2011-05-20 16:39:13 +02001551 drbd_reconsider_max_bio_size(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001552
1553 /* If I am currently not R_PRIMARY,
1554 * but meta data primary indicator is set,
1555 * I just now recover from a hard crash,
1556 * and have been R_PRIMARY before that crash.
1557 *
1558 * Now, if I had no connection before that crash
1559 * (have been degraded R_PRIMARY), chances are that
1560 * I won't find my peer now either.
1561 *
1562 * In that case, and _only_ in that case,
1563 * we use the degr-wfc-timeout instead of the default,
1564 * so we can automatically recover from a crash of a
1565 * degraded but active "cluster" after a certain timeout.
1566 */
1567 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1568 if (mdev->state.role != R_PRIMARY &&
1569 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1570 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1571 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1572
Bart Van Assche24c48302011-05-21 18:32:29 +02001573 dd = drbd_determine_dev_size(mdev, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001574 if (dd == dev_size_error) {
1575 retcode = ERR_NOMEM_BITMAP;
1576 goto force_diskless_dec;
1577 } else if (dd == grew)
1578 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1579
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001580 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
1581 (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
1582 drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001583 dev_info(DEV, "Assuming that all blocks are out of sync "
1584 "(aka FullSync)\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001585 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1586 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001587 retcode = ERR_IO_MD_DISK;
1588 goto force_diskless_dec;
1589 }
1590 } else {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001591 if (drbd_bitmap_io(mdev, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01001592 "read from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593 retcode = ERR_IO_MD_DISK;
1594 goto force_diskless_dec;
1595 }
1596 }
1597
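	/* Whole device out of sync: activity log extents buy nothing here,
	 * so suspend AL updates. */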
Philipp Reisner07782862010-08-31 12:00:50 +02001598 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1599 drbd_suspend_al(mdev); /* IO is still suspended here... */
1600
Philipp Reisner87eeee42011-01-19 14:16:30 +01001601 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02001602 os = drbd_read_state(mdev);
1603 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001604 /* If MDF_CONSISTENT is not set go into inconsistent state,
1605 otherwise investigate MDF_WasUpToDate...
1606 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1607 otherwise into D_CONSISTENT state.
1608 */
1609 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1610 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1611 ns.disk = D_CONSISTENT;
1612 else
1613 ns.disk = D_OUTDATED;
1614 } else {
1615 ns.disk = D_INCONSISTENT;
1616 }
1617
1618 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1619 ns.pdsk = D_OUTDATED;
1620
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001621 rcu_read_lock();
1622 if (ns.disk == D_CONSISTENT &&
1623 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001624 ns.disk = D_UP_TO_DATE;
1625
1626 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1627 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1628 this point, because drbd_request_state() modifies these
1629 flags. */
1630
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001631 if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
1632		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
1633 else
1634 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1635
1636 rcu_read_unlock();
1637
Philipp Reisnerb411b362009-09-25 16:07:19 -07001638 /* In case we are C_CONNECTED postpone any decision on the new disk
1639 state after the negotiation phase. */
1640 if (mdev->state.conn == C_CONNECTED) {
1641 mdev->new_state_tmp.i = ns.i;
1642 ns.i = os.i;
1643 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02001644
1645 /* We expect to receive up-to-date UUIDs soon.
1646 To avoid a race in receive_state, free p_uuid while
1647 holding req_lock. I.e. atomic with the state change */
1648 kfree(mdev->p_uuid);
1649 mdev->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001650 }
1651
1652 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001653 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001654
1655 if (rv < SS_SUCCESS)
1656 goto force_diskless_dec;
1657
Philipp Reisnercdfda632011-07-05 15:38:59 +02001658 mod_timer(&mdev->request_timer, jiffies + HZ);
1659
Philipp Reisnerb411b362009-09-25 16:07:19 -07001660 if (mdev->state.role == R_PRIMARY)
1661 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1662 else
1663 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1664
1665 drbd_md_mark_dirty(mdev);
1666 drbd_md_sync(mdev);
1667
1668 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1669 put_ldev(mdev);
Philipp Reisner0e29d162011-02-18 14:23:11 +01001670 conn_reconfig_done(mdev->tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001671 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001672 return 0;
1673
1674 force_diskless_dec:
1675 put_ldev(mdev);
1676 force_diskless:
Philipp Reisner9510b242011-07-01 17:00:57 +02001677 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001678 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001679 fail:
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001680 conn_reconfig_done(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001681 if (nbc) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001682 if (nbc->backing_bdev)
1683 blkdev_put(nbc->backing_bdev,
1684 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1685 if (nbc->md_bdev)
1686 blkdev_put(nbc->md_bdev,
1687 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001688 kfree(nbc);
1689 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001690 kfree(new_disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001691 lc_destroy(resync_lru);
Philipp Reisner9958c852011-05-03 16:19:31 +02001692 kfree(new_plan);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001693
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001694 finish:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001695 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001696 return 0;
1697}
1698
Philipp Reisnercdfda632011-07-05 15:38:59 +02001699static int adm_detach(struct drbd_conf *mdev, int force)
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001700{
Philipp Reisner19f83c72011-03-29 14:21:03 +02001701 enum drbd_state_rv retcode;
Lars Ellenberg009ba892011-05-02 11:51:31 +02001702 int ret;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001703
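	/* force: set FORCE_DETACH and hard-force D_FAILED right away, without
	 * waiting for in-flight meta-data IO as the graceful path below does. */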
1704 if (force) {
Lars Ellenberg0c849662012-07-30 09:07:28 +02001705 set_bit(FORCE_DETACH, &mdev->flags);
Philipp Reisnercdfda632011-07-05 15:38:59 +02001706 drbd_force_state(mdev, NS(disk, D_FAILED));
1707 retcode = SS_SUCCESS;
1708 goto out;
1709 }
1710
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001711 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
Philipp Reisner0cfac5d2011-11-10 12:12:52 +01001712 drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
Lars Ellenberg009ba892011-05-02 11:51:31 +02001713 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
Philipp Reisner0cfac5d2011-11-10 12:12:52 +01001714 drbd_md_put_buffer(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001715 /* D_FAILED will transition to DISKLESS. */
1716 ret = wait_event_interruptible(mdev->misc_wait,
1717 mdev->state.disk != D_FAILED);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001718 drbd_resume_io(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001719 if ((int)retcode == (int)SS_IS_DISKLESS)
1720 retcode = SS_NOTHING_TO_DO;
1721 if (ret)
1722 retcode = ERR_INTR;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001723out:
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001724 return retcode;
1725}
1726
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001727/* Detaching the disk is a process in multiple stages. First we need to lock
1728 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1729 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1730 * internal references as well.
1731 * Only then we have finally detached. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001732int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001733{
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001734 enum drbd_ret_code retcode;
Philipp Reisnercdfda632011-07-05 15:38:59 +02001735 struct detach_parms parms = { };
1736 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001737
1738 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1739 if (!adm_ctx.reply_skb)
1740 return retcode;
1741 if (retcode != NO_ERROR)
1742 goto out;
1743
Philipp Reisnercdfda632011-07-05 15:38:59 +02001744 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1745 err = detach_parms_from_attrs(&parms, info);
1746 if (err) {
1747 retcode = ERR_MANDATORY_TAG;
1748 drbd_msg_put_info(from_attrs_err_to_txt(err));
1749 goto out;
1750 }
1751 }
1752
1753 retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001754out:
1755 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001756 return 0;
1757}
1758
Lars Ellenbergf3990022011-03-23 14:31:09 +01001759static bool conn_resync_running(struct drbd_tconn *tconn)
1760{
1761 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001762 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001763 int vnr;
1764
Philipp Reisner695d08f2011-04-11 22:53:32 -07001765 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001766 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1767 if (mdev->state.conn == C_SYNC_SOURCE ||
1768 mdev->state.conn == C_SYNC_TARGET ||
1769 mdev->state.conn == C_PAUSED_SYNC_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001770 mdev->state.conn == C_PAUSED_SYNC_T) {
1771 rv = true;
1772 break;
1773 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001774 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001775 rcu_read_unlock();
1776
1777 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001778}
1779
1780static bool conn_ov_running(struct drbd_tconn *tconn)
1781{
1782 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001783 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001784 int vnr;
1785
Philipp Reisner695d08f2011-04-11 22:53:32 -07001786 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001787 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1788 if (mdev->state.conn == C_VERIFY_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001789 mdev->state.conn == C_VERIFY_T) {
1790 rv = true;
1791 break;
1792 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001793 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001794 rcu_read_unlock();
1795
1796 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001797}
1798
Philipp Reisnercd643972011-04-13 18:00:59 -07001799static enum drbd_ret_code
Philipp Reisner44ed1672011-04-19 17:10:19 +02001800_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
Philipp Reisnercd643972011-04-13 18:00:59 -07001801{
1802 struct drbd_conf *mdev;
1803 int i;
1804
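	/* On an established connection these settings may only change if both
	 * sides already agreed on protocol version 100 or newer. */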
Philipp Reisnerdcb20d12011-05-16 14:30:24 +02001805 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1806 if (new_conf->wire_protocol != old_conf->wire_protocol)
1807 return ERR_NEED_APV_100;
1808
1809 if (new_conf->two_primaries != old_conf->two_primaries)
1810 return ERR_NEED_APV_100;
1811
1812 if (!new_conf->integrity_alg != !old_conf->integrity_alg)
1813 return ERR_NEED_APV_100;
1814
1815 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1816 return ERR_NEED_APV_100;
1817 }
1818
1819 if (!new_conf->two_primaries &&
1820 conn_highest_role(tconn) == R_PRIMARY &&
1821 conn_highest_peer(tconn) == R_PRIMARY)
1822 return ERR_NEED_ALLOW_TWO_PRI;
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001823
Philipp Reisnercd643972011-04-13 18:00:59 -07001824 if (new_conf->two_primaries &&
1825 (new_conf->wire_protocol != DRBD_PROT_C))
1826 return ERR_NOT_PROTO_C;
1827
Philipp Reisnercd643972011-04-13 18:00:59 -07001828 idr_for_each_entry(&tconn->volumes, mdev, i) {
1829 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001830 enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
Philipp Reisnercd643972011-04-13 18:00:59 -07001831 put_ldev(mdev);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001832 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
Philipp Reisnercd643972011-04-13 18:00:59 -07001833 return ERR_STONITH_AND_PROT_A;
Philipp Reisnercd643972011-04-13 18:00:59 -07001834 }
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02001835 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
Lars Ellenbergeb120102012-08-01 12:46:20 +02001836 return ERR_DISCARD_IMPOSSIBLE;
Philipp Reisnercd643972011-04-13 18:00:59 -07001837 }
Philipp Reisnercd643972011-04-13 18:00:59 -07001838
1839 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1840 return ERR_CONG_NOT_PROTO_A;
1841
1842 return NO_ERROR;
1843}
1844
Philipp Reisner44ed1672011-04-19 17:10:19 +02001845static enum drbd_ret_code
1846check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1847{
1848 static enum drbd_ret_code rv;
1849 struct drbd_conf *mdev;
1850 int i;
1851
1852 rcu_read_lock();
1853 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1854 rcu_read_unlock();
1855
1856 /* tconn->volumes protected by genl_lock() here */
1857 idr_for_each_entry(&tconn->volumes, mdev, i) {
1858 if (!mdev->bitmap) {
1859 if(drbd_bm_init(mdev))
1860 return ERR_NOMEM;
1861 }
1862 }
1863
1864 return rv;
1865}
1866
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001867struct crypto {
1868 struct crypto_hash *verify_tfm;
1869 struct crypto_hash *csums_tfm;
1870 struct crypto_hash *cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001871 struct crypto_hash *integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001872};
1873
1874static int
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001875alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001876{
1877 if (!tfm_name[0])
1878 return NO_ERROR;
1879
1880 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1881 if (IS_ERR(*tfm)) {
1882 *tfm = NULL;
1883 return err_alg;
1884 }
1885
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001886 return NO_ERROR;
1887}
1888
1889static enum drbd_ret_code
1890alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1891{
1892 char hmac_name[CRYPTO_MAX_ALG_NAME];
1893 enum drbd_ret_code rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001894
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001895 rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1896 ERR_CSUMS_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001897 if (rv != NO_ERROR)
1898 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001899 rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1900 ERR_VERIFY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001901 if (rv != NO_ERROR)
1902 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001903 rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1904 ERR_INTEGRITY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001905 if (rv != NO_ERROR)
1906 return rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001907 if (new_conf->cram_hmac_alg[0] != 0) {
1908 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1909 new_conf->cram_hmac_alg);
1910
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001911 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1912 ERR_AUTH_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001913 }
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001914
1915 return rv;
1916}
1917
1918static void free_crypto(struct crypto *crypto)
1919{
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001920 crypto_free_hash(crypto->cram_hmac_tfm);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001921 crypto_free_hash(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001922 crypto_free_hash(crypto->csums_tfm);
1923 crypto_free_hash(crypto->verify_tfm);
1924}
1925
Lars Ellenbergf3990022011-03-23 14:31:09 +01001926int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1927{
1928 enum drbd_ret_code retcode;
1929 struct drbd_tconn *tconn;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001930 struct net_conf *old_conf, *new_conf = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001931 int err;
1932 int ovr; /* online verify running */
1933 int rsr; /* re-sync running */
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001934 struct crypto crypto = { };
Lars Ellenbergf3990022011-03-23 14:31:09 +01001935
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02001936 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001937 if (!adm_ctx.reply_skb)
1938 return retcode;
1939 if (retcode != NO_ERROR)
1940 goto out;
1941
1942 tconn = adm_ctx.tconn;
1943
1944 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1945 if (!new_conf) {
1946 retcode = ERR_NOMEM;
1947 goto out;
1948 }
1949
Lars Ellenbergf3990022011-03-23 14:31:09 +01001950 conn_reconfig_start(tconn);
1951
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001952 mutex_lock(&tconn->data.mutex);
Philipp Reisnera0095502011-05-03 13:14:15 +02001953 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001954 old_conf = tconn->net_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001955
1956 if (!old_conf) {
1957 drbd_msg_put_info("net conf missing, try connect");
1958 retcode = ERR_INVALID_REQUEST;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001959 goto fail;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001960 }
1961
1962 *new_conf = *old_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001963 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001964 set_net_conf_defaults(new_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001965
Lars Ellenbergf3990022011-03-23 14:31:09 +01001966 err = net_conf_from_attrs_for_change(new_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02001967 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001968 retcode = ERR_MANDATORY_TAG;
1969 drbd_msg_put_info(from_attrs_err_to_txt(err));
1970 goto fail;
1971 }
1972
Philipp Reisnercd643972011-04-13 18:00:59 -07001973 retcode = check_net_options(tconn, new_conf);
1974 if (retcode != NO_ERROR)
1975 goto fail;
1976
Lars Ellenbergf3990022011-03-23 14:31:09 +01001977 /* re-sync running */
1978 rsr = conn_resync_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001979 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001980 retcode = ERR_CSUMS_RESYNC_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001981 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001982 }
1983
Lars Ellenbergf3990022011-03-23 14:31:09 +01001984 /* online verify running */
1985 ovr = conn_ov_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001986 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1987 retcode = ERR_VERIFY_RUNNING;
1988 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001989 }
1990
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001991 retcode = alloc_crypto(&crypto, new_conf);
1992 if (retcode != NO_ERROR)
1993 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001994
Philipp Reisner44ed1672011-04-19 17:10:19 +02001995 rcu_assign_pointer(tconn->net_conf, new_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001996
1997 if (!rsr) {
1998 crypto_free_hash(tconn->csums_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001999 tconn->csums_tfm = crypto.csums_tfm;
2000 crypto.csums_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002001 }
2002 if (!ovr) {
2003 crypto_free_hash(tconn->verify_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002004 tconn->verify_tfm = crypto.verify_tfm;
2005 crypto.verify_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002006 }
2007
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002008 crypto_free_hash(tconn->integrity_tfm);
2009 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02002010 if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002011 /* Do this without trying to take tconn->data.mutex again. */
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02002012 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002013
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002014 crypto_free_hash(tconn->cram_hmac_tfm);
2015 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2016
Philipp Reisnera0095502011-05-03 13:14:15 +02002017 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002018 mutex_unlock(&tconn->data.mutex);
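	/* new_conf was published with rcu_assign_pointer() above; wait for a
	 * grace period before freeing the old one. */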
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002019 synchronize_rcu();
2020 kfree(old_conf);
2021
Lars Ellenbergf3990022011-03-23 14:31:09 +01002022 if (tconn->cstate >= C_WF_REPORT_PARAMS)
2023 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
2024
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002025 goto done;
2026
Lars Ellenbergf3990022011-03-23 14:31:09 +01002027 fail:
Philipp Reisnera0095502011-05-03 13:14:15 +02002028 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002029 mutex_unlock(&tconn->data.mutex);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002030 free_crypto(&crypto);
Lars Ellenbergf3990022011-03-23 14:31:09 +01002031 kfree(new_conf);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002032 done:
Lars Ellenbergf3990022011-03-23 14:31:09 +01002033 conn_reconfig_done(tconn);
2034 out:
2035 drbd_adm_finish(info, retcode);
2036 return 0;
2037}
2038
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002039int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002040{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002041 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002042 struct net_conf *old_conf, *new_conf = NULL;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002043 struct crypto crypto = { };
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002044 struct drbd_tconn *tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002045 enum drbd_ret_code retcode;
2046 int i;
2047 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002048
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002049 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002050
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002051 if (!adm_ctx.reply_skb)
2052 return retcode;
2053 if (retcode != NO_ERROR)
2054 goto out;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002055 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2056 drbd_msg_put_info("connection endpoint(s) missing");
2057 retcode = ERR_INVALID_REQUEST;
2058 goto out;
2059 }
2060
2061 /* No need for _rcu here. All reconfiguration is
2062 * strictly serialized on genl_lock(). We are protected against
2063 * concurrent reconfiguration/addition/deletion */
2064 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2065 if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
2066 !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
2067 retcode = ERR_LOCAL_ADDR;
2068 goto out;
2069 }
2070
2071 if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
2072 !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
2073 retcode = ERR_PEER_ADDR;
2074 goto out;
2075 }
2076 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002077
2078 tconn = adm_ctx.tconn;
Philipp Reisner80883192011-02-18 14:56:45 +01002079 conn_reconfig_start(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002080
Philipp Reisner80883192011-02-18 14:56:45 +01002081 if (tconn->cstate > C_STANDALONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002082 retcode = ERR_NET_CONFIGURED;
2083 goto fail;
2084 }
2085
Andreas Gruenbachera209b4a2011-08-17 12:43:25 +02002086 /* allocation not in the IO path, drbdsetup / netlink process context */
Lars Ellenberg5979e362011-04-27 21:09:55 +02002087 new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002088 if (!new_conf) {
2089 retcode = ERR_NOMEM;
2090 goto fail;
2091 }
2092
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002093 set_net_conf_defaults(new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002094
Lars Ellenbergf3990022011-03-23 14:31:09 +01002095 err = net_conf_from_attrs(new_conf, info);
Lars Ellenberg25e40932011-08-19 10:39:00 +02002096 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002097 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002098 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002099 goto fail;
2100 }
2101
Philipp Reisnercd643972011-04-13 18:00:59 -07002102 retcode = check_net_options(tconn, new_conf);
2103 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002104 goto fail;
Philipp Reisner422028b2010-10-27 11:12:07 +02002105
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002106 retcode = alloc_crypto(&crypto, new_conf);
2107 if (retcode != NO_ERROR)
2108 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002109
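	/* make sure the shared secret is NUL terminated, whatever userspace sent */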
Philipp Reisnerb411b362009-09-25 16:07:19 -07002110 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2111
Philipp Reisner80883192011-02-18 14:56:45 +01002112 conn_flush_workqueue(tconn);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002113
Philipp Reisnera0095502011-05-03 13:14:15 +02002114 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002115 old_conf = tconn->net_conf;
2116 if (old_conf) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002117 retcode = ERR_NET_CONFIGURED;
Philipp Reisnera0095502011-05-03 13:14:15 +02002118 mutex_unlock(&tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002119 goto fail;
2120 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002121 rcu_assign_pointer(tconn->net_conf, new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002122
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002123 conn_free_crypto(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002124 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002125 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002126 tconn->csums_tfm = crypto.csums_tfm;
2127 tconn->verify_tfm = crypto.verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002128
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002129 tconn->my_addr_len = nla_len(adm_ctx.my_addr);
2130 memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
2131 tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
2132 memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
2133
Philipp Reisnera0095502011-05-03 13:14:15 +02002134 mutex_unlock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002135
Philipp Reisner695d08f2011-04-11 22:53:32 -07002136 rcu_read_lock();
Philipp Reisner80883192011-02-18 14:56:45 +01002137 idr_for_each_entry(&tconn->volumes, mdev, i) {
2138 mdev->send_cnt = 0;
2139 mdev->recv_cnt = 0;
Philipp Reisner80883192011-02-18 14:56:45 +01002140 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07002141 rcu_read_unlock();
Lars Ellenberg5ee743e2011-04-26 16:22:25 +02002142
2143 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2144
Philipp Reisner80883192011-02-18 14:56:45 +01002145 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002146 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002147 return 0;
2148
2149fail:
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002150 free_crypto(&crypto);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002151 kfree(new_conf);
2152
Philipp Reisner80883192011-02-18 14:56:45 +01002153 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002154out:
2155 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002156 return 0;
2157}
2158
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002159static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2160{
2161 enum drbd_state_rv rv;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002162
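	/* Try the graceful transition first; with force, CS_HARD applies the
	 * state change even if the usual checks would refuse it. */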
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002163 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2164 force ? CS_HARD : 0);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002165
2166 switch (rv) {
2167 case SS_NOTHING_TO_DO:
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002168 break;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002169 case SS_ALREADY_STANDALONE:
2170 return SS_SUCCESS;
2171 case SS_PRIMARY_NOP:
2172 /* Our state checking code wants to see the peer outdated. */
2173 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002174 pdsk, D_OUTDATED), CS_VERBOSE);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002175 break;
2176 case SS_CW_FAILED_BY_PEER:
2177 /* The peer probably wants to see us outdated. */
2178 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2179 disk, D_OUTDATED), 0);
2180 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002181 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2182 CS_HARD);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002183 }
2184 break;
2185 default:;
2186 /* no special handling necessary */
2187 }
2188
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002189 if (rv >= SS_SUCCESS) {
2190 enum drbd_state_rv rv2;
2191 /* No one else can reconfigure the network while I am here.
2192 * The state handling only uses drbd_thread_stop_nowait(),
2193 * we want to really wait here until the receiver is no more.
2194 */
2195 drbd_thread_stop(&adm_ctx.tconn->receiver);
2196
2197 /* Race breaker. This additional state change request may be
2198 * necessary, if this was a forced disconnect during a receiver
2199 * restart. We may have "killed" the receiver thread just
2200 * after drbdd_init() returned. Typically, we should be
2201 * C_STANDALONE already, now, and this becomes a no-op.
2202 */
2203 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2204 CS_VERBOSE | CS_HARD);
2205 if (rv2 < SS_SUCCESS)
2206 conn_err(tconn,
2207 "unexpected rv2=%d in conn_try_disconnect()\n",
2208 rv2);
2209 }
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002210 return rv;
2211}
2212
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002213int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002214{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002215 struct disconnect_parms parms;
2216 struct drbd_tconn *tconn;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002217 enum drbd_state_rv rv;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002218 enum drbd_ret_code retcode;
2219 int err;
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002220
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002221 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002222 if (!adm_ctx.reply_skb)
2223 return retcode;
2224 if (retcode != NO_ERROR)
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002225 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002226
2227 tconn = adm_ctx.tconn;
2228 memset(&parms, 0, sizeof(parms));
2229 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002230 err = disconnect_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002231 if (err) {
2232 retcode = ERR_MANDATORY_TAG;
2233 drbd_msg_put_info(from_attrs_err_to_txt(err));
2234 goto fail;
2235 }
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002236 }
2237
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002238 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2239 if (rv < SS_SUCCESS)
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002240 retcode = rv; /* FIXME: Type mismatch. */
2241 else
2242 retcode = NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002243 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002244 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002245 return 0;
2246}
2247
2248void resync_after_online_grow(struct drbd_conf *mdev)
2249{
2250 int iass; /* I am sync source */
2251
2252 dev_info(DEV, "Resync of new storage after online grow\n");
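	/* Pick the sync source: the Primary if the roles differ, otherwise fall
	 * back to the connection's RESOLVE_CONFLICTS tie-breaker. */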
2253 if (mdev->state.role != mdev->state.peer)
2254 iass = (mdev->state.role == R_PRIMARY);
2255 else
Lars Ellenberg427c0432012-08-01 12:43:01 +02002256 iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002257
2258 if (iass)
2259 drbd_start_resync(mdev, C_SYNC_SOURCE);
2260 else
2261 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2262}
2263
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002264int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002265{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002266 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002267 struct resize_parms rs;
2268 struct drbd_conf *mdev;
2269 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002270 enum determine_dev_size dd;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002271 enum dds_flags ddsf;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002272 sector_t u_size;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002273 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002274
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002275 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2276 if (!adm_ctx.reply_skb)
2277 return retcode;
2278 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002279 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002280
2281 memset(&rs, 0, sizeof(struct resize_parms));
2282 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002283 err = resize_parms_from_attrs(&rs, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002284 if (err) {
2285 retcode = ERR_MANDATORY_TAG;
2286 drbd_msg_put_info(from_attrs_err_to_txt(err));
2287 goto fail;
2288 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002289 }
2290
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002291 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002292 if (mdev->state.conn > C_CONNECTED) {
2293 retcode = ERR_RESIZE_RESYNC;
2294 goto fail;
2295 }
2296
2297 if (mdev->state.role == R_SECONDARY &&
2298 mdev->state.peer == R_SECONDARY) {
2299 retcode = ERR_NO_PRIMARY;
2300 goto fail;
2301 }
2302
2303 if (!get_ldev(mdev)) {
2304 retcode = ERR_NO_DISK;
2305 goto fail;
2306 }
2307
Philipp Reisner31890f42011-01-19 14:12:51 +01002308 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002309 retcode = ERR_NEED_APV_93;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002310 goto fail_ldev;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002311 }
2312
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002313 rcu_read_lock();
2314 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2315 rcu_read_unlock();
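	/* Only allocate a replacement disk_conf if the requested size actually
	 * differs; it is swapped in under conf_update further down. */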
2316 if (u_size != (sector_t)rs.resize_size) {
2317 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2318 if (!new_disk_conf) {
2319 retcode = ERR_NOMEM;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002320 goto fail_ldev;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002321 }
2322 }
2323
Philipp Reisner087c2492010-03-26 13:49:56 +01002324 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002325 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002326
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002327 if (new_disk_conf) {
2328 mutex_lock(&mdev->tconn->conf_update);
2329 old_disk_conf = mdev->ldev->disk_conf;
2330 *new_disk_conf = *old_disk_conf;
2331 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2332 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2333 mutex_unlock(&mdev->tconn->conf_update);
2334 synchronize_rcu();
2335 kfree(old_disk_conf);
2336 }
2337
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002338 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
Bart Van Assche24c48302011-05-21 18:32:29 +02002339 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002340 drbd_md_sync(mdev);
2341 put_ldev(mdev);
2342 if (dd == dev_size_error) {
2343 retcode = ERR_NOMEM_BITMAP;
2344 goto fail;
2345 }
2346
Philipp Reisner087c2492010-03-26 13:49:56 +01002347 if (mdev->state.conn == C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002348 if (dd == grew)
2349 set_bit(RESIZE_PENDING, &mdev->flags);
2350
2351 drbd_send_uuids(mdev);
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002352 drbd_send_sizes(mdev, 1, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002353 }
2354
2355 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002356 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002357 return 0;
Philipp Reisner9bcd2522011-09-29 13:00:14 +02002358
2359 fail_ldev:
2360 put_ldev(mdev);
2361 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002362}
2363
Lars Ellenbergf3990022011-03-23 14:31:09 +01002364int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002365{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002366 enum drbd_ret_code retcode;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002367 struct drbd_tconn *tconn;
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002368 struct res_opts res_opts;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002369 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002371 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002372 if (!adm_ctx.reply_skb)
2373 return retcode;
2374 if (retcode != NO_ERROR)
2375 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002376 tconn = adm_ctx.tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002377
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002378 res_opts = tconn->res_opts;
Lars Ellenberg5979e362011-04-27 21:09:55 +02002379 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002380 set_res_opts_defaults(&res_opts);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002381
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002382 err = res_opts_from_attrs(&res_opts, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02002383 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002384 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002385 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002386 goto fail;
2387 }
2388
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002389 err = set_resource_options(tconn, &res_opts);
2390 if (err) {
2391 retcode = ERR_INVALID_REQUEST;
2392 if (err == -ENOMEM)
2393 retcode = ERR_NOMEM;
Philipp Reisner778f2712010-07-06 11:14:00 +02002394 }
2395
Philipp Reisnerb411b362009-09-25 16:07:19 -07002396fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002397 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002398 return 0;
2399}
2400
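/* Invalidate the local copy of the data: mark the whole local disk out of
 * date and try to become sync target (C_STARTING_SYNC_T).  If no peer is
 * connected, fall back to simply marking the local disk D_INCONSISTENT. */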
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002401int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002402{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002403 struct drbd_conf *mdev;
2404 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2405
2406 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2407 if (!adm_ctx.reply_skb)
2408 return retcode;
2409 if (retcode != NO_ERROR)
2410 goto out;
2411
2412 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002413
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002414 /* If there is still bitmap IO pending, probably because of a previous
Lars Ellenberg970fbde2012-07-30 09:11:38 +02002415 * resync just being finished, wait for it before requesting a new resync.
2416	 * Also wait for its after_state_ch(). */
Lars Ellenberg5016b822012-05-07 12:00:56 +02002417 drbd_suspend_io(mdev);
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002418 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg970fbde2012-07-30 09:11:38 +02002419 drbd_flush_workqueue(mdev);
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002420
Philipp Reisnerb411b362009-09-25 16:07:19 -07002421 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2422
2423 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2424 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2425
2426 while (retcode == SS_NEED_CONNECTION) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01002427 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002428 if (mdev->state.conn < C_CONNECTED)
2429 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002430 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002431
2432 if (retcode != SS_NEED_CONNECTION)
2433 break;
2434
2435 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2436 }
Lars Ellenberg5016b822012-05-07 12:00:56 +02002437 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002438
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002439out:
2440 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002441 return 0;
2442}
2443
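/* Helper for the trivial adm requests that map directly to a single
 * drbd_request_state(mask, val) call on the addressed minor. */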
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002444static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2445 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002446{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002447 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002448
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002449 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2450 if (!adm_ctx.reply_skb)
2451 return retcode;
2452 if (retcode != NO_ERROR)
2453 goto out;
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002454
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002455 retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2456out:
2457 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002458 return 0;
2459}
2460
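/* Bitmap I/O helper: set all bits (and write them out), then suspend the
 * activity log; returns the result of drbd_bmio_set_n_write(). */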
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002461static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2462{
2463 int rv;
2464
2465 rv = drbd_bmio_set_n_write(mdev);
2466 drbd_suspend_al(mdev);
2467 return rv;
2468}
2469
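/* Invalidate the peer's data and become sync source (C_STARTING_SYNC_S).
 * If there is no connection but we are Primary, mark the peer's disk
 * D_INCONSISTENT and set all bitmap bits, so the peer receives a full
 * resync as soon as it connects. */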
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002470int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002471{
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002472 int retcode; /* drbd_ret_code, drbd_state_rv */
2473 struct drbd_conf *mdev;
2474
2475 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2476 if (!adm_ctx.reply_skb)
2477 return retcode;
2478 if (retcode != NO_ERROR)
2479 goto out;
2480
2481 mdev = adm_ctx.mdev;
2482
Lars Ellenberg5016b822012-05-07 12:00:56 +02002483 /* If there is still bitmap IO pending, probably because of a previous
Lars Ellenberg970fbde2012-07-30 09:11:38 +02002484 * resync just being finished, wait for it before requesting a new resync.
2485	 * Also wait for its after_state_ch(). */
Lars Ellenberg5016b822012-05-07 12:00:56 +02002486 drbd_suspend_io(mdev);
2487 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg970fbde2012-07-30 09:11:38 +02002488 drbd_flush_workqueue(mdev);
Lars Ellenberg5016b822012-05-07 12:00:56 +02002489
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002490 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
2491 if (retcode < SS_SUCCESS) {
2492 if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
2493		/* The peer will get a resync upon connect anyway.
2494 * Just make that into a full resync. */
2495 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
2496 if (retcode >= SS_SUCCESS) {
2497 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
2498 "set_n_write from invalidate_peer",
2499 BM_LOCKED_SET_ALLOWED))
2500 retcode = ERR_IO_MD_DISK;
2501 }
2502 } else
2503 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
2504 }
Lars Ellenberg5016b822012-05-07 12:00:56 +02002505 drbd_resume_io(mdev);
Philipp Reisner25b0d6c2012-02-14 12:12:35 +01002506
2507out:
2508 drbd_adm_finish(info, retcode);
2509 return 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002510}
Philipp Reisnerb411b362009-09-25 16:07:19 -07002511
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002512int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2513{
2514 enum drbd_ret_code retcode;
2515
2516 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2517 if (!adm_ctx.reply_skb)
2518 return retcode;
2519 if (retcode != NO_ERROR)
2520 goto out;
2521
2522 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002523 retcode = ERR_PAUSE_IS_SET;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002524out:
2525 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002526 return 0;
2527}
2528
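/* Clear the user sync-pause flag.  If that changes nothing, report why:
 * sync may still be paused by an after-dependency or by the peer, or the
 * pause flag was simply not set. */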
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002529int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002530{
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02002531 union drbd_dev_state s;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002532 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002533
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002534 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2535 if (!adm_ctx.reply_skb)
2536 return retcode;
2537 if (retcode != NO_ERROR)
2538 goto out;
2539
2540 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2541 s = adm_ctx.mdev->state;
Philipp Reisnercd88d032011-01-20 11:46:41 +01002542 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2543 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2544 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2545 } else {
2546 retcode = ERR_PAUSE_IS_CLEAR;
2547 }
2548 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002549
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002550out:
2551 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002552 return 0;
2553}
2554
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002555int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002556{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002557 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002558}
2559
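/* Resume suspended I/O: generate a new current UUID if one is pending,
 * clear all suspend flags, and, depending on connection and disk state,
 * clear the transfer log or restart the requests frozen by a failed disk. */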
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002560int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002561{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002562 struct drbd_conf *mdev;
2563 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2564
2565 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2566 if (!adm_ctx.reply_skb)
2567 return retcode;
2568 if (retcode != NO_ERROR)
2569 goto out;
2570
2571 mdev = adm_ctx.mdev;
Philipp Reisner43a51822010-06-11 11:26:34 +02002572 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2573 drbd_uuid_new_current(mdev);
2574 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02002575 }
Philipp Reisner265be2d2010-05-31 10:14:17 +02002576 drbd_suspend_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002577 retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2578 if (retcode == SS_SUCCESS) {
Philipp Reisner265be2d2010-05-31 10:14:17 +02002579 if (mdev->state.conn < C_CONNECTED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002580 tl_clear(mdev->tconn);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002581 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002582 tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002583 }
2584 drbd_resume_io(mdev);
2585
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002586out:
2587 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002588 return 0;
2589}
2590
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002591int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002592{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002593 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002594}
2595
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002596int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002597{
2598 struct nlattr *nla;
2599 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2600 if (!nla)
2601 goto nla_put_failure;
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002602 if (vnr != VOLUME_UNSPECIFIED &&
2603 nla_put_u32(skb, T_ctx_volume, vnr))
2604 goto nla_put_failure;
2605 if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
2606 goto nla_put_failure;
2607 if (tconn->my_addr_len &&
2608 nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
2609 goto nla_put_failure;
2610 if (tconn->peer_addr_len &&
2611 nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
2612 goto nla_put_failure;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002613 nla_nest_end(skb, nla);
2614 return 0;
2615
2616nla_put_failure:
2617 if (nla)
2618 nla_nest_cancel(skb, nla);
2619 return -EMSGSIZE;
2620}
2621
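/* Compose the full status reply for one minor: configuration context,
 * resource, disk and net options, current state and counters, and, for a
 * broadcast event, the reason (sib) that triggered it. */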
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002622int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2623 const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002624{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002625 struct state_info *si = NULL; /* for sizeof(si->member); */
Philipp Reisner44ed1672011-04-19 17:10:19 +02002626 struct net_conf *nc;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002627 struct nlattr *nla;
2628 int got_ldev;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002629 int err = 0;
2630 int exclude_sensitive;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002631
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002632 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2633	 * to. So we'd better exclude_sensitive information.
2634 *
2635 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2636 * in the context of the requesting user process. Exclude sensitive
2637	 * information, unless current has CAP_SYS_ADMIN.
2638 *
2639 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2640 * relies on the current implementation of netlink_dump(), which
2641 * executes the dump callback successively from netlink_recvmsg(),
2642 * always in the context of the receiving process */
2643 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002644
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002645 got_ldev = get_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002646
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002647	/* We still need to add the connection name and volume number information.
2648 * Minor number is in drbd_genlmsghdr. */
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002649 if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002650 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002651
Lars Ellenbergf3990022011-03-23 14:31:09 +01002652 if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2653 goto nla_put_failure;
2654
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002655 rcu_read_lock();
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002656 if (got_ldev)
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002657 if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002658 goto nla_put_failure;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002659
Philipp Reisner44ed1672011-04-19 17:10:19 +02002660 nc = rcu_dereference(mdev->tconn->net_conf);
2661 if (nc)
2662 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2663 rcu_read_unlock();
2664 if (err)
2665 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002666
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002667 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2668 if (!nla)
2669 goto nla_put_failure;
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002670 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
2671 nla_put_u32(skb, T_current_state, mdev->state.i) ||
2672 nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
Philipp Marek3174f8c2012-03-03 21:04:30 +01002673 nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
2674 nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
2675 nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
2676 nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
2677 nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
2678 nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
2679 nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
2680 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
2681 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
2682 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002683 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002684
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002685 if (got_ldev) {
Philipp Reisner39a1aa7f2012-08-08 21:19:09 +02002686 int err;
2687
2688 spin_lock_irq(&mdev->ldev->md.uuid_lock);
2689 err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2690 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2691
2692 if (err)
2693 goto nla_put_failure;
2694
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002695 if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002696 nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
2697 nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
2698 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002699 if (C_SYNC_SOURCE <= mdev->state.conn &&
2700 C_PAUSED_SYNC_T >= mdev->state.conn) {
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002701 if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
2702 nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
2703 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002704 }
2705 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002706
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002707 if (sib) {
2708 switch(sib->sib_reason) {
2709 case SIB_SYNC_PROGRESS:
2710 case SIB_GET_STATUS_REPLY:
2711 break;
2712 case SIB_STATE_CHANGE:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002713 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
2714 nla_put_u32(skb, T_new_state, sib->ns.i))
2715 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002716 break;
2717 case SIB_HELPER_POST:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002718 if (nla_put_u32(skb, T_helper_exit_code,
2719 sib->helper_exit_code))
2720 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002721 /* fall through */
2722 case SIB_HELPER_PRE:
Andreas Gruenbacher26ec9282012-07-11 20:36:03 +02002723 if (nla_put_string(skb, T_helper, sib->helper_name))
2724 goto nla_put_failure;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002725 break;
2726 }
2727 }
2728 nla_nest_end(skb, nla);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002729
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002730 if (0)
2731nla_put_failure:
2732 err = -EMSGSIZE;
2733 if (got_ldev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002734 put_ldev(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002735 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002736}
2737
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002738int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002739{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002740 enum drbd_ret_code retcode;
2741 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002742
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002743 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2744 if (!adm_ctx.reply_skb)
2745 return retcode;
2746 if (retcode != NO_ERROR)
2747 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002748
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002749 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2750 if (err) {
2751 nlmsg_free(adm_ctx.reply_skb);
2752 return err;
2753 }
2754out:
2755 drbd_adm_finish(info, retcode);
2756 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002757}
2758
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002759int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002760{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002761 struct drbd_conf *mdev;
2762 struct drbd_genlmsghdr *dh;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002763 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2764 struct drbd_tconn *tconn = NULL;
2765 struct drbd_tconn *tmp;
2766 unsigned volume = cb->args[1];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002767
Lars Ellenberg543cc102011-03-10 22:18:18 +01002768	/* Open-coded, deferred iteration:
2769 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2770 * idr_for_each_entry(&tconn->volumes, mdev, i) {
2771 * ...
2772 * }
2773 * }
2774 * where tconn is cb->args[0];
2775 * and i is cb->args[1];
2776 *
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002777 * cb->args[2] indicates if we shall loop over all resources,
2778 * or just dump all volumes of a single resource.
2779 *
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002780 * This may miss entries inserted after this dump started,
2781 * or entries deleted before they are reached.
Lars Ellenberg543cc102011-03-10 22:18:18 +01002782 *
2783 * We need to make sure the mdev won't disappear while
2784 * we are looking at it, and revalidate our iterators
2785 * on each iteration.
2786 */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002787
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002788 /* synchronize with conn_create()/conn_destroy() */
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002789 rcu_read_lock();
Lars Ellenberg543cc102011-03-10 22:18:18 +01002790 /* revalidate iterator position */
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002791 list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
Lars Ellenberg543cc102011-03-10 22:18:18 +01002792 if (pos == NULL) {
2793 /* first iteration */
2794 pos = tmp;
2795 tconn = pos;
2796 break;
2797 }
2798 if (tmp == pos) {
2799 tconn = pos;
2800 break;
2801 }
2802 }
2803 if (tconn) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002804next_tconn:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002805 mdev = idr_get_next(&tconn->volumes, &volume);
2806 if (!mdev) {
2807 /* No more volumes to dump on this tconn.
2808 * Advance tconn iterator. */
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002809 pos = list_entry_rcu(tconn->all_tconn.next,
2810 struct drbd_tconn, all_tconn);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002811 /* Did we dump any volume on this tconn yet? */
Lars Ellenberg543cc102011-03-10 22:18:18 +01002812 if (volume != 0) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002813 /* If we reached the end of the list,
2814 * or only a single resource dump was requested,
2815 * we are done. */
2816 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2817 goto out;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002818 volume = 0;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002819 tconn = pos;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002820 goto next_tconn;
2821 }
2822 }
2823
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002824 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2825 cb->nlh->nlmsg_seq, &drbd_genl_family,
2826 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2827 if (!dh)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002828 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002829
Lars Ellenberg543cc102011-03-10 22:18:18 +01002830 if (!mdev) {
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002831 /* This is a tconn without a single volume.
2832	 * Surprisingly enough, it may have a network
2833 * configuration. */
2834 struct net_conf *nc;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002835 dh->minor = -1U;
2836 dh->ret_code = NO_ERROR;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002837 if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002838 goto cancel;
2839 nc = rcu_dereference(tconn->net_conf);
2840 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
2841 goto cancel;
2842 goto done;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002843 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002844
Lars Ellenberg543cc102011-03-10 22:18:18 +01002845 D_ASSERT(mdev->vnr == volume);
2846 D_ASSERT(mdev->tconn == tconn);
2847
2848 dh->minor = mdev_to_minor(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002849 dh->ret_code = NO_ERROR;
2850
2851 if (nla_put_status_info(skb, mdev, NULL)) {
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002852cancel:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002853 genlmsg_cancel(skb, dh);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002854 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002855 }
Lars Ellenberg367d675d2011-07-11 23:49:55 +02002856done:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002857 genlmsg_end(skb, dh);
2858 }
2859
Lars Ellenberg543cc102011-03-10 22:18:18 +01002860out:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002861 rcu_read_unlock();
Lars Ellenberg543cc102011-03-10 22:18:18 +01002862 /* where to start the next iteration */
2863 cb->args[0] = (long)pos;
2864 cb->args[1] = (pos == tconn) ? volume + 1 : 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002865
Lars Ellenberg543cc102011-03-10 22:18:18 +01002866	/* If no more tconns/volumes/minors are found, the skb stays empty,
2867	 * which will terminate the dump. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002868 return skb->len;
2869}
2870
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002871/*
2872 * Request status of all resources, or of all volumes within a single resource.
2873 *
2874 * This is a dump, as the answer may not fit in a single reply skb otherwise.
2875	 * This means we cannot use the family->attrbuf or other such members, because
2876 * dump is NOT protected by the genl_lock(). During dump, we only have access
2877 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2878 *
2879 * Once things are setup properly, we call into get_one_status().
2880 */
2881int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2882{
2883 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2884 struct nlattr *nla;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002885 const char *resource_name;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002886 struct drbd_tconn *tconn;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002887 int maxtype;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002888
2889 /* Is this a followup call? */
2890 if (cb->args[0]) {
2891 /* ... of a single resource dump,
2892 * and the resource iterator has been advanced already? */
2893 if (cb->args[2] && cb->args[2] != cb->args[0])
2894 return 0; /* DONE. */
2895 goto dump;
2896 }
2897
2898 /* First call (from netlink_dump_start). We need to figure out
2899 * which resource(s) the user wants us to dump. */
2900 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2901 nlmsg_attrlen(cb->nlh, hdrlen),
2902 DRBD_NLA_CFG_CONTEXT);
2903
2904 /* No explicit context given. Dump all. */
2905 if (!nla)
2906 goto dump;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002907 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
2908 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
2909 if (IS_ERR(nla))
2910 return PTR_ERR(nla);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002911 /* context given, but no name present? */
2912 if (!nla)
2913 return -EINVAL;
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02002914 resource_name = nla_data(nla);
2915 tconn = conn_get_by_name(resource_name);
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002916
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002917 if (!tconn)
2918 return -ENODEV;
2919
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002920 kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2921
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002922 /* prime iterators, and set "filter" mode mark:
2923 * only dump this tconn. */
2924 cb->args[0] = (long)tconn;
2925 /* cb->args[1] = 0; passed in this way. */
2926 cb->args[2] = (long)tconn;
2927
2928dump:
2929 return get_one_status(skb, cb);
2930}
2931
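/* Report which timeout class currently applies to this minor: peer
 * outdated, degraded (USE_DEGR_WFC_T set), or the default timeout. */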
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002932int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2933{
2934 enum drbd_ret_code retcode;
2935 struct timeout_parms tp;
2936 int err;
2937
2938 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2939 if (!adm_ctx.reply_skb)
2940 return retcode;
2941 if (retcode != NO_ERROR)
2942 goto out;
2943
2944 tp.timeout_type =
2945 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2946 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2947 UT_DEFAULT;
2948
2949 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2950 if (err) {
2951 nlmsg_free(adm_ctx.reply_skb);
2952 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002953 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002954out:
2955 drbd_adm_finish(info, retcode);
2956 return 0;
2957}
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002958
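/* Start (or resume) online verify as verification source.  The start
 * position defaults to where the last verify run stopped, may be
 * overridden by the request, and is aligned to bitmap granularity. */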
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002959int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2960{
2961 struct drbd_conf *mdev;
2962 enum drbd_ret_code retcode;
Lars Ellenberg58ffa582012-07-26 14:09:49 +02002963 struct start_ov_parms parms;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002964
2965 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2966 if (!adm_ctx.reply_skb)
2967 return retcode;
2968 if (retcode != NO_ERROR)
2969 goto out;
2970
2971 mdev = adm_ctx.mdev;
Lars Ellenberg58ffa582012-07-26 14:09:49 +02002972
2973 /* resume from last known position, if possible */
2974 parms.ov_start_sector = mdev->ov_start_sector;
2975 parms.ov_stop_sector = ULLONG_MAX;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002976 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002977 int err = start_ov_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002978 if (err) {
2979 retcode = ERR_MANDATORY_TAG;
2980 drbd_msg_put_info(from_attrs_err_to_txt(err));
2981 goto out;
2982 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002983 }
Lars Ellenberg58ffa582012-07-26 14:09:49 +02002984 /* w_make_ov_request expects position to be aligned */
2985 mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
2986 mdev->ov_stop_sector = parms.ov_stop_sector;
2987
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002988	/* If there is still bitmap IO pending, e.g. from a previous resync or
2989	 * verify run just finishing, wait for it before requesting a new resync. */
Lars Ellenberg5016b822012-05-07 12:00:56 +02002990 drbd_suspend_io(mdev);
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002991 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002992 retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
Lars Ellenberg5016b822012-05-07 12:00:56 +02002993 drbd_resume_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002994out:
2995 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002996 return 0;
2997}
2998
2999
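/* Generate a new current UUID.  With clear_bm set on a freshly created,
 * connected device (agreed protocol >= 90), this skips the initial sync:
 * the bitmap is cleared and both disks are forced D_UP_TO_DATE. */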
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003000int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003001{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003002 struct drbd_conf *mdev;
3003 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003004 int skip_initial_sync = 0;
3005 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003006 struct new_c_uuid_parms args;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003007
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003008 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3009 if (!adm_ctx.reply_skb)
3010 return retcode;
3011 if (retcode != NO_ERROR)
3012 goto out_nolock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003013
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003014 mdev = adm_ctx.mdev;
3015 memset(&args, 0, sizeof(args));
3016 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01003017 err = new_c_uuid_parms_from_attrs(&args, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003018 if (err) {
3019 retcode = ERR_MANDATORY_TAG;
3020 drbd_msg_put_info(from_attrs_err_to_txt(err));
3021 goto out_nolock;
3022 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003023 }
3024
Philipp Reisner8410da82011-02-11 20:11:10 +01003025 mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003026
3027 if (!get_ldev(mdev)) {
3028 retcode = ERR_NO_DISK;
3029 goto out;
3030 }
3031
3032 /* this is "skip initial sync", assume to be clean */
Philipp Reisner31890f42011-01-19 14:12:51 +01003033 if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3035 dev_info(DEV, "Preparing to skip initial sync\n");
3036 skip_initial_sync = 1;
3037 } else if (mdev->state.conn != C_STANDALONE) {
3038 retcode = ERR_CONNECTED;
3039 goto out_dec;
3040 }
3041
3042 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
3043 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
3044
3045 if (args.clear_bm) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003046 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3047 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003048 if (err) {
3049 dev_err(DEV, "Writing bitmap failed with %d\n",err);
3050 retcode = ERR_IO_MD_DISK;
3051 }
3052 if (skip_initial_sync) {
3053 drbd_send_uuids_skip_initial_sync(mdev);
3054 _drbd_uuid_set(mdev, UI_BITMAP, 0);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003055 drbd_print_uuids(mdev, "cleared bitmap UUID");
Philipp Reisner87eeee42011-01-19 14:16:30 +01003056 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003057 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3058 CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01003059 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003060 }
3061 }
3062
3063 drbd_md_sync(mdev);
3064out_dec:
3065 put_ldev(mdev);
3066out:
Philipp Reisner8410da82011-02-11 20:11:10 +01003067 mutex_unlock(mdev->state_mutex);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003068out_nolock:
3069 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003070 return 0;
3071}
3072
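/* A resource name must be non-empty and must not contain a slash, so that
 * it could later be used as a sysfs/configfs/debugfs directory name. */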
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003073static enum drbd_ret_code
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003074drbd_check_resource_name(const char *name)
Philipp Reisner774b3052011-02-22 02:07:03 -05003075{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003076 if (!name || !name[0]) {
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003077 drbd_msg_put_info("resource name missing");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003078 return ERR_MANDATORY_TAG;
3079 }
3080 /* if we want to use these in sysfs/configfs/debugfs some day,
3081 * we must not allow slashes */
3082 if (strchr(name, '/')) {
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003083 drbd_msg_put_info("invalid resource name");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003084 return ERR_INVALID_REQUEST;
3085 }
3086 return NO_ERROR;
3087}
Philipp Reisner774b3052011-02-22 02:07:03 -05003088
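/* Create a new resource (tconn): take the resource options from the
 * request, validate the resource name, and call conn_create() unless a
 * resource of that name exists already. */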
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003089int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003090{
3091 enum drbd_ret_code retcode;
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003092 struct res_opts res_opts;
3093 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003094
3095 retcode = drbd_adm_prepare(skb, info, 0);
3096 if (!adm_ctx.reply_skb)
3097 return retcode;
3098 if (retcode != NO_ERROR)
3099 goto out;
3100
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003101 set_res_opts_defaults(&res_opts);
3102 err = res_opts_from_attrs(&res_opts, info);
3103 if (err && err != -ENOMSG) {
3104 retcode = ERR_MANDATORY_TAG;
3105 drbd_msg_put_info(from_attrs_err_to_txt(err));
3106 goto out;
3107 }
3108
Andreas Gruenbacher7c3063c2011-06-09 17:52:12 +02003109 retcode = drbd_check_resource_name(adm_ctx.resource_name);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003110 if (retcode != NO_ERROR)
3111 goto out;
3112
3113 if (adm_ctx.tconn) {
Lars Ellenberg38f19612011-03-14 13:22:35 +01003114 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3115 retcode = ERR_INVALID_REQUEST;
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003116 drbd_msg_put_info("resource exists");
Lars Ellenberg38f19612011-03-14 13:22:35 +01003117 }
3118 /* else: still NO_ERROR */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003119 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003120 }
3121
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02003122 if (!conn_create(adm_ctx.resource_name, &res_opts))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003123 retcode = ERR_NOMEM;
3124out:
3125 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003126 return 0;
3127}
3128
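/* Add a new minor (volume) to an existing resource.  Out-of-range minor
 * numbers or volume ids are rejected; an already existing minor is only
 * an error if NLM_F_EXCL was requested. */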
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003129int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003130{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003131 struct drbd_genlmsghdr *dh = info->userhdr;
3132 enum drbd_ret_code retcode;
Philipp Reisner774b3052011-02-22 02:07:03 -05003133
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02003134 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003135 if (!adm_ctx.reply_skb)
3136 return retcode;
3137 if (retcode != NO_ERROR)
3138 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003139
Andreas Gruenbacherf2257a52011-07-14 16:00:40 +02003140 if (dh->minor > MINORMASK) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003141 drbd_msg_put_info("requested minor out of range");
3142 retcode = ERR_INVALID_REQUEST;
3143 goto out;
3144 }
Andreas Gruenbacher0c8e36d2011-03-30 16:00:17 +02003145 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003146 drbd_msg_put_info("requested volume id out of range");
3147 retcode = ERR_INVALID_REQUEST;
3148 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003149 }
3150
Lars Ellenberg38f19612011-03-14 13:22:35 +01003151 /* drbd_adm_prepare made sure already
3152 * that mdev->tconn and mdev->vnr match the request. */
3153 if (adm_ctx.mdev) {
3154 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3155 retcode = ERR_MINOR_EXISTS;
3156 /* else: still NO_ERROR */
3157 goto out;
3158 }
3159
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003160 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
3161out:
3162 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003163 return 0;
3164}
3165
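/* A minor may only be deleted while it is Diskless and Secondary; it is
 * then removed from the volume and minor idrs, its gendisk is deleted,
 * and the last reference is dropped after an RCU grace period. */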
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003166static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
3167{
3168 if (mdev->state.disk == D_DISKLESS &&
3169 /* no need to be mdev->state.conn == C_STANDALONE &&
3170 * we may want to delete a minor from a live replication group.
3171 */
3172 mdev->state.role == R_SECONDARY) {
Philipp Reisner369bea62011-07-06 23:04:44 +02003173 _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
3174 CS_VERBOSE + CS_WAIT_COMPLETE);
Philipp Reisner81fa2e62011-05-04 15:10:30 +02003175 idr_remove(&mdev->tconn->volumes, mdev->vnr);
3176 idr_remove(&minors, mdev_to_minor(mdev));
3177 del_gendisk(mdev->vdisk);
3178 synchronize_rcu();
3179 kref_put(&mdev->kref, &drbd_minor_destroy);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003180 return NO_ERROR;
3181 } else
3182 return ERR_MINOR_CONFIGURED;
3183}
3184
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003185int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003186{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003187 enum drbd_ret_code retcode;
3188
3189 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3190 if (!adm_ctx.reply_skb)
3191 return retcode;
3192 if (retcode != NO_ERROR)
3193 goto out;
3194
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003195 retcode = adm_delete_minor(adm_ctx.mdev);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003196out:
3197 drbd_adm_finish(info, retcode);
3198 return 0;
3199}
3200
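/* Take a whole resource down in one call: demote every volume to
 * Secondary, disconnect, detach all disks, stop the worker thread,
 * delete all volumes, and finally delete the connection itself. */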
3201int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3202{
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003203 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003204 struct drbd_conf *mdev;
3205 unsigned i;
3206
3207 retcode = drbd_adm_prepare(skb, info, 0);
3208 if (!adm_ctx.reply_skb)
3209 return retcode;
3210 if (retcode != NO_ERROR)
3211 goto out;
3212
3213 if (!adm_ctx.tconn) {
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003214 retcode = ERR_RES_NOT_KNOWN;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003215 goto out;
3216 }
3217
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003218 /* demote */
3219 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3220 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3221 if (retcode < SS_SUCCESS) {
3222 drbd_msg_put_info("failed to demote");
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003223 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003224 }
3225 }
3226
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003227 retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3228 if (retcode < SS_SUCCESS) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003229 drbd_msg_put_info("failed to disconnect");
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003230 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003231 }
3232
3233 /* detach */
3234 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
Philipp Reisnercdfda632011-07-05 15:38:59 +02003235 retcode = adm_detach(mdev, 0);
Lars Ellenberg27012382012-07-24 10:13:55 +02003236 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003237 drbd_msg_put_info("failed to detach");
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003238 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003239 }
3240 }
3241
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003242 /* If we reach this, all volumes (of this tconn) are Secondary,
3243 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02003244 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003245 drbd_thread_stop(&adm_ctx.tconn->worker);
3246
3247 /* Now, nothing can fail anymore */
3248
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003249 /* delete volumes */
3250 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3251 retcode = adm_delete_minor(mdev);
3252 if (retcode != NO_ERROR) {
3253 /* "can not happen" */
3254 drbd_msg_put_info("failed to delete volume");
Philipp Reisneref356262011-04-13 14:21:29 -07003255 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003256 }
3257 }
3258
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003259 /* delete connection */
3260 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02003261 list_del_rcu(&adm_ctx.tconn->all_tconn);
3262 synchronize_rcu();
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003263 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3264
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003265 retcode = NO_ERROR;
3266 } else {
3267 /* "can not happen" */
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003268 retcode = ERR_RES_IN_USE;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003269 drbd_msg_put_info("failed to delete connection");
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003270 }
Philipp Reisneref356262011-04-13 14:21:29 -07003271 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003272out:
3273 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003274 return 0;
3275}
3276
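/* Delete a resource.  This only succeeds if it no longer has any minors;
 * the tconn is then unlinked, its reference dropped, and its worker
 * thread stopped. */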
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003277int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003278{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003279 enum drbd_ret_code retcode;
3280
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02003281 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003282 if (!adm_ctx.reply_skb)
3283 return retcode;
3284 if (retcode != NO_ERROR)
3285 goto out;
3286
3287 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02003288 list_del_rcu(&adm_ctx.tconn->all_tconn);
3289 synchronize_rcu();
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003290 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3291
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003292 retcode = NO_ERROR;
Philipp Reisner774b3052011-02-22 02:07:03 -05003293 } else {
Andreas Gruenbacher789c1b62011-06-06 16:16:44 +02003294 retcode = ERR_RES_IN_USE;
Philipp Reisner774b3052011-02-22 02:07:03 -05003295 }
3296
Lars Ellenberg992d6e92011-05-02 11:47:18 +02003297 if (retcode == NO_ERROR)
3298 drbd_thread_stop(&adm_ctx.tconn->worker);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003299out:
3300 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003301 return 0;
3302}
3303
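/* Broadcast a state change / sync progress / helper event to the drbd
 * events multicast group, carrying the full status info of @mdev. */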
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003304void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003305{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003306 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3307 struct sk_buff *msg;
3308 struct drbd_genlmsghdr *d_out;
3309 unsigned seq;
3310 int err = -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003311
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003312 seq = atomic_inc_return(&drbd_genl_seq);
3313 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3314 if (!msg)
3315 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003316
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003317 err = -EMSGSIZE;
3318 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3319	if (!d_out) /* cannot happen, but anyway. */
3320 goto nla_put_failure;
3321 d_out->minor = mdev_to_minor(mdev);
Andreas Gruenbacher6f9b5f82011-05-06 01:03:32 +02003322 d_out->ret_code = NO_ERROR;
Philipp Reisner9f5180e2009-10-06 09:30:14 +02003323
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003324 if (nla_put_status_info(msg, mdev, sib))
3325 goto nla_put_failure;
3326 genlmsg_end(msg, d_out);
3327 err = drbd_genl_multicast_events(msg, 0);
3328 /* msg has been consumed or freed in netlink_broadcast() */
3329 if (err && err != -ESRCH)
3330 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003331
Philipp Reisnerb411b362009-09-25 16:07:19 -07003332 return;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003333
3334nla_put_failure:
3335 nlmsg_free(msg);
3336failed:
3337 dev_err(DEV, "Error %d while broadcasting event. "
3338 "Event seq:%u sib_reason:%u\n",
3339 err, seq, sib->sib_reason);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003340}