/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

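/* Finalize the netlink reply message and send it back to the
 * requesting user space process. */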
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
	       return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.resource_name,
				adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *resource_name = NULL;

	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
		nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
		if (nla && !IS_ERR(nla))
			resource_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

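/* Put the peer's address family and address into the user mode helper
 * environment (DRBD_PEER_AF, DRBD_PEER_ADDRESS). */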
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

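/* Run the user space helper (usermode_helper) for this minor,
 * broadcasting helper pre/post events and returning its exit status. */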
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

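/* Write out the meta data of all volumes of this connection. */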
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_md_sync(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

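/* Like drbd_khelper(), but run the helper for a whole connection
 * (resource) rather than a single minor. */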
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

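/* Return the strictest fencing policy configured among all attached
 * volumes of this connection. */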
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

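/* Call the fence-peer helper for this connection and translate its exit
 * code into the corresponding disk state change. Returns true if the
 * peer's disk can be considered Outdated or worse. */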
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection
	   in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

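/* Try to change the role of this device to Primary or Secondary.
 * Retries a limited number of times; depending on "force" and the fencing
 * policy it may force the local disk UpToDate or outdate the peer. */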
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i = 0; val.role = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

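/* Generic netlink handler for the DRBD_ADM_PRIMARY and DRBD_ADM_SECONDARY
 * commands. */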
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			(unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

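/* Compute the agreed device size (in sectors) from the local capacity,
 * the peer's reported size, and the user configured size, if any. */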
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size; /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

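/* Apply the given max_bio_size and the backing device's queue limits to
 * our request queue. */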
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

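/* Re-evaluate the maximal BIO size from the local backing device and the
 * peer's capabilities, and apply the result to the request queue. */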
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because new from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn);
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

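/* Clamp user supplied disk options (al-extents, c-plan-ahead) to their
 * allowed ranges. */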
static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}

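/* Generic netlink handler for "disk-options": build a new disk_conf from
 * the request, validate it, and swap it in under conf_update using RCU. */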
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	enforce_disk_conf_limits(new_disk_conf);

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);
	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	goto success;

fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

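/* Generic netlink handler for "attach": parse the requested disk
 * configuration and associate the backing and meta data devices with
 * this minor. */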
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001262int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001263{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001264 struct drbd_conf *mdev;
1265 int err;
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01001266 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001267 enum determine_dev_size dd;
1268 sector_t max_possible_sectors;
1269 sector_t min_md_device_sectors;
1270 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001271 struct disk_conf *new_disk_conf = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001272 struct block_device *bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001273 struct lru_cache *resync_lru = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001274 struct fifo_buffer *new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001275 union drbd_state ns, os;
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001276 enum drbd_state_rv rv;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001277 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001278 int cp_discovered = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001279
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001280 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1281 if (!adm_ctx.reply_skb)
1282 return retcode;
1283 if (retcode != NO_ERROR)
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001284 goto finish;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001285
1286 mdev = adm_ctx.mdev;
Philipp Reisner0e29d162011-02-18 14:23:11 +01001287 conn_reconfig_start(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001288
1289 /* if you want to reconfigure, please tear down first */
1290 if (mdev->state.disk > D_DISKLESS) {
1291 retcode = ERR_DISK_CONFIGURED;
1292 goto fail;
1293 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001294 /* It may just now have detached because of IO error. Make sure
1295 * drbd_ldev_destroy is done already, we may end up here very fast,
1296 * e.g. if someone calls attach from the on-io-error handler,
1297 * to realize a "hot spare" feature (not that I'd recommend that) */
1298 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001299
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001300 /* allocation not in the IO path, drbdsetup context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001301 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1302 if (!nbc) {
1303 retcode = ERR_NOMEM;
1304 goto fail;
1305 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001306 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1307 if (!new_disk_conf) {
1308 retcode = ERR_NOMEM;
1309 goto fail;
1310 }
1311 nbc->disk_conf = new_disk_conf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001312
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001313 set_disk_conf_defaults(new_disk_conf);
1314 err = disk_conf_from_attrs(new_disk_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001315 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001316 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001317 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001318 goto fail;
1319 }
1320
Philipp Reisnerd589a212011-05-04 10:06:52 +02001321 enforce_disk_conf_limits(new_disk_conf);
1322
Philipp Reisner9958c852011-05-03 16:19:31 +02001323 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1324 if (!new_plan) {
1325 retcode = ERR_NOMEM;
1326 goto fail;
1327 }
1328
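	/* meta_dev_idx >= 0 selects a fixed slot on an external meta device;
	 * the negative specials (per the definitions in drbd.h) are internal
	 * (-1), flexible external (-2) and flexible internal (-3); anything
	 * below that is invalid. */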
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001329 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001330 retcode = ERR_MD_IDX_INVALID;
1331 goto fail;
1332 }
1333
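	/* Freezing IO and shooting the peer (FP_STONITH) is meant to guarantee
	 * that no acknowledged write is lost on failover; protocol A acks
	 * writes before the peer has them, which would break that guarantee,
	 * so the combination is refused (mirroring _check_net_options()). */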
Philipp Reisner44ed1672011-04-19 17:10:19 +02001334 rcu_read_lock();
1335 nc = rcu_dereference(mdev->tconn->net_conf);
1336 if (nc) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001337 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001338 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001339 retcode = ERR_STONITH_AND_PROT_A;
1340 goto fail;
1341 }
1342 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001343 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001344
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001345 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001346 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
Tejun Heoe525fd82010-11-13 11:55:17 +01001347 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001348 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001349 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001350 retcode = ERR_OPEN_DISK;
1351 goto fail;
1352 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001353 nbc->backing_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001354
Tejun Heoe525fd82010-11-13 11:55:17 +01001355 /*
1356 * meta_dev_idx >= 0: external fixed size, possibly multiple
1357 * drbd sharing one meta device. TODO in that case, paranoia
1358 * check that [md_bdev, meta_dev_idx] is not yet used by some
1359 * other drbd minor! (if you use drbd.conf + drbdadm, that
1360 * should check it for you already; but if you don't, or
1361 * someone fooled it, we need to double check here)
1362 */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001363 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001364 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001365 (new_disk_conf->meta_dev_idx < 0) ?
Tejun Heod4d77622010-11-13 11:55:18 +01001366 (void *)mdev : (void *)drbd_m_holder);
Tejun Heoe525fd82010-11-13 11:55:17 +01001367 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001368 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001369 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001370 retcode = ERR_OPEN_MD_DISK;
1371 goto fail;
1372 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001373 nbc->md_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001374
Tejun Heoe525fd82010-11-13 11:55:17 +01001375 if ((nbc->backing_bdev == nbc->md_bdev) !=
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001376 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1377 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001378 retcode = ERR_MD_IDX_INVALID;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001379 goto fail;
1380 }
1381
1382 resync_lru = lc_create("resync", drbd_bm_ext_cache,
Lars Ellenberg46a15bc2011-02-21 13:21:01 +01001383 1, 61, sizeof(struct bm_extent),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001384 offsetof(struct bm_extent, lce));
1385 if (!resync_lru) {
1386 retcode = ERR_NOMEM;
Tejun Heoe525fd82010-11-13 11:55:17 +01001387 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001388 }
1389
1390 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1391 drbd_md_set_sector_offsets(mdev, nbc);
1392
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001393 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001394 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1395 (unsigned long long) drbd_get_max_capacity(nbc),
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001396 (unsigned long long) new_disk_conf->disk_size);
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001397 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001398 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001399 }
1400
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001401 if (new_disk_conf->meta_dev_idx < 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001402 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1403 /* at least one MB, otherwise it does not make sense */
1404 min_md_device_sectors = (2<<10);
1405 } else {
1406 max_possible_sectors = DRBD_MAX_SECTORS;
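		/* with the usual on-disk layout, each external index slot
		 * reserves MD_RESERVED_SECT (128 MB worth of sectors), and
		 * slots 0..meta_dev_idx must all fit on the meta device */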
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001407 min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001408 }
1409
Philipp Reisnerb411b362009-09-25 16:07:19 -07001410 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001411 retcode = ERR_MD_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001412 dev_warn(DEV, "refusing attach: md-device too small, "
1413 "at least %llu sectors needed for this meta-disk type\n",
1414 (unsigned long long) min_md_device_sectors);
Tejun Heoe525fd82010-11-13 11:55:17 +01001415 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001416 }
1417
1418 /* Make sure the new disk is big enough
1419 * (we may currently be R_PRIMARY with no local disk...) */
1420 if (drbd_get_max_capacity(nbc) <
1421 drbd_get_capacity(mdev->this_bdev)) {
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001422 retcode = ERR_DISK_TOO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001423 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001424 }
1425
1426 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1427
Lars Ellenberg13529942009-10-12 19:07:49 +02001428 if (nbc->known_size > max_possible_sectors) {
1429 dev_warn(DEV, "==> truncating very big lower level device "
1430 "to currently maximum possible %llu sectors <==\n",
1431 (unsigned long long) max_possible_sectors);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001432 if (new_disk_conf->meta_dev_idx >= 0)
Lars Ellenberg13529942009-10-12 19:07:49 +02001433 dev_warn(DEV, "==>> using internal or flexible "
1434 "meta data may help <<==\n");
1435 }
1436
Philipp Reisnerb411b362009-09-25 16:07:19 -07001437 drbd_suspend_io(mdev);
1438 /* also wait for the last barrier ack. */
Philipp Reisner2aebfab2011-03-28 16:48:11 +02001439 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001440 /* and for any other previously queued work */
Philipp Reisnera21e9292011-02-08 15:08:49 +01001441 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001442
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001443 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1444 retcode = rv; /* FIXME: Type mismatch. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001445 drbd_resume_io(mdev);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001446 if (rv < SS_SUCCESS)
Tejun Heoe525fd82010-11-13 11:55:17 +01001447 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001448
1449 if (!get_ldev_if_state(mdev, D_ATTACHING))
1450 goto force_diskless;
1451
1452 drbd_md_set_sector_offsets(mdev, nbc);
1453
1454 if (!mdev->bitmap) {
1455 if (drbd_bm_init(mdev)) {
1456 retcode = ERR_NOMEM;
1457 goto force_diskless_dec;
1458 }
1459 }
1460
1461 retcode = drbd_md_read(mdev, nbc);
1462 if (retcode != NO_ERROR)
1463 goto force_diskless_dec;
1464
1465 if (mdev->state.conn < C_CONNECTED &&
1466 mdev->state.role == R_PRIMARY &&
1467 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1468 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1469 (unsigned long long)mdev->ed_uuid);
1470 retcode = ERR_DATA_NOT_CURRENT;
1471 goto force_diskless_dec;
1472 }
1473
1474 /* Since we are diskless, fix the activity log first... */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001475 if (drbd_check_al_size(mdev, new_disk_conf)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001476 retcode = ERR_NOMEM;
1477 goto force_diskless_dec;
1478 }
1479
1480	/* Prevent shrinking of consistent devices! */
1481 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001482 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001483 dev_warn(DEV, "refusing to truncate a consistent device\n");
Lars Ellenberg67b58bf2011-06-06 15:36:04 +02001484 retcode = ERR_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001485 goto force_diskless_dec;
1486 }
1487
1488 if (!drbd_al_read_log(mdev, nbc)) {
1489 retcode = ERR_IO_MD_DISK;
1490 goto force_diskless_dec;
1491 }
1492
Philipp Reisnerb411b362009-09-25 16:07:19 -07001493 /* Reset the "barriers don't work" bits here, then force meta data to
1494 * be written, to ensure we determine if barriers are supported. */
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001495 if (new_disk_conf->md_flushes)
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001496 clear_bit(MD_NO_FUA, &mdev->flags);
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001497 else
1498 set_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001499
1500 /* Point of no return reached.
1501 * Devices and memory are no longer released by error cleanup below.
1502 * now mdev takes over responsibility, and the state engine should
1503 * clean it up somewhere. */
1504 D_ASSERT(mdev->ldev == NULL);
1505 mdev->ldev = nbc;
1506 mdev->resync = resync_lru;
Philipp Reisner9958c852011-05-03 16:19:31 +02001507 mdev->rs_plan_s = new_plan;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001508 nbc = NULL;
1509 resync_lru = NULL;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001510 new_disk_conf = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001511 new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512
Philipp Reisner2451fc32010-08-24 13:43:11 +02001513 mdev->write_ordering = WO_bdev_flush;
1514 drbd_bump_write_ordering(mdev, WO_bdev_flush);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515
1516 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1517 set_bit(CRASHED_PRIMARY, &mdev->flags);
1518 else
1519 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1520
Philipp Reisner894c6a92010-06-18 16:03:20 +02001521 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02001522 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001523 set_bit(CRASHED_PRIMARY, &mdev->flags);
1524 cp_discovered = 1;
1525 }
1526
1527 mdev->send_cnt = 0;
1528 mdev->recv_cnt = 0;
1529 mdev->read_cnt = 0;
1530 mdev->writ_cnt = 0;
1531
Philipp Reisner99432fc2011-05-20 16:39:13 +02001532 drbd_reconsider_max_bio_size(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001533
1534 /* If I am currently not R_PRIMARY,
1535 * but meta data primary indicator is set,
1536 * I just now recover from a hard crash,
1537 * and have been R_PRIMARY before that crash.
1538 *
1539 * Now, if I had no connection before that crash
1540 * (have been degraded R_PRIMARY), chances are that
1541 * I won't find my peer now either.
1542 *
1543 * In that case, and _only_ in that case,
1544 * we use the degr-wfc-timeout instead of the default,
1545 * so we can automatically recover from a crash of a
1546 * degraded but active "cluster" after a certain timeout.
1547 */
1548 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1549 if (mdev->state.role != R_PRIMARY &&
1550 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1551 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1552 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1553
Bart Van Assche24c48302011-05-21 18:32:29 +02001554 dd = drbd_determine_dev_size(mdev, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001555 if (dd == dev_size_error) {
1556 retcode = ERR_NOMEM_BITMAP;
1557 goto force_diskless_dec;
1558 } else if (dd == grew)
1559 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1560
1561 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1562 dev_info(DEV, "Assuming that all blocks are out of sync "
1563 "(aka FullSync)\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001564 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1565 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001566 retcode = ERR_IO_MD_DISK;
1567 goto force_diskless_dec;
1568 }
1569 } else {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001570 if (drbd_bitmap_io(mdev, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01001571 "read from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001572 retcode = ERR_IO_MD_DISK;
1573 goto force_diskless_dec;
1574 }
1575 }
1576
1577 if (cp_discovered) {
1578 drbd_al_apply_to_bm(mdev);
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001579 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1580 "crashed primary apply AL", BM_LOCKED_MASK)) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001581 retcode = ERR_IO_MD_DISK;
1582 goto force_diskless_dec;
1583 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001584 }
1585
Philipp Reisner07782862010-08-31 12:00:50 +02001586 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1587 drbd_suspend_al(mdev); /* IO is still suspended here... */
1588
Philipp Reisner87eeee42011-01-19 14:16:30 +01001589 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02001590 os = drbd_read_state(mdev);
1591 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001592 /* If MDF_CONSISTENT is not set go into inconsistent state,
1593	   otherwise investigate MDF_WAS_UP_TO_DATE...
1594 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1595 otherwise into D_CONSISTENT state.
1596 */
1597 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1598 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1599 ns.disk = D_CONSISTENT;
1600 else
1601 ns.disk = D_OUTDATED;
1602 } else {
1603 ns.disk = D_INCONSISTENT;
1604 }
1605
1606 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1607 ns.pdsk = D_OUTDATED;
1608
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001609 rcu_read_lock();
1610 if (ns.disk == D_CONSISTENT &&
1611 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001612 ns.disk = D_UP_TO_DATE;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001613 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001614
1615 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1616 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1617 this point, because drbd_request_state() modifies these
1618 flags. */
1619
1620 /* In case we are C_CONNECTED postpone any decision on the new disk
1621 state after the negotiation phase. */
1622 if (mdev->state.conn == C_CONNECTED) {
1623 mdev->new_state_tmp.i = ns.i;
1624 ns.i = os.i;
1625 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02001626
1627 /* We expect to receive up-to-date UUIDs soon.
1628 To avoid a race in receive_state, free p_uuid while
1629 holding req_lock. I.e. atomic with the state change */
1630 kfree(mdev->p_uuid);
1631 mdev->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001632 }
1633
1634 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001635 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001636
1637 if (rv < SS_SUCCESS)
1638 goto force_diskless_dec;
1639
1640 if (mdev->state.role == R_PRIMARY)
1641 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1642 else
1643 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1644
1645 drbd_md_mark_dirty(mdev);
1646 drbd_md_sync(mdev);
1647
1648 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1649 put_ldev(mdev);
Philipp Reisner0e29d162011-02-18 14:23:11 +01001650 conn_reconfig_done(mdev->tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001651 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001652 return 0;
1653
1654 force_diskless_dec:
1655 put_ldev(mdev);
1656 force_diskless:
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001657 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001658 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001659 fail:
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001660 conn_reconfig_done(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001661 if (nbc) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001662 if (nbc->backing_bdev)
1663 blkdev_put(nbc->backing_bdev,
1664 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1665 if (nbc->md_bdev)
1666 blkdev_put(nbc->md_bdev,
1667 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001668 kfree(nbc);
1669 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001670 kfree(new_disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001671 lc_destroy(resync_lru);
Philipp Reisner9958c852011-05-03 16:19:31 +02001672 kfree(new_plan);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001673
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001674 finish:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001675 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001676 return 0;
1677}
1678
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001679static int adm_detach(struct drbd_conf *mdev)
1680{
Philipp Reisner19f83c72011-03-29 14:21:03 +02001681 enum drbd_state_rv retcode;
Lars Ellenberg009ba892011-05-02 11:51:31 +02001682 int ret;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001683 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
Lars Ellenberg009ba892011-05-02 11:51:31 +02001684 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1685 /* D_FAILED will transition to DISKLESS. */
1686 ret = wait_event_interruptible(mdev->misc_wait,
1687 mdev->state.disk != D_FAILED);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001688 drbd_resume_io(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001689 if ((int)retcode == (int)SS_IS_DISKLESS)
1690 retcode = SS_NOTHING_TO_DO;
1691 if (ret)
1692 retcode = ERR_INTR;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001693 return retcode;
1694}
1695
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001696/* Detaching the disk is a process in multiple stages. First we need to lock
1697 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1698 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1699 * internal references as well.
1700 * Only then have we finally detached. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001701int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001702{
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001703 enum drbd_ret_code retcode;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001704
1705 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1706 if (!adm_ctx.reply_skb)
1707 return retcode;
1708 if (retcode != NO_ERROR)
1709 goto out;
1710
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001711 retcode = adm_detach(adm_ctx.mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001712out:
1713 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001714 return 0;
1715}
1716
Lars Ellenbergf3990022011-03-23 14:31:09 +01001717static bool conn_resync_running(struct drbd_tconn *tconn)
1718{
1719 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001720 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001721 int vnr;
1722
Philipp Reisner695d08f2011-04-11 22:53:32 -07001723 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001724 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1725 if (mdev->state.conn == C_SYNC_SOURCE ||
1726 mdev->state.conn == C_SYNC_TARGET ||
1727 mdev->state.conn == C_PAUSED_SYNC_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001728 mdev->state.conn == C_PAUSED_SYNC_T) {
1729 rv = true;
1730 break;
1731 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001732 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001733 rcu_read_unlock();
1734
1735 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001736}
1737
1738static bool conn_ov_running(struct drbd_tconn *tconn)
1739{
1740 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001741 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001742 int vnr;
1743
Philipp Reisner695d08f2011-04-11 22:53:32 -07001744 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001745 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1746 if (mdev->state.conn == C_VERIFY_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001747 mdev->state.conn == C_VERIFY_T) {
1748 rv = true;
1749 break;
1750 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001751 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001752 rcu_read_unlock();
1753
1754 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001755}
1756
Philipp Reisnercd643972011-04-13 18:00:59 -07001757static enum drbd_ret_code
Philipp Reisner44ed1672011-04-19 17:10:19 +02001758_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
Philipp Reisnercd643972011-04-13 18:00:59 -07001759{
1760 struct drbd_conf *mdev;
1761 int i;
1762
Philipp Reisnerdcb20d12011-05-16 14:30:24 +02001763 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1764 if (new_conf->wire_protocol != old_conf->wire_protocol)
1765 return ERR_NEED_APV_100;
1766
1767 if (new_conf->two_primaries != old_conf->two_primaries)
1768 return ERR_NEED_APV_100;
1769
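		/* the double negation below compares presence only: switching
		 * integrity checking on or off needs APV 100, just like
		 * changing the algorithm itself (checked right after) */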
1770 if (!new_conf->integrity_alg != !old_conf->integrity_alg)
1771 return ERR_NEED_APV_100;
1772
1773 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1774 return ERR_NEED_APV_100;
1775 }
1776
1777 if (!new_conf->two_primaries &&
1778 conn_highest_role(tconn) == R_PRIMARY &&
1779 conn_highest_peer(tconn) == R_PRIMARY)
1780 return ERR_NEED_ALLOW_TWO_PRI;
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001781
Philipp Reisnercd643972011-04-13 18:00:59 -07001782 if (new_conf->two_primaries &&
1783 (new_conf->wire_protocol != DRBD_PROT_C))
1784 return ERR_NOT_PROTO_C;
1785
Philipp Reisnercd643972011-04-13 18:00:59 -07001786 idr_for_each_entry(&tconn->volumes, mdev, i) {
1787 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001788 enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
Philipp Reisnercd643972011-04-13 18:00:59 -07001789 put_ldev(mdev);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001790 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
Philipp Reisnercd643972011-04-13 18:00:59 -07001791 return ERR_STONITH_AND_PROT_A;
Philipp Reisnercd643972011-04-13 18:00:59 -07001792 }
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02001793 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
Philipp Reisnercd643972011-04-13 18:00:59 -07001794 return ERR_DISCARD;
Philipp Reisnercd643972011-04-13 18:00:59 -07001795 }
Philipp Reisnercd643972011-04-13 18:00:59 -07001796
1797 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1798 return ERR_CONG_NOT_PROTO_A;
1799
1800 return NO_ERROR;
1801}
1802
Philipp Reisner44ed1672011-04-19 17:10:19 +02001803static enum drbd_ret_code
1804check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1805{
1806	enum drbd_ret_code rv;
1807 struct drbd_conf *mdev;
1808 int i;
1809
1810 rcu_read_lock();
1811 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1812 rcu_read_unlock();
1813
1814 /* tconn->volumes protected by genl_lock() here */
1815 idr_for_each_entry(&tconn->volumes, mdev, i) {
1816 if (!mdev->bitmap) {
1817			if (drbd_bm_init(mdev))
1818 return ERR_NOMEM;
1819 }
1820 }
1821
1822 return rv;
1823}
1824
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001825struct crypto {
1826 struct crypto_hash *verify_tfm;
1827 struct crypto_hash *csums_tfm;
1828 struct crypto_hash *cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001829 struct crypto_hash *integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001830 void *int_dig_in;
1831 void *int_dig_vv;
1832};
1833
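/* An empty tfm_name means "not configured", which is not an error;
 * otherwise allocate the hash transform, mapping failure to err_alg. */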
1834static int
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001835alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001836{
1837 if (!tfm_name[0])
1838 return NO_ERROR;
1839
1840 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1841 if (IS_ERR(*tfm)) {
1842 *tfm = NULL;
1843 return err_alg;
1844 }
1845
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001846 return NO_ERROR;
1847}
1848
1849static enum drbd_ret_code
1850alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1851{
1852 char hmac_name[CRYPTO_MAX_ALG_NAME];
1853 enum drbd_ret_code rv;
1854 int hash_size;
1855
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001856 rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1857 ERR_CSUMS_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001858 if (rv != NO_ERROR)
1859 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001860 rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1861 ERR_VERIFY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001862 if (rv != NO_ERROR)
1863 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001864 rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1865 ERR_INTEGRITY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001866 if (rv != NO_ERROR)
1867 return rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001868 if (new_conf->cram_hmac_alg[0] != 0) {
1869 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1870 new_conf->cram_hmac_alg);
1871
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001872 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1873 ERR_AUTH_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001874 }
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001875 if (crypto->integrity_tfm) {
1876 hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001877 crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
1878 if (!crypto->int_dig_in)
1879 return ERR_NOMEM;
1880 crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
1881 if (!crypto->int_dig_vv)
1882 return ERR_NOMEM;
1883 }
1884
1885 return rv;
1886}
1887
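/* Must cope with a partially initialized struct crypto (the error paths
 * call it early): kfree(NULL) is a no-op, and the crypto_free_hash()
 * calls tolerate the NULL members as used here. */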
1888static void free_crypto(struct crypto *crypto)
1889{
1890 kfree(crypto->int_dig_in);
1891 kfree(crypto->int_dig_vv);
1892 crypto_free_hash(crypto->cram_hmac_tfm);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001893 crypto_free_hash(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001894 crypto_free_hash(crypto->csums_tfm);
1895 crypto_free_hash(crypto->verify_tfm);
1896}
1897
Lars Ellenbergf3990022011-03-23 14:31:09 +01001898int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1899{
1900 enum drbd_ret_code retcode;
1901 struct drbd_tconn *tconn;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001902 struct net_conf *old_conf, *new_conf = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001903 int err;
1904 int ovr; /* online verify running */
1905 int rsr; /* re-sync running */
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001906 struct crypto crypto = { };
Lars Ellenbergf3990022011-03-23 14:31:09 +01001907
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02001908 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001909 if (!adm_ctx.reply_skb)
1910 return retcode;
1911 if (retcode != NO_ERROR)
1912 goto out;
1913
1914 tconn = adm_ctx.tconn;
1915
1916 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1917 if (!new_conf) {
1918 retcode = ERR_NOMEM;
1919 goto out;
1920 }
1921
Lars Ellenbergf3990022011-03-23 14:31:09 +01001922 conn_reconfig_start(tconn);
1923
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001924 mutex_lock(&tconn->data.mutex);
Philipp Reisnera0095502011-05-03 13:14:15 +02001925 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001926 old_conf = tconn->net_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001927
1928 if (!old_conf) {
1929 drbd_msg_put_info("net conf missing, try connect");
1930 retcode = ERR_INVALID_REQUEST;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001931 goto fail;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001932 }
1933
1934 *new_conf = *old_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001935 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001936 set_net_conf_defaults(new_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001937
Lars Ellenbergf3990022011-03-23 14:31:09 +01001938 err = net_conf_from_attrs_for_change(new_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02001939 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001940 retcode = ERR_MANDATORY_TAG;
1941 drbd_msg_put_info(from_attrs_err_to_txt(err));
1942 goto fail;
1943 }
1944
Philipp Reisnercd643972011-04-13 18:00:59 -07001945 retcode = check_net_options(tconn, new_conf);
1946 if (retcode != NO_ERROR)
1947 goto fail;
1948
Lars Ellenbergf3990022011-03-23 14:31:09 +01001949 /* re-sync running */
1950 rsr = conn_resync_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001951 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001952 retcode = ERR_CSUMS_RESYNC_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001953 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001954 }
1955
Lars Ellenbergf3990022011-03-23 14:31:09 +01001956 /* online verify running */
1957 ovr = conn_ov_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001958 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1959 retcode = ERR_VERIFY_RUNNING;
1960 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001961 }
1962
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001963 retcode = alloc_crypto(&crypto, new_conf);
1964 if (retcode != NO_ERROR)
1965 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001966
Philipp Reisner44ed1672011-04-19 17:10:19 +02001967 rcu_assign_pointer(tconn->net_conf, new_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001968
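	/* Install the freshly allocated transforms only where they are not
	 * currently in use; the strcmp checks above ensure that a running
	 * resync resp. online verify keeps an unchanged algorithm. */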
1969 if (!rsr) {
1970 crypto_free_hash(tconn->csums_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001971 tconn->csums_tfm = crypto.csums_tfm;
1972 crypto.csums_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001973 }
1974 if (!ovr) {
1975 crypto_free_hash(tconn->verify_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001976 tconn->verify_tfm = crypto.verify_tfm;
1977 crypto.verify_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001978 }
1979
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001980 kfree(tconn->int_dig_in);
1981 tconn->int_dig_in = crypto.int_dig_in;
1982 kfree(tconn->int_dig_vv);
1983 tconn->int_dig_vv = crypto.int_dig_vv;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001984 crypto_free_hash(tconn->integrity_tfm);
1985 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02001986 if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001987 /* Do this without trying to take tconn->data.mutex again. */
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02001988 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001989
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001990 crypto_free_hash(tconn->cram_hmac_tfm);
1991 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
1992
Philipp Reisnera0095502011-05-03 13:14:15 +02001993 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001994 mutex_unlock(&tconn->data.mutex);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001995 synchronize_rcu();
1996 kfree(old_conf);
1997
Lars Ellenbergf3990022011-03-23 14:31:09 +01001998 if (tconn->cstate >= C_WF_REPORT_PARAMS)
1999 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
2000
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002001 goto done;
2002
Lars Ellenbergf3990022011-03-23 14:31:09 +01002003 fail:
Philipp Reisnera0095502011-05-03 13:14:15 +02002004 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02002005 mutex_unlock(&tconn->data.mutex);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002006 free_crypto(&crypto);
Lars Ellenbergf3990022011-03-23 14:31:09 +01002007 kfree(new_conf);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002008 done:
Lars Ellenbergf3990022011-03-23 14:31:09 +01002009 conn_reconfig_done(tconn);
2010 out:
2011 drbd_adm_finish(info, retcode);
2012 return 0;
2013}
2014
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002015int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002016{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002017 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002018 struct net_conf *old_conf, *new_conf = NULL;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002019 struct crypto crypto = { };
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002020 struct drbd_tconn *tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002021 enum drbd_ret_code retcode;
2022 int i;
2023 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002024
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002025 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002026
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002027 if (!adm_ctx.reply_skb)
2028 return retcode;
2029 if (retcode != NO_ERROR)
2030 goto out;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002031 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2032 drbd_msg_put_info("connection endpoint(s) missing");
2033 retcode = ERR_INVALID_REQUEST;
2034 goto out;
2035 }
2036
2037 /* No need for _rcu here. All reconfiguration is
2038 * strictly serialized on genl_lock(). We are protected against
2039 * concurrent reconfiguration/addition/deletion */
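	/* refuse to set up a connection whose local or peer endpoint is
	 * already taken by another connection */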
2040 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2041 if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
2042 !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
2043 retcode = ERR_LOCAL_ADDR;
2044 goto out;
2045 }
2046
2047 if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
2048 !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
2049 retcode = ERR_PEER_ADDR;
2050 goto out;
2051 }
2052 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002053
2054 tconn = adm_ctx.tconn;
Philipp Reisner80883192011-02-18 14:56:45 +01002055 conn_reconfig_start(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002056
Philipp Reisner80883192011-02-18 14:56:45 +01002057 if (tconn->cstate > C_STANDALONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002058 retcode = ERR_NET_CONFIGURED;
2059 goto fail;
2060 }
2061
2062	/* allocation not in the IO path, drbdsetup / netlink context */
Lars Ellenberg5979e362011-04-27 21:09:55 +02002063 new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002064 if (!new_conf) {
2065 retcode = ERR_NOMEM;
2066 goto fail;
2067 }
2068
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002069 set_net_conf_defaults(new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002070
Lars Ellenbergf3990022011-03-23 14:31:09 +01002071 err = net_conf_from_attrs(new_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002072 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002073 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002074 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002075 goto fail;
2076 }
2077
Philipp Reisnercd643972011-04-13 18:00:59 -07002078 retcode = check_net_options(tconn, new_conf);
2079 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002080 goto fail;
Philipp Reisner422028b2010-10-27 11:12:07 +02002081
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002082 retcode = alloc_crypto(&crypto, new_conf);
2083 if (retcode != NO_ERROR)
2084 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002085
Philipp Reisnerb411b362009-09-25 16:07:19 -07002086 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2087
Philipp Reisner80883192011-02-18 14:56:45 +01002088 conn_flush_workqueue(tconn);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002089
Philipp Reisnera0095502011-05-03 13:14:15 +02002090 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002091 old_conf = tconn->net_conf;
2092 if (old_conf) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002093 retcode = ERR_NET_CONFIGURED;
Philipp Reisnera0095502011-05-03 13:14:15 +02002094 mutex_unlock(&tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002095 goto fail;
2096 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002097 rcu_assign_pointer(tconn->net_conf, new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002098
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002099 conn_free_crypto(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002100 tconn->int_dig_in = crypto.int_dig_in;
2101 tconn->int_dig_vv = crypto.int_dig_vv;
2102 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002103 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002104 tconn->csums_tfm = crypto.csums_tfm;
2105 tconn->verify_tfm = crypto.verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002106
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002107 tconn->my_addr_len = nla_len(adm_ctx.my_addr);
2108 memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
2109 tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
2110 memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
2111
Philipp Reisnera0095502011-05-03 13:14:15 +02002112 mutex_unlock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002113
Philipp Reisner695d08f2011-04-11 22:53:32 -07002114 rcu_read_lock();
Philipp Reisner80883192011-02-18 14:56:45 +01002115 idr_for_each_entry(&tconn->volumes, mdev, i) {
2116 mdev->send_cnt = 0;
2117 mdev->recv_cnt = 0;
Philipp Reisner80883192011-02-18 14:56:45 +01002118 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07002119 rcu_read_unlock();
Lars Ellenberg5ee743e2011-04-26 16:22:25 +02002120
2121 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2122
Philipp Reisner80883192011-02-18 14:56:45 +01002123 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002124 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002125 return 0;
2126
2127fail:
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002128 free_crypto(&crypto);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002129 kfree(new_conf);
2130
Philipp Reisner80883192011-02-18 14:56:45 +01002131 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002132out:
2133 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002134 return 0;
2135}
2136
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002137static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2138{
2139 enum drbd_state_rv rv;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002140
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002141 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2142 force ? CS_HARD : 0);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002143
2144 switch (rv) {
2145 case SS_NOTHING_TO_DO:
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002146 break;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002147 case SS_ALREADY_STANDALONE:
2148 return SS_SUCCESS;
2149 case SS_PRIMARY_NOP:
2150 /* Our state checking code wants to see the peer outdated. */
2151 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002152 pdsk, D_OUTDATED), CS_VERBOSE);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002153 break;
2154 case SS_CW_FAILED_BY_PEER:
2155 /* The peer probably wants to see us outdated. */
2156 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2157 disk, D_OUTDATED), 0);
2158 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002159 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2160 CS_HARD);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002161 }
2162 break;
2163 default:;
2164 /* no special handling necessary */
2165 }
2166
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002167 if (rv >= SS_SUCCESS) {
2168 enum drbd_state_rv rv2;
2169 /* No one else can reconfigure the network while I am here.
2170 * The state handling only uses drbd_thread_stop_nowait(),
2171 * we want to really wait here until the receiver is no more.
2172 */
2173 drbd_thread_stop(&adm_ctx.tconn->receiver);
2174
2175 /* Race breaker. This additional state change request may be
2176 * necessary, if this was a forced disconnect during a receiver
2177 * restart. We may have "killed" the receiver thread just
2178 * after drbdd_init() returned. Typically, we should be
2179 * C_STANDALONE already, now, and this becomes a no-op.
2180 */
2181 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2182 CS_VERBOSE | CS_HARD);
2183 if (rv2 < SS_SUCCESS)
2184 conn_err(tconn,
2185 "unexpected rv2=%d in conn_try_disconnect()\n",
2186 rv2);
2187 }
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002188 return rv;
2189}
2190
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002191int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002192{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002193 struct disconnect_parms parms;
2194 struct drbd_tconn *tconn;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002195 enum drbd_state_rv rv;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002196 enum drbd_ret_code retcode;
2197 int err;
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002198
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002199 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002200 if (!adm_ctx.reply_skb)
2201 return retcode;
2202 if (retcode != NO_ERROR)
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002203 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002204
2205 tconn = adm_ctx.tconn;
2206 memset(&parms, 0, sizeof(parms));
2207 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002208 err = disconnect_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002209 if (err) {
2210 retcode = ERR_MANDATORY_TAG;
2211 drbd_msg_put_info(from_attrs_err_to_txt(err));
2212 goto fail;
2213 }
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002214 }
2215
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002216 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2217 if (rv < SS_SUCCESS)
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002218 retcode = rv; /* FIXME: Type mismatch. */
2219 else
2220 retcode = NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002221 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002222 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002223 return 0;
2224}
2225
2226void resync_after_online_grow(struct drbd_conf *mdev)
2227{
2228 int iass; /* I am sync source */
2229
2230 dev_info(DEV, "Resync of new storage after online grow\n");
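	/* If the roles differ, the primary becomes sync source; between two
	 * nodes in the same role, the DISCARD_CONCURRENT flag (set on exactly
	 * one side during the handshake) breaks the tie. */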
2231 if (mdev->state.role != mdev->state.peer)
2232 iass = (mdev->state.role == R_PRIMARY);
2233 else
Philipp Reisner25703f82011-02-07 14:35:25 +01002234 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002235
2236 if (iass)
2237 drbd_start_resync(mdev, C_SYNC_SOURCE);
2238 else
2239 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2240}
2241
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002242int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002243{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002244 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002245 struct resize_parms rs;
2246 struct drbd_conf *mdev;
2247 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002248 enum determine_dev_size dd;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002249 enum dds_flags ddsf;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002250 sector_t u_size;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002251 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002252
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002253 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2254 if (!adm_ctx.reply_skb)
2255 return retcode;
2256 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002257 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002258
2259 memset(&rs, 0, sizeof(struct resize_parms));
2260 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002261 err = resize_parms_from_attrs(&rs, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002262 if (err) {
2263 retcode = ERR_MANDATORY_TAG;
2264 drbd_msg_put_info(from_attrs_err_to_txt(err));
2265 goto fail;
2266 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002267 }
2268
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002269 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002270 if (mdev->state.conn > C_CONNECTED) {
2271 retcode = ERR_RESIZE_RESYNC;
2272 goto fail;
2273 }
2274
2275 if (mdev->state.role == R_SECONDARY &&
2276 mdev->state.peer == R_SECONDARY) {
2277 retcode = ERR_NO_PRIMARY;
2278 goto fail;
2279 }
2280
2281 if (!get_ldev(mdev)) {
2282 retcode = ERR_NO_DISK;
2283 goto fail;
2284 }
2285
Philipp Reisner31890f42011-01-19 14:12:51 +01002286 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002287 retcode = ERR_NEED_APV_93;
2288 goto fail;
2289 }
2290
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002291 rcu_read_lock();
2292 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2293 rcu_read_unlock();
2294 if (u_size != (sector_t)rs.resize_size) {
2295 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2296 if (!new_disk_conf) {
2297 retcode = ERR_NOMEM;
2298 goto fail;
2299 }
2300 }
2301
Philipp Reisner087c2492010-03-26 13:49:56 +01002302 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002303 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002304
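	/* Same copy-update-publish scheme as in drbd_adm_disk_opts(): install
	 * the new disk_conf under conf_update, wait one grace period, then
	 * free the old copy. */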
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002305 if (new_disk_conf) {
2306 mutex_lock(&mdev->tconn->conf_update);
2307 old_disk_conf = mdev->ldev->disk_conf;
2308 *new_disk_conf = *old_disk_conf;
2309 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2310 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2311 mutex_unlock(&mdev->tconn->conf_update);
2312 synchronize_rcu();
2313 kfree(old_disk_conf);
2314 }
2315
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002316 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
Bart Van Assche24c48302011-05-21 18:32:29 +02002317 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002318 drbd_md_sync(mdev);
2319 put_ldev(mdev);
2320 if (dd == dev_size_error) {
2321 retcode = ERR_NOMEM_BITMAP;
2322 goto fail;
2323 }
2324
Philipp Reisner087c2492010-03-26 13:49:56 +01002325 if (mdev->state.conn == C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002326 if (dd == grew)
2327 set_bit(RESIZE_PENDING, &mdev->flags);
2328
2329 drbd_send_uuids(mdev);
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002330 drbd_send_sizes(mdev, 1, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002331 }
2332
2333 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002334 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002335 return 0;
2336}
2337
Lars Ellenbergf3990022011-03-23 14:31:09 +01002338int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002339{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002340 enum drbd_ret_code retcode;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002341 struct drbd_tconn *tconn;
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002342 struct res_opts res_opts;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002343 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002344
Andreas Gruenbacher44e52cf2011-06-14 16:07:32 +02002345 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002346 if (!adm_ctx.reply_skb)
2347 return retcode;
2348 if (retcode != NO_ERROR)
2349 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002350 tconn = adm_ctx.tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002351
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002352 res_opts = tconn->res_opts;
Lars Ellenberg5979e362011-04-27 21:09:55 +02002353 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002354 set_res_opts_defaults(&res_opts);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002355
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002356 err = res_opts_from_attrs(&res_opts, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02002357 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002358 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002359 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002360 goto fail;
2361 }
2362
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002363 err = set_resource_options(tconn, &res_opts);
2364 if (err) {
2365 retcode = ERR_INVALID_REQUEST;
2366 if (err == -ENOMEM)
2367 retcode = ERR_NOMEM;
Philipp Reisner778f2712010-07-06 11:14:00 +02002368 }
2369
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002371 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002372 return 0;
2373}
2374
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002375int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002376{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002377 struct drbd_conf *mdev;
2378 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2379
2380 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2381 if (!adm_ctx.reply_skb)
2382 return retcode;
2383 if (retcode != NO_ERROR)
2384 goto out;
2385
2386 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002387
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002388 /* If there is still bitmap IO pending, probably because of a previous
2389	 * resync having just finished, wait for it before requesting a new resync. */
2390 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2391
Philipp Reisnerb411b362009-09-25 16:07:19 -07002392 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2393
2394 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2395 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2396
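	/* SS_NEED_CONNECTION means we are not connected, so becoming sync
	 * target is not possible; instead force the local disk to
	 * D_INCONSISTENT, re-checking the connection state under req_lock so
	 * we do not race with a connection being established. */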
2397 while (retcode == SS_NEED_CONNECTION) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01002398 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002399 if (mdev->state.conn < C_CONNECTED)
2400 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002401 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002402
2403 if (retcode != SS_NEED_CONNECTION)
2404 break;
2405
2406 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2407 }
2408
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002409out:
2410 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002411 return 0;
2412}
2413
Philipp Reisner07782862010-08-31 12:00:50 +02002414static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2415{
2416 int rv;
2417
2418 rv = drbd_bmio_set_n_write(mdev);
2419 drbd_suspend_al(mdev);
2420 return rv;
2421}
2422
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

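/* Pause and resume resynchronisation on behalf of the user by toggling the
 * user_isp ("user initiated sync pause") bit in the device state. */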
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

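/* Resume frozen IO. If a new current UUID was deferred while the peer was
 * unreachable, generate it now, then clear all suspend reasons (susp,
 * susp_nod, susp_fen) and restart or fail the frozen requests as needed. */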
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

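/* Emit a DRBD_NLA_CFG_CONTEXT nest identifying a resource (and optionally a
 * volume): the resource name, my/peer addresses if known, and the volume
 * number unless VOLUME_UNSPECIFIED. Returns 0, or -EMSGSIZE if the skb is
 * full. */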
int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_resource_name, tconn->name);
	if (tconn->my_addr_len)
		NLA_PUT(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr);
	if (tconn->peer_addr_len)
		NLA_PUT(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

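/* Fill @skb with the full status of @mdev: configuration context, resource,
 * disk and net options, current state, UUIDs, and resync progress. Sensitive
 * fields (such as the shared secret) are omitted for broadcasts and for
 * callers without CAP_SYS_ADMIN. */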
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We still need to add resource name and volume number information;
	 * the minor number is already in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch(sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

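/* Netlink dump callback: emit the status of one volume per invocation,
 * walking all resources (tconns) and their volumes, or only the volumes of a
 * single resource if cb->args[2] was set by drbd_adm_get_status_all(). */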
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open-coded, deferred iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* This is a tconn without a single volume. */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock(). During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are set up properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

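/* Tell user space which wait-for-connection timeout applies:
 * UT_PEER_OUTDATED if the peer disk is known to be outdated, UT_DEGRADED if
 * the degraded-node timeout (USE_DEGR_WFC_T) is in effect, UT_DEFAULT
 * otherwise. */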
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

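/* Start online verify (connection state C_VERIFY_S), optionally resuming
 * from a caller-supplied start sector instead of the last known position. */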
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned */
		mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
	}
	/* If there is still bitmap IO pending, e.g. a previous resync or verify
	 * that has just finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

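/* Generate a new current UUID. If the clear_bm argument is set, this
 * implements "skip initial sync": the bitmap is cleared on disk and, when
 * connected with protocol >= 90 and a just-created UUID, both sides are set
 * UpToDate without a resync. */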
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assumed to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

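/* Create a new resource (tconn): validate the requested name, apply any
 * resource options passed with the request, and allocate the connection
 * object. With NLM_F_EXCL an already existing resource is reported as an
 * error, otherwise it is silently accepted. */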
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name, &res_opts))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

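/* Add a minor (volume) to an existing resource. The minor number comes from
 * the genl header, the volume id from the configuration context; both are
 * range-checked before conn_new_minor() allocates the device. */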
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

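/* A minor may only be deleted while it is Diskless and Secondary; it need not
 * be StandAlone, so a volume can be removed from a live replication group. */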
static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

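/* Tear down a whole resource in one call: demote every volume to Secondary,
 * disconnect, detach all disks, stop the worker thread, delete all volumes
 * and finally the connection itself. Any failing step aborts the teardown. */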
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped; state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
	goto out;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_RES_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

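/* Broadcast a state_info event (state change, helper pre/post, sync progress,
 * ...) to the drbd events multicast group of the generic netlink family. */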
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}