/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail would be lack of space in the skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
	       return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* putting a few bytes into a fresh skb of >= 4k will always succeed,
	 * but check anyway */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev && !adm_ctx.tconn)
		adm_ctx.tconn = adm_ctx.mdev->tconn;
	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

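/* Fill DRBD_PEER_AF and DRBD_PEER_ADDRESS into the helper environment,
 * based on the peer address currently configured in net_conf.
 * envp[3] and envp[4] are preallocated scratch buffers (see the callers). */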
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc) {
		switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
	}
	rcu_read_unlock();
}

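/* Run the configured user mode helper for this minor ("minor-<n>"),
 * broadcasting SIB_HELPER_PRE/SIB_HELPER_POST events around the call.
 * Returns the helper's wait status; negative error codes are mapped to 0. */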
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

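/* Write out the meta data of all volumes of this connection. */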
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	down_read(&drbd_cfg_rwsem);
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		drbd_md_sync(mdev);
	up_read(&drbd_cfg_rwsem);
}

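/* Connection-wide variant of drbd_khelper(): sync the meta data of all
 * volumes, then run the user mode helper with the connection name as its
 * argument. Returns the helper's wait status, negative errors mapped to 0. */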
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

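/* Return the most stringent fencing policy of all volumes in this
 * connection that still have at least a Consistent local disk. */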
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

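/* Run the fence-peer helper and translate its exit code into a state change
 * of the peer disk (or, for exit code 6, of our own disk).
 * Returns true if the peer disk can be considered Outdated or worse. */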
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection
	   in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

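/* kthread entry point for asynchronous fencing; drops the connection
 * reference taken by conn_try_outdate_peer_async(). */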
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

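/* Fire-and-forget fencing: run conn_try_outdate_peer() from a separate
 * kernel thread, so the caller does not block on the fence-peer helper. */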
void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

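/**
 * drbd_set_role() - Change the role of a device to Primary or Secondary
 * @mdev:	DRBD device.
 * @new_role:	R_PRIMARY or R_SECONDARY.
 * @force:	when becoming Primary, accept a local disk that is not UpToDate.
 *
 * Retries the cluster-wide state change a few times, outdating or fencing
 * the peer where necessary, and updates disk ro state and UUIDs afterwards.
 */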
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->net_conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->want_lose = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->net_conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage is done in the after-state-change work already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

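/* genetlink handler: promote to Primary (optionally assuming the local data
 * is up to date) or demote to Secondary, by calling drbd_set_role(). */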
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			(unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

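/* Compute the agreed device size (in sectors) from the peer's size, our own
 * maximum capacity, the last agreed size, and a possible user-requested size;
 * complains if both nodes are diskless or the requested size does not fit. */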
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
		dc->al_extents = DRBD_AL_EXTENTS_MIN;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

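/* Apply max_bio_size to our request queue limits, stacking on top of the
 * backing device's queue (if attached) and adopting its read-ahead setting. */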
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

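/* Re-evaluate the maximal BIO size from the local backing device and the
 * peer's limit (depending on the agreed protocol version), then apply it. */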
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   From 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
1038
Philipp Reisnerb411b362009-09-25 16:07:19 -07001039/* serialize deconfig (worker exiting, doing cleanup)
1040 * and reconfig (drbdsetup disk, drbdsetup net)
1041 *
Lars Ellenbergc518d042010-09-01 09:50:23 +02001042 * Wait for a potentially exiting worker, then restart it,
1043 * or start a new one. Flush any pending work, there may still be an
1044 * after_state_change queued.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001045 */
Philipp Reisner0e29d162011-02-18 14:23:11 +01001046static void conn_reconfig_start(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001047{
Philipp Reisner0e29d162011-02-18 14:23:11 +01001048 wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
1049 wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
1050 drbd_thread_start(&tconn->worker);
1051 conn_flush_workqueue(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001052}
1053
1054/* if still unconfigured, stops worker again.
1055 * if configured now, clears CONFIG_PENDING.
1056 * wakes potential waiters */
Philipp Reisner0e29d162011-02-18 14:23:11 +01001057static void conn_reconfig_done(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001058{
Philipp Reisner0e29d162011-02-18 14:23:11 +01001059 spin_lock_irq(&tconn->req_lock);
1060 if (conn_all_vols_unconf(tconn)) {
1061 set_bit(OBJECT_DYING, &tconn->flags);
1062 drbd_thread_stop_nowait(&tconn->worker);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001063 } else
Philipp Reisner0e29d162011-02-18 14:23:11 +01001064 clear_bit(CONFIG_PENDING, &tconn->flags);
1065 spin_unlock_irq(&tconn->req_lock);
1066 wake_up(&tconn->ping_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001067}
1068
Philipp Reisner07782862010-08-31 12:00:50 +02001069/* Make sure IO is suspended before calling this function(). */
1070static void drbd_suspend_al(struct drbd_conf *mdev)
1071{
1072 int s = 0;
1073
Lars Ellenberg61610422011-02-21 13:20:54 +01001074 if (!lc_try_lock(mdev->act_log)) {
Philipp Reisner07782862010-08-31 12:00:50 +02001075 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1076 return;
1077 }
1078
Lars Ellenberg61610422011-02-21 13:20:54 +01001079 drbd_al_shrink(mdev);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001080 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner07782862010-08-31 12:00:50 +02001081 if (mdev->state.conn < C_CONNECTED)
1082 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001083 spin_unlock_irq(&mdev->tconn->req_lock);
Lars Ellenberg61610422011-02-21 13:20:54 +01001084 lc_unlock(mdev->act_log);
Philipp Reisner07782862010-08-31 12:00:50 +02001085
1086 if (s)
1087 dev_info(DEV, "Suspended AL updates\n");
1088}
1089
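/* genetlink handler: change the disk options of an attached device at
 * runtime (resync rate, al-extents, resync-after dependency, ...). */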
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *ndc; /* new disk conf */
	int err, fifo_size;
	int *rs_plan_s = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

/* FIXME freeze IO, cluster wide.
 *
 * We should make sure no-one uses
 * some half-updated struct when we
 * assign it later. */

	ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
	if (!ndc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));
	err = disk_conf_from_attrs_for_change(ndc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(ndc->resync_rate >= 1))
		ndc->resync_rate = 1;

	/* clip to allowed range */
	if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
		ndc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
		ndc->al_extents = DRBD_AL_EXTENTS_MAX;

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, ndc->resync_after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size   = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, ndc);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* FIXME
	 * To avoid someone looking at a half-updated struct, we probably
	 * should have a rw-semaphore on net_conf and disk_conf.
	 */
	mdev->ldev->dc = *ndc;

	drbd_md_sync(mdev);


	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

 fail:
	put_ldev(mdev);
	kfree(ndc);
	kfree(rs_plan_s);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

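/* genetlink handler: attach a local backing device and meta data device to
 * this minor, validating options and sizes before moving to D_ATTACHING. */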
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;
	int cp_discovered = 0;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc = (struct disk_conf) {
		{}, 0, /* backing_dev */
		{}, 0, /* meta_dev */
		0, /* meta_dev_idx */
		DRBD_DISK_SIZE_SECT_DEF, /* disk_size */
		DRBD_MAX_BIO_BVECS_DEF, /* max_bio_bvecs */
		DRBD_ON_IO_ERROR_DEF, /* on_io_error */
		DRBD_FENCING_DEF, /* fencing */
		DRBD_RATE_DEF, /* resync_rate */
		DRBD_AFTER_DEF, /* resync_after */
		DRBD_AL_EXTENTS_DEF, /* al_extents */
		DRBD_C_PLAN_AHEAD_DEF, /* c_plan_ahead */
		DRBD_C_DELAY_TARGET_DEF, /* c_delay_target */
		DRBD_C_FILL_TARGET_DEF, /* c_fill_target */
		DRBD_C_MAX_RATE_DEF, /* c_max_rate */
		DRBD_C_MIN_RATE_DEF, /* c_min_rate */
		0, /* no_disk_barrier */
		0, /* no_disk_flush */
		0, /* no_disk_drain */
		0, /* no_md_flush */
	};

	err = disk_conf_from_attrs(&nbc->dc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (nbc->dc.fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  ((int)nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if ((int)nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
1399 }
1400 }
1401
1402 retcode = drbd_md_read(mdev, nbc);
1403 if (retcode != NO_ERROR)
1404 goto force_diskless_dec;
1405
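	/* The lowest bit of the current UUID tracks the primary role (see the
	 * "|= (u64)1" further below, once attach succeeded), so mask it out
	 * before comparing the exposed data UUID against the on-disk one. */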
1406 if (mdev->state.conn < C_CONNECTED &&
1407 mdev->state.role == R_PRIMARY &&
1408 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1409 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1410 (unsigned long long)mdev->ed_uuid);
1411 retcode = ERR_DATA_NOT_CURRENT;
1412 goto force_diskless_dec;
1413 }
1414
1415 /* Since we are diskless, fix the activity log first... */
Lars Ellenbergf3990022011-03-23 14:31:09 +01001416 if (drbd_check_al_size(mdev, &nbc->dc)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001417 retcode = ERR_NOMEM;
1418 goto force_diskless_dec;
1419 }
1420
1421 /* Prevent shrinking of consistent devices ! */
1422 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Philipp Reisnera393db62009-12-22 13:35:52 +01001423 drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001424 dev_warn(DEV, "refusing to truncate a consistent device\n");
1425 retcode = ERR_DISK_TO_SMALL;
1426 goto force_diskless_dec;
1427 }
1428
1429 if (!drbd_al_read_log(mdev, nbc)) {
1430 retcode = ERR_IO_MD_DISK;
1431 goto force_diskless_dec;
1432 }
1433
Philipp Reisnerb411b362009-09-25 16:07:19 -07001434 /* Reset the "barriers don't work" bits here, then force meta data to
1435 * be written, to ensure we determine if barriers are supported. */
1436 if (nbc->dc.no_md_flush)
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001437 set_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001438 else
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001439 clear_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001440
1441 /* Point of no return reached.
1442 * Devices and memory are no longer released by error cleanup below.
1443 * Now mdev takes over responsibility, and the state engine should
1444 * clean it up somewhere. */
1445 D_ASSERT(mdev->ldev == NULL);
1446 mdev->ldev = nbc;
1447 mdev->resync = resync_lru;
1448 nbc = NULL;
1449 resync_lru = NULL;
1450
Philipp Reisner2451fc32010-08-24 13:43:11 +02001451 mdev->write_ordering = WO_bdev_flush;
1452 drbd_bump_write_ordering(mdev, WO_bdev_flush);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001453
1454 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1455 set_bit(CRASHED_PRIMARY, &mdev->flags);
1456 else
1457 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1458
Philipp Reisner894c6a92010-06-18 16:03:20 +02001459 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02001460 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001461 set_bit(CRASHED_PRIMARY, &mdev->flags);
1462 cp_discovered = 1;
1463 }
1464
1465 mdev->send_cnt = 0;
1466 mdev->recv_cnt = 0;
1467 mdev->read_cnt = 0;
1468 mdev->writ_cnt = 0;
1469
Philipp Reisner99432fc2011-05-20 16:39:13 +02001470 drbd_reconsider_max_bio_size(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001471
1472 /* If I am currently not R_PRIMARY,
1473 * but meta data primary indicator is set,
1474 * I just now recover from a hard crash,
1475 * and have been R_PRIMARY before that crash.
1476 *
1477 * Now, if I had no connection before that crash
1478 * (have been degraded R_PRIMARY), chances are that
1479 * I won't find my peer now either.
1480 *
1481 * In that case, and _only_ in that case,
1482 * we use the degr-wfc-timeout instead of the default,
1483 * so we can automatically recover from a crash of a
1484 * degraded but active "cluster" after a certain timeout.
1485 */
1486 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1487 if (mdev->state.role != R_PRIMARY &&
1488 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1489 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1490 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1491
Bart Van Assche24c48302011-05-21 18:32:29 +02001492 dd = drbd_determine_dev_size(mdev, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001493 if (dd == dev_size_error) {
1494 retcode = ERR_NOMEM_BITMAP;
1495 goto force_diskless_dec;
1496 } else if (dd == grew)
1497 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1498
1499 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1500 dev_info(DEV, "Assuming that all blocks are out of sync "
1501 "(aka FullSync)\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001502 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1503 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001504 retcode = ERR_IO_MD_DISK;
1505 goto force_diskless_dec;
1506 }
1507 } else {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001508 if (drbd_bitmap_io(mdev, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01001509 "read from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001510 retcode = ERR_IO_MD_DISK;
1511 goto force_diskless_dec;
1512 }
1513 }
1514
1515 if (cp_discovered) {
1516 drbd_al_apply_to_bm(mdev);
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001517 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1518 "crashed primary apply AL", BM_LOCKED_MASK)) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001519 retcode = ERR_IO_MD_DISK;
1520 goto force_diskless_dec;
1521 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001522 }
1523
Philipp Reisner07782862010-08-31 12:00:50 +02001524 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1525 drbd_suspend_al(mdev); /* IO is still suspended here... */
1526
Philipp Reisner87eeee42011-01-19 14:16:30 +01001527 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02001528 os = drbd_read_state(mdev);
1529 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001530 /* If MDF_CONSISTENT is not set, go into D_INCONSISTENT state;
1531 otherwise investigate MDF_WAS_UP_TO_DATE...
1532 If MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state,
1533 otherwise into D_CONSISTENT state.
1534 */
1535 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1536 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1537 ns.disk = D_CONSISTENT;
1538 else
1539 ns.disk = D_OUTDATED;
1540 } else {
1541 ns.disk = D_INCONSISTENT;
1542 }
1543
1544 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1545 ns.pdsk = D_OUTDATED;
1546
1547 if ( ns.disk == D_CONSISTENT &&
1548 (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
1549 ns.disk = D_UP_TO_DATE;
1550
1551 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1552 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1553 this point, because drbd_request_state() modifies these
1554 flags. */
1555
1556 /* In case we are C_CONNECTED postpone any decision on the new disk
1557 state until after the negotiation phase. */
1558 if (mdev->state.conn == C_CONNECTED) {
1559 mdev->new_state_tmp.i = ns.i;
1560 ns.i = os.i;
1561 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02001562
1563 /* We expect to receive up-to-date UUIDs soon.
1564 To avoid a race in receive_state, free p_uuid while
1565 holding req_lock. I.e. atomic with the state change */
1566 kfree(mdev->p_uuid);
1567 mdev->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001568 }
1569
1570 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001571 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001572
1573 if (rv < SS_SUCCESS)
1574 goto force_diskless_dec;
1575
1576 if (mdev->state.role == R_PRIMARY)
1577 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1578 else
1579 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1580
1581 drbd_md_mark_dirty(mdev);
1582 drbd_md_sync(mdev);
1583
1584 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1585 put_ldev(mdev);
Philipp Reisner0e29d162011-02-18 14:23:11 +01001586 conn_reconfig_done(mdev->tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001587 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001588 return 0;
1589
1590 force_diskless_dec:
1591 put_ldev(mdev);
1592 force_diskless:
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001593 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001594 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001595 fail:
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001596 conn_reconfig_done(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001597 if (nbc) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001598 if (nbc->backing_bdev)
1599 blkdev_put(nbc->backing_bdev,
1600 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1601 if (nbc->md_bdev)
1602 blkdev_put(nbc->md_bdev,
1603 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001604 kfree(nbc);
1605 }
1606 lc_destroy(resync_lru);
1607
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001608 finish:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001609 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001610 return 0;
1611}
1612
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001613static int adm_detach(struct drbd_conf *mdev)
1614{
Philipp Reisner19f83c72011-03-29 14:21:03 +02001615 enum drbd_state_rv retcode;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001616 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1617 retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
1618 wait_event(mdev->misc_wait,
1619 mdev->state.disk != D_DISKLESS ||
1620 !atomic_read(&mdev->local_cnt));
1621 drbd_resume_io(mdev);
1622 return retcode;
1623}
1624
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001625/* Detaching the disk is a process in multiple stages. First we need to lock
1626 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1627 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1628 * internal references as well.
1629 * Only then we have finally detached. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001630int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001631{
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001632 enum drbd_ret_code retcode;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001633
1634 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1635 if (!adm_ctx.reply_skb)
1636 return retcode;
1637 if (retcode != NO_ERROR)
1638 goto out;
1639
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001640 retcode = adm_detach(adm_ctx.mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001641out:
1642 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001643 return 0;
1644}
1645
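/* Helpers for drbd_adm_net_opts(): walk all volumes of a connection under
 * rcu_read_lock() -- the volume IDR may change concurrently -- and report
 * whether a resync resp. an online verify is currently running. */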
Lars Ellenbergf3990022011-03-23 14:31:09 +01001646static bool conn_resync_running(struct drbd_tconn *tconn)
1647{
1648 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001649 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001650 int vnr;
1651
Philipp Reisner695d08f2011-04-11 22:53:32 -07001652 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001653 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1654 if (mdev->state.conn == C_SYNC_SOURCE ||
1655 mdev->state.conn == C_SYNC_TARGET ||
1656 mdev->state.conn == C_PAUSED_SYNC_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001657 mdev->state.conn == C_PAUSED_SYNC_T) {
1658 rv = true;
1659 break;
1660 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001661 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001662 rcu_read_unlock();
1663
1664 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001665}
1666
1667static bool conn_ov_running(struct drbd_tconn *tconn)
1668{
1669 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001670 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001671 int vnr;
1672
Philipp Reisner695d08f2011-04-11 22:53:32 -07001673 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001674 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1675 if (mdev->state.conn == C_VERIFY_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001676 mdev->state.conn == C_VERIFY_T) {
1677 rv = true;
1678 break;
1679 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001680 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001681 rcu_read_unlock();
1682
1683 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001684}
1685
Philipp Reisnercd643972011-04-13 18:00:59 -07001686static enum drbd_ret_code
Philipp Reisner44ed1672011-04-19 17:10:19 +02001687_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
Philipp Reisnercd643972011-04-13 18:00:59 -07001688{
1689 struct drbd_conf *mdev;
1690 int i;
1691
Philipp Reisner44ed1672011-04-19 17:10:19 +02001692 if (old_conf && tconn->agreed_pro_version < 100 &&
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001693 tconn->cstate == C_WF_REPORT_PARAMS &&
Philipp Reisner44ed1672011-04-19 17:10:19 +02001694 new_conf->wire_protocol != old_conf->wire_protocol)
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001695 return ERR_NEED_APV_100;
1696
Philipp Reisnercd643972011-04-13 18:00:59 -07001697 if (new_conf->two_primaries &&
1698 (new_conf->wire_protocol != DRBD_PROT_C))
1699 return ERR_NOT_PROTO_C;
1700
Philipp Reisnercd643972011-04-13 18:00:59 -07001701 idr_for_each_entry(&tconn->volumes, mdev, i) {
1702 if (get_ldev(mdev)) {
1703 enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1704 put_ldev(mdev);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001705 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
Philipp Reisnercd643972011-04-13 18:00:59 -07001706 return ERR_STONITH_AND_PROT_A;
Philipp Reisnercd643972011-04-13 18:00:59 -07001707 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001708 if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
Philipp Reisnercd643972011-04-13 18:00:59 -07001709 return ERR_DISCARD;
Philipp Reisnercd643972011-04-13 18:00:59 -07001710 }
Philipp Reisnercd643972011-04-13 18:00:59 -07001711
1712 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1713 return ERR_CONG_NOT_PROTO_A;
1714
1715 return NO_ERROR;
1716}
1717
Philipp Reisner44ed1672011-04-19 17:10:19 +02001718static enum drbd_ret_code
1719check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1720{
1721 static enum drbd_ret_code rv;
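	/* Note: 'rv' being static is safe only because all netlink
	 * configuration requests are serialized on genl_lock(); an automatic
	 * variable would arguably be cleaner. */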
1722 struct drbd_conf *mdev;
1723 int i;
1724
1725 rcu_read_lock();
1726 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1727 rcu_read_unlock();
1728
1729 /* tconn->volumes protected by genl_lock() here */
1730 idr_for_each_entry(&tconn->volumes, mdev, i) {
1731 if (!mdev->bitmap) {
1732 if(drbd_bm_init(mdev))
1733 return ERR_NOMEM;
1734 }
1735 }
1736
1737 return rv;
1738}
1739
Lars Ellenbergf3990022011-03-23 14:31:09 +01001740int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1741{
1742 enum drbd_ret_code retcode;
1743 struct drbd_tconn *tconn;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001744 struct net_conf *old_conf, *new_conf = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001745 int err;
1746 int ovr; /* online verify running */
1747 int rsr; /* re-sync running */
1748 struct crypto_hash *verify_tfm = NULL;
1749 struct crypto_hash *csums_tfm = NULL;
1750
1751
1752 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1753 if (!adm_ctx.reply_skb)
1754 return retcode;
1755 if (retcode != NO_ERROR)
1756 goto out;
1757
1758 tconn = adm_ctx.tconn;
1759
1760 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1761 if (!new_conf) {
1762 retcode = ERR_NOMEM;
1763 goto out;
1764 }
1765
Lars Ellenbergf3990022011-03-23 14:31:09 +01001766 conn_reconfig_start(tconn);
1767
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001768 mutex_lock(&tconn->net_conf_update);
1769 old_conf = tconn->net_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001770
1771 if (!old_conf) {
1772 drbd_msg_put_info("net conf missing, try connect");
1773 retcode = ERR_INVALID_REQUEST;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001774 goto fail;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001775 }
1776
1777 *new_conf = *old_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001778
Lars Ellenbergf3990022011-03-23 14:31:09 +01001779 err = net_conf_from_attrs_for_change(new_conf, info);
1780 if (err) {
1781 retcode = ERR_MANDATORY_TAG;
1782 drbd_msg_put_info(from_attrs_err_to_txt(err));
1783 goto fail;
1784 }
1785
Philipp Reisnercd643972011-04-13 18:00:59 -07001786 retcode = check_net_options(tconn, new_conf);
1787 if (retcode != NO_ERROR)
1788 goto fail;
1789
Lars Ellenbergf3990022011-03-23 14:31:09 +01001790 /* re-sync running */
1791 rsr = conn_resync_running(tconn);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001792 if (rsr && old_conf && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001793 retcode = ERR_CSUMS_RESYNC_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001794 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001795 }
1796
1797 if (!rsr && new_conf->csums_alg[0]) {
1798 csums_tfm = crypto_alloc_hash(new_conf->csums_alg, 0, CRYPTO_ALG_ASYNC);
1799 if (IS_ERR(csums_tfm)) {
1800 csums_tfm = NULL;
1801 retcode = ERR_CSUMS_ALG;
1802 goto fail;
1803 }
1804
1805 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
1806 retcode = ERR_CSUMS_ALG_ND;
1807 goto fail;
1808 }
1809 }
1810
1811 /* online verify running */
1812 ovr = conn_ov_running(tconn);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001813 if (ovr) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001814 if (strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001815 retcode = ERR_VERIFY_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001816 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001817 }
1818 }
1819
1820 if (!ovr && new_conf->verify_alg[0]) {
1821 verify_tfm = crypto_alloc_hash(new_conf->verify_alg, 0, CRYPTO_ALG_ASYNC);
1822 if (IS_ERR(verify_tfm)) {
1823 verify_tfm = NULL;
1824 retcode = ERR_VERIFY_ALG;
1825 goto fail;
1826 }
1827
1828 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
1829 retcode = ERR_VERIFY_ALG_ND;
1830 goto fail;
1831 }
1832 }
1833
Philipp Reisner44ed1672011-04-19 17:10:19 +02001834 rcu_assign_pointer(tconn->net_conf, new_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001835
1836 if (!rsr) {
1837 crypto_free_hash(tconn->csums_tfm);
1838 tconn->csums_tfm = csums_tfm;
1839 csums_tfm = NULL;
1840 }
1841 if (!ovr) {
1842 crypto_free_hash(tconn->verify_tfm);
1843 tconn->verify_tfm = verify_tfm;
1844 verify_tfm = NULL;
1845 }
1846
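	/* The new net_conf is published (rcu_assign_pointer above); wait for
	 * all RCU readers that may still see the old one before freeing it. */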
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001847 mutex_unlock(&tconn->net_conf_update);
1848 synchronize_rcu();
1849 kfree(old_conf);
1850
Lars Ellenbergf3990022011-03-23 14:31:09 +01001851 if (tconn->cstate >= C_WF_REPORT_PARAMS)
1852 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
1853
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001854 goto done;
1855
Lars Ellenbergf3990022011-03-23 14:31:09 +01001856 fail:
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001857 mutex_unlock(&tconn->net_conf_update);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001858 crypto_free_hash(csums_tfm);
1859 crypto_free_hash(verify_tfm);
1860 kfree(new_conf);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001861 done:
Lars Ellenbergf3990022011-03-23 14:31:09 +01001862 conn_reconfig_done(tconn);
1863 out:
1864 drbd_adm_finish(info, retcode);
1865 return 0;
1866}
1867
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001868int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001869{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001870 char hmac_name[CRYPTO_MAX_ALG_NAME];
1871 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001872 struct net_conf *old_conf, *new_conf = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001873 struct crypto_hash *tfm = NULL;
1874 struct crypto_hash *integrity_w_tfm = NULL;
1875 struct crypto_hash *integrity_r_tfm = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001876 void *int_dig_in = NULL;
1877 void *int_dig_vv = NULL;
Philipp Reisner80883192011-02-18 14:56:45 +01001878 struct drbd_tconn *oconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001879 struct drbd_tconn *tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001880 struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001881 enum drbd_ret_code retcode;
1882 int i;
1883 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001884
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001885 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1886 if (!adm_ctx.reply_skb)
1887 return retcode;
1888 if (retcode != NO_ERROR)
1889 goto out;
1890
1891 tconn = adm_ctx.tconn;
Philipp Reisner80883192011-02-18 14:56:45 +01001892 conn_reconfig_start(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001893
Philipp Reisner80883192011-02-18 14:56:45 +01001894 if (tconn->cstate > C_STANDALONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001895 retcode = ERR_NET_CONFIGURED;
1896 goto fail;
1897 }
1898
1899 /* allocation not in the IO path, cqueue thread context */
Lars Ellenbergf3990022011-03-23 14:31:09 +01001900 new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001901 if (!new_conf) {
1902 retcode = ERR_NOMEM;
1903 goto fail;
1904 }
1905
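	/* Start from compiled-in defaults; this positional initializer must
	 * match the field order of struct net_conf. */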
Lars Ellenbergf3990022011-03-23 14:31:09 +01001906 *new_conf = (struct net_conf) {
1907 {}, 0, /* my_addr */
1908 {}, 0, /* peer_addr */
1909 {}, 0, /* shared_secret */
1910 {}, 0, /* cram_hmac_alg */
1911 {}, 0, /* integrity_alg */
1912 {}, 0, /* verify_alg */
1913 {}, 0, /* csums_alg */
1914 DRBD_PROTOCOL_DEF, /* wire_protocol */
1915 DRBD_CONNECT_INT_DEF, /* try_connect_int */
1916 DRBD_TIMEOUT_DEF, /* timeout */
1917 DRBD_PING_INT_DEF, /* ping_int */
1918 DRBD_PING_TIMEO_DEF, /* ping_timeo */
1919 DRBD_SNDBUF_SIZE_DEF, /* sndbuf_size */
1920 DRBD_RCVBUF_SIZE_DEF, /* rcvbuf_size */
1921 DRBD_KO_COUNT_DEF, /* ko_count */
1922 DRBD_MAX_BUFFERS_DEF, /* max_buffers */
1923 DRBD_MAX_EPOCH_SIZE_DEF, /* max_epoch_size */
1924 DRBD_UNPLUG_WATERMARK_DEF, /* unplug_watermark */
1925 DRBD_AFTER_SB_0P_DEF, /* after_sb_0p */
1926 DRBD_AFTER_SB_1P_DEF, /* after_sb_1p */
1927 DRBD_AFTER_SB_2P_DEF, /* after_sb_2p */
1928 DRBD_RR_CONFLICT_DEF, /* rr_conflict */
1929 DRBD_ON_CONGESTION_DEF, /* on_congestion */
1930 DRBD_CONG_FILL_DEF, /* cong_fill */
1931 DRBD_CONG_EXTENTS_DEF, /* cong_extents */
1932 0, /* two_primaries */
1933 0, /* want_lose */
1934 0, /* no_cork */
1935 0, /* always_asbp */
1936 0, /* dry_run */
1937 0, /* use_rle */
1938 };
Philipp Reisnerb411b362009-09-25 16:07:19 -07001939
Lars Ellenbergf3990022011-03-23 14:31:09 +01001940 err = net_conf_from_attrs(new_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001941 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001942 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001943 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001944 goto fail;
1945 }
1946
Philipp Reisnercd643972011-04-13 18:00:59 -07001947 retcode = check_net_options(tconn, new_conf);
1948 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001949 goto fail;
Philipp Reisner422028b2010-10-27 11:12:07 +02001950
Philipp Reisnerb411b362009-09-25 16:07:19 -07001951 retcode = NO_ERROR;
1952
1953 new_my_addr = (struct sockaddr *)&new_conf->my_addr;
1954 new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
Lars Ellenberg543cc102011-03-10 22:18:18 +01001955
Philipp Reisneref356262011-04-13 14:21:29 -07001956 /* No need to take drbd_cfg_rwsem here. All reconfiguration is
Lars Ellenberg543cc102011-03-10 22:18:18 +01001957 * strictly serialized on genl_lock(). We are protected against
1958 * concurrent reconfiguration/addition/deletion */
Philipp Reisner80883192011-02-18 14:56:45 +01001959 list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001960 struct net_conf *nc;
Philipp Reisner80883192011-02-18 14:56:45 +01001961 if (oconn == tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001962 continue;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001963
1964 rcu_read_lock();
1965 nc = rcu_dereference(oconn->net_conf);
1966 if (nc) {
1967 taken_addr = (struct sockaddr *)&nc->my_addr;
1968 if (new_conf->my_addr_len == nc->my_addr_len &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07001969 !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1970 retcode = ERR_LOCAL_ADDR;
1971
Philipp Reisner44ed1672011-04-19 17:10:19 +02001972 taken_addr = (struct sockaddr *)&nc->peer_addr;
1973 if (new_conf->peer_addr_len == nc->peer_addr_len &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07001974 !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1975 retcode = ERR_PEER_ADDR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001976 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001977 rcu_read_unlock();
1978 if (retcode != NO_ERROR)
1979 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001980 }
1981
1982 if (new_conf->cram_hmac_alg[0] != 0) {
1983 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1984 new_conf->cram_hmac_alg);
1985 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
1986 if (IS_ERR(tfm)) {
1987 tfm = NULL;
1988 retcode = ERR_AUTH_ALG;
1989 goto fail;
1990 }
1991
Philipp Reisner07982192009-12-28 16:58:38 +01001992 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001993 retcode = ERR_AUTH_ALG_ND;
1994 goto fail;
1995 }
1996 }
1997
1998 if (new_conf->integrity_alg[0]) {
1999 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
2000 if (IS_ERR(integrity_w_tfm)) {
2001 integrity_w_tfm = NULL;
2002 retcode=ERR_INTEGRITY_ALG;
2003 goto fail;
2004 }
2005
2006 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
2007 retcode=ERR_INTEGRITY_ALG_ND;
2008 goto fail;
2009 }
2010
2011 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
2012 if (IS_ERR(integrity_r_tfm)) {
2013 integrity_r_tfm = NULL;
2014 retcode=ERR_INTEGRITY_ALG;
2015 goto fail;
2016 }
2017 }
2018
Philipp Reisnerb411b362009-09-25 16:07:19 -07002019 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2020
Philipp Reisner80883192011-02-18 14:56:45 +01002021 /* allocation not in the IO path, cqueue thread context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002022 if (integrity_w_tfm) {
2023 i = crypto_hash_digestsize(integrity_w_tfm);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002024 int_dig_in = kmalloc(i, GFP_KERNEL);
2025 if (!int_dig_in) {
2026 retcode = ERR_NOMEM;
2027 goto fail;
2028 }
2029 int_dig_vv = kmalloc(i, GFP_KERNEL);
2030 if (!int_dig_vv) {
2031 retcode = ERR_NOMEM;
2032 goto fail;
2033 }
2034 }
2035
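	/* Commit point: drain the work queue, then publish new_conf under
	 * net_conf_update -- failing if somebody else configured the network
	 * in the meantime. */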
Philipp Reisner80883192011-02-18 14:56:45 +01002036 conn_flush_workqueue(tconn);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002037
2038 mutex_lock(&tconn->net_conf_update);
2039 old_conf = tconn->net_conf;
2040 if (old_conf) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002041 retcode = ERR_NET_CONFIGURED;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002042 mutex_unlock(&tconn->net_conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002043 goto fail;
2044 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002045 rcu_assign_pointer(tconn->net_conf, new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002046
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002047 conn_free_crypto(tconn);
Philipp Reisner80883192011-02-18 14:56:45 +01002048 tconn->cram_hmac_tfm = tfm;
Philipp Reisner80883192011-02-18 14:56:45 +01002049 tconn->integrity_w_tfm = integrity_w_tfm;
Philipp Reisner80883192011-02-18 14:56:45 +01002050 tconn->integrity_r_tfm = integrity_r_tfm;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002051 tconn->int_dig_in = int_dig_in;
2052 tconn->int_dig_vv = int_dig_vv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002053
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002054 mutex_unlock(&tconn->net_conf_update);
2055
2056 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002057
Philipp Reisner695d08f2011-04-11 22:53:32 -07002058 rcu_read_lock();
Philipp Reisner80883192011-02-18 14:56:45 +01002059 idr_for_each_entry(&tconn->volumes, mdev, i) {
2060 mdev->send_cnt = 0;
2061 mdev->recv_cnt = 0;
2062 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
2063 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07002064 rcu_read_unlock();
Philipp Reisner80883192011-02-18 14:56:45 +01002065 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002066 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002067 return 0;
2068
2069fail:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002070 kfree(int_dig_in);
2071 kfree(int_dig_vv);
2072 crypto_free_hash(tfm);
2073 crypto_free_hash(integrity_w_tfm);
2074 crypto_free_hash(integrity_r_tfm);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002075 kfree(new_conf);
2076
Philipp Reisner80883192011-02-18 14:56:45 +01002077 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002078out:
2079 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002080 return 0;
2081}
2082
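/* Try to bring the connection down gracefully. If the fencing policy gets in
 * the way, retry with the peer (or ourselves) marked D_OUTDATED; with force
 * set, go directly to C_DISCONNECTING. */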
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002083static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2084{
2085 enum drbd_state_rv rv;
2086 if (force) {
2087 spin_lock_irq(&tconn->req_lock);
2088 if (tconn->cstate >= C_WF_CONNECTION)
2089 _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
2090 spin_unlock_irq(&tconn->req_lock);
2091 return SS_SUCCESS;
2092 }
2093
2094 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
2095
2096 switch (rv) {
2097 case SS_NOTHING_TO_DO:
2098 case SS_ALREADY_STANDALONE:
2099 return SS_SUCCESS;
2100 case SS_PRIMARY_NOP:
2101 /* Our state checking code wants to see the peer outdated. */
2102 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2103 pdsk, D_OUTDATED), CS_VERBOSE);
2104 break;
2105 case SS_CW_FAILED_BY_PEER:
2106 /* The peer probably wants to see us outdated. */
2107 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2108 disk, D_OUTDATED), 0);
2109 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2110 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
2111 rv = SS_SUCCESS;
2112 }
2113 break;
2114 default:;
2115 /* no special handling necessary */
2116 }
2117
2118 return rv;
2119}
2120
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002121int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002122{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002123 struct disconnect_parms parms;
2124 struct drbd_tconn *tconn;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002125 enum drbd_state_rv rv;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002126 enum drbd_ret_code retcode;
2127 int err;
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002128
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002129 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2130 if (!adm_ctx.reply_skb)
2131 return retcode;
2132 if (retcode != NO_ERROR)
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002133 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002134
2135 tconn = adm_ctx.tconn;
2136 memset(&parms, 0, sizeof(parms));
2137 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002138 err = disconnect_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002139 if (err) {
2140 retcode = ERR_MANDATORY_TAG;
2141 drbd_msg_put_info(from_attrs_err_to_txt(err));
2142 goto fail;
2143 }
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002144 }
2145
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002146 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2147 if (rv < SS_SUCCESS)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002148 goto fail;
2149
Philipp Reisnerdf24aa42011-02-15 11:14:44 +01002150 if (wait_event_interruptible(tconn->ping_wait,
2151 tconn->cstate != C_DISCONNECTING)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002152 /* Do not test for mdev->state.conn == C_STANDALONE, since
2153 someone else might connect us in the meantime! */
2154 retcode = ERR_INTR;
2155 goto fail;
2156 }
2157
Philipp Reisnerb411b362009-09-25 16:07:19 -07002158 retcode = NO_ERROR;
2159 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002160 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002161 return 0;
2162}
2163
2164void resync_after_online_grow(struct drbd_conf *mdev)
2165{
2166 int iass; /* I am sync source */
2167
2168 dev_info(DEV, "Resync of new storage after online grow\n");
2169 if (mdev->state.role != mdev->state.peer)
2170 iass = (mdev->state.role == R_PRIMARY);
2171 else
Philipp Reisner25703f82011-02-07 14:35:25 +01002172 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002173
2174 if (iass)
2175 drbd_start_resync(mdev, C_SYNC_SOURCE);
2176 else
2177 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2178}
2179
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002180int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002181{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002182 struct resize_parms rs;
2183 struct drbd_conf *mdev;
2184 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002185 enum determine_dev_size dd;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002186 enum dds_flags ddsf;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002187 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002188
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002189 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2190 if (!adm_ctx.reply_skb)
2191 return retcode;
2192 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002193 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002194
2195 memset(&rs, 0, sizeof(struct resize_parms));
2196 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002197 err = resize_parms_from_attrs(&rs, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002198 if (err) {
2199 retcode = ERR_MANDATORY_TAG;
2200 drbd_msg_put_info(from_attrs_err_to_txt(err));
2201 goto fail;
2202 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002203 }
2204
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002205 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002206 if (mdev->state.conn > C_CONNECTED) {
2207 retcode = ERR_RESIZE_RESYNC;
2208 goto fail;
2209 }
2210
2211 if (mdev->state.role == R_SECONDARY &&
2212 mdev->state.peer == R_SECONDARY) {
2213 retcode = ERR_NO_PRIMARY;
2214 goto fail;
2215 }
2216
2217 if (!get_ldev(mdev)) {
2218 retcode = ERR_NO_DISK;
2219 goto fail;
2220 }
2221
Philipp Reisner31890f42011-01-19 14:12:51 +01002222 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002223 retcode = ERR_NEED_APV_93;
2224 goto fail;
2225 }
2226
Philipp Reisner087c2492010-03-26 13:49:56 +01002227 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002228 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002229
2230 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
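	/* Map the request onto device-size flags: resize_force becomes
	 * DDSF_FORCED, and no_resync (which requires agreed protocol version
	 * >= 93, checked above) becomes DDSF_NO_RESYNC to skip resyncing a
	 * grown device. */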
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002231 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
Bart Van Assche24c48302011-05-21 18:32:29 +02002232 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002233 drbd_md_sync(mdev);
2234 put_ldev(mdev);
2235 if (dd == dev_size_error) {
2236 retcode = ERR_NOMEM_BITMAP;
2237 goto fail;
2238 }
2239
Philipp Reisner087c2492010-03-26 13:49:56 +01002240 if (mdev->state.conn == C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002241 if (dd == grew)
2242 set_bit(RESIZE_PENDING, &mdev->flags);
2243
2244 drbd_send_uuids(mdev);
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002245 drbd_send_sizes(mdev, 1, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002246 }
2247
2248 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002249 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002250 return 0;
2251}
2252
Lars Ellenbergf3990022011-03-23 14:31:09 +01002253int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002254{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002255 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002256 cpumask_var_t new_cpu_mask;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002257 struct drbd_tconn *tconn;
Philipp Reisner778f2712010-07-06 11:14:00 +02002258 int *rs_plan_s = NULL;
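	/* rs_plan_s is never assigned in this function; it appears to be a
	 * leftover, and only the kfree(NULL) at the fail label remains. */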
Lars Ellenbergf3990022011-03-23 14:31:09 +01002259 struct res_opts sc;
2260 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002261
Lars Ellenbergf3990022011-03-23 14:31:09 +01002262 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002263 if (!adm_ctx.reply_skb)
2264 return retcode;
2265 if (retcode != NO_ERROR)
2266 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002267 tconn = adm_ctx.tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002268
Philipp Reisnerb411b362009-09-25 16:07:19 -07002269 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2270 retcode = ERR_NOMEM;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002271 drbd_msg_put_info("unable to allocate cpumask");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002272 goto fail;
2273 }
2274
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002275 if (((struct drbd_genlmsghdr*)info->userhdr)->flags
2276 & DRBD_GENL_F_SET_DEFAULTS) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002277 memset(&sc, 0, sizeof(struct res_opts));
Philipp Reisner265be2d2010-05-31 10:14:17 +02002278 sc.on_no_data = DRBD_ON_NO_DATA_DEF;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002279 } else
Lars Ellenbergf3990022011-03-23 14:31:09 +01002280 sc = tconn->res_opts;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002281
Lars Ellenbergf3990022011-03-23 14:31:09 +01002282 err = res_opts_from_attrs(&sc, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002283 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002284 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002285 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002286 goto fail;
2287 }
2288
Philipp Reisnerb411b362009-09-25 16:07:19 -07002289 /* silently ignore cpu mask on UP kernel */
2290 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
2291 err = __bitmap_parse(sc.cpu_mask, 32, 0,
2292 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2293 if (err) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002294 conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002295 retcode = ERR_CPU_MASK_PARSE;
2296 goto fail;
2297 }
2298 }
2299
Lars Ellenberg7ad651b2011-02-21 13:21:03 +01002300
Lars Ellenbergf3990022011-03-23 14:31:09 +01002301 tconn->res_opts = sc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002302
Lars Ellenbergf3990022011-03-23 14:31:09 +01002303 if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2304 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2305 drbd_calc_cpu_mask(tconn);
2306 tconn->receiver.reset_cpu_mask = 1;
2307 tconn->asender.reset_cpu_mask = 1;
2308 tconn->worker.reset_cpu_mask = 1;
Philipp Reisner778f2712010-07-06 11:14:00 +02002309 }
2310
Philipp Reisnerb411b362009-09-25 16:07:19 -07002311fail:
Philipp Reisner778f2712010-07-06 11:14:00 +02002312 kfree(rs_plan_s);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002313 free_cpumask_var(new_cpu_mask);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002314
2315 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002316 return 0;
2317}
2318
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002319int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002320{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002321 struct drbd_conf *mdev;
2322 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2323
2324 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2325 if (!adm_ctx.reply_skb)
2326 return retcode;
2327 if (retcode != NO_ERROR)
2328 goto out;
2329
2330 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002331
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002332 /* If there is still bitmap IO pending, probably because a previous
2333 * resync has just finished, wait for it before requesting a new resync. */
2334 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2335
Philipp Reisnerb411b362009-09-25 16:07:19 -07002336 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2337
2338 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2339 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2340
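	/* If the resync cannot start because we are not connected, force the
	 * local disk to D_INCONSISTENT and retry until the state change stops
	 * reporting SS_NEED_CONNECTION. */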
2341 while (retcode == SS_NEED_CONNECTION) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01002342 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002343 if (mdev->state.conn < C_CONNECTED)
2344 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002345 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002346
2347 if (retcode != SS_NEED_CONNECTION)
2348 break;
2349
2350 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2351 }
2352
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002353out:
2354 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002355 return 0;
2356}
2357
Philipp Reisner07782862010-08-31 12:00:50 +02002358static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2359{
2360 int rv;
2361
2362 rv = drbd_bmio_set_n_write(mdev);
2363 drbd_suspend_al(mdev);
2364 return rv;
2365}
2366
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002367static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2368 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002369{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002370 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002371
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002372 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2373 if (!adm_ctx.reply_skb)
2374 return retcode;
2375 if (retcode != NO_ERROR)
2376 goto out;
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002377
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002378 retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2379out:
2380 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002381 return 0;
2382}
2383
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002384int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002385{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002386 return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2387}
Philipp Reisnerb411b362009-09-25 16:07:19 -07002388
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002389int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2390{
2391 enum drbd_ret_code retcode;
2392
2393 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2394 if (!adm_ctx.reply_skb)
2395 return retcode;
2396 if (retcode != NO_ERROR)
2397 goto out;
2398
2399 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002400 retcode = ERR_PAUSE_IS_SET;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002401out:
2402 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002403 return 0;
2404}
2405
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002406int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002407{
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02002408 union drbd_dev_state s;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002409 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002410
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002411 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2412 if (!adm_ctx.reply_skb)
2413 return retcode;
2414 if (retcode != NO_ERROR)
2415 goto out;
2416
2417 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2418 s = adm_ctx.mdev->state;
Philipp Reisnercd88d032011-01-20 11:46:41 +01002419 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2420 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2421 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2422 } else {
2423 retcode = ERR_PAUSE_IS_CLEAR;
2424 }
2425 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002426
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002427out:
2428 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002429 return 0;
2430}
2431
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002432int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002433{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002434 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002435}
2436
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002437int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002438{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002439 struct drbd_conf *mdev;
2440 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2441
2442 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2443 if (!adm_ctx.reply_skb)
2444 return retcode;
2445 if (retcode != NO_ERROR)
2446 goto out;
2447
2448 mdev = adm_ctx.mdev;
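	/* While IO was frozen we may have postponed creating a new current
	 * UUID; generate it now, before resuming IO. */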
Philipp Reisner43a51822010-06-11 11:26:34 +02002449 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2450 drbd_uuid_new_current(mdev);
2451 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02002452 }
Philipp Reisner265be2d2010-05-31 10:14:17 +02002453 drbd_suspend_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002454 retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2455 if (retcode == SS_SUCCESS) {
Philipp Reisner265be2d2010-05-31 10:14:17 +02002456 if (mdev->state.conn < C_CONNECTED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002457 tl_clear(mdev->tconn);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002458 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002459 tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002460 }
2461 drbd_resume_io(mdev);
2462
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002463out:
2464 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002465 return 0;
2466}
2467
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002468int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002469{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002470 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002471}
2472
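/* Fill in the configuration context (connection name, volume number) of a
 * netlink message. The NLA_PUT_* macros jump to nla_put_failure when the skb
 * runs out of space. */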
Lars Ellenberg543cc102011-03-10 22:18:18 +01002473int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2474{
2475 struct nlattr *nla;
2476 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2477 if (!nla)
2478 goto nla_put_failure;
2479 if (vnr != VOLUME_UNSPECIFIED)
2480 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2481 NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2482 nla_nest_end(skb, nla);
2483 return 0;
2484
2485nla_put_failure:
2486 if (nla)
2487 nla_nest_cancel(skb, nla);
2488 return -EMSGSIZE;
2489}
2490
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002491int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2492 const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002493{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002494 struct state_info *si = NULL; /* for sizeof(si->member); */
Philipp Reisner44ed1672011-04-19 17:10:19 +02002495 struct net_conf *nc;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002496 struct nlattr *nla;
2497 int got_ldev;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002498 int err = 0;
2499 int exclude_sensitive;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002500
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002501 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2502 * to. So we better exclude_sensitive information.
2503 *
2504 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2505 * in the context of the requesting user process. Exclude sensitive
2506 * information, unless current has CAP_SYS_ADMIN.
2507 *
2508 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2509 * relies on the current implementation of netlink_dump(), which
2510 * executes the dump callback successively from netlink_recvmsg(),
2511 * always in the context of the receiving process */
2512 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002513
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002514 got_ldev = get_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002515
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002516 /* We need to add connection name and volume number information still.
2517 * Minor number is in drbd_genlmsghdr. */
Lars Ellenberg543cc102011-03-10 22:18:18 +01002518 if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002519 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002520
Lars Ellenbergf3990022011-03-23 14:31:09 +01002521 if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2522 goto nla_put_failure;
2523
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002524 if (got_ldev)
2525 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2526 goto nla_put_failure;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002527
2528 rcu_read_lock();
2529 nc = rcu_dereference(mdev->tconn->net_conf);
2530 if (nc)
2531 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2532 rcu_read_unlock();
2533 if (err)
2534 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002535
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002536 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2537 if (!nla)
2538 goto nla_put_failure;
2539 NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2540 NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2541 NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2542 NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002543
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002544 if (got_ldev) {
2545 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2546 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2547 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2548 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2549 if (C_SYNC_SOURCE <= mdev->state.conn &&
2550 C_PAUSED_SYNC_T >= mdev->state.conn) {
2551 NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2552 NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002553 }
2554 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002555
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002556 if (sib) {
2557 switch(sib->sib_reason) {
2558 case SIB_SYNC_PROGRESS:
2559 case SIB_GET_STATUS_REPLY:
2560 break;
2561 case SIB_STATE_CHANGE:
2562 NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2563 NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2564 break;
2565 case SIB_HELPER_POST:
2566 NLA_PUT_U32(skb,
2567 T_helper_exit_code, sib->helper_exit_code);
2568 /* fall through */
2569 case SIB_HELPER_PRE:
2570 NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2571 break;
2572 }
2573 }
2574 nla_nest_end(skb, nla);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002575
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002576 if (0)
2577nla_put_failure:
2578 err = -EMSGSIZE;
2579 if (got_ldev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002580 put_ldev(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002581 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002582}
2583
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002584int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002585{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002586 enum drbd_ret_code retcode;
2587 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002588
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002589 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2590 if (!adm_ctx.reply_skb)
2591 return retcode;
2592 if (retcode != NO_ERROR)
2593 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002594
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002595 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2596 if (err) {
2597 nlmsg_free(adm_ctx.reply_skb);
2598 return err;
2599 }
2600out:
2601 drbd_adm_finish(info, retcode);
2602 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002603}
2604
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002605int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002606{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002607 struct drbd_conf *mdev;
2608 struct drbd_genlmsghdr *dh;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002609 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2610 struct drbd_tconn *tconn = NULL;
2611 struct drbd_tconn *tmp;
2612 unsigned volume = cb->args[1];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002613
Lars Ellenberg543cc102011-03-10 22:18:18 +01002614	/* Open-coded, deferred iteration:
2615 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2616 * idr_for_each_entry(&tconn->volumes, mdev, i) {
2617 * ...
2618 * }
2619 * }
2620 * where tconn is cb->args[0];
2621 * and i is cb->args[1];
2622 *
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002623 * cb->args[2] indicates if we shall loop over all resources,
2624 * or just dump all volumes of a single resource.
2625 *
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002626 * This may miss entries inserted after this dump started,
2627 * or entries deleted before they are reached.
Lars Ellenberg543cc102011-03-10 22:18:18 +01002628 *
2629 * We need to make sure the mdev won't disappear while
2630 * we are looking at it, and revalidate our iterators
2631 * on each iteration.
2632 */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002633
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002634 /* synchronize with conn_create()/conn_destroy() */
Philipp Reisneref356262011-04-13 14:21:29 -07002635 down_read(&drbd_cfg_rwsem);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002636 /* revalidate iterator position */
2637 list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2638 if (pos == NULL) {
2639 /* first iteration */
2640 pos = tmp;
2641 tconn = pos;
2642 break;
2643 }
2644 if (tmp == pos) {
2645 tconn = pos;
2646 break;
2647 }
2648 }
2649 if (tconn) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002650next_tconn:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002651 mdev = idr_get_next(&tconn->volumes, &volume);
2652 if (!mdev) {
2653 /* No more volumes to dump on this tconn.
2654 * Advance tconn iterator. */
2655 pos = list_entry(tconn->all_tconn.next,
2656 struct drbd_tconn, all_tconn);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002657 /* Did we dump any volume on this tconn yet? */
Lars Ellenberg543cc102011-03-10 22:18:18 +01002658 if (volume != 0) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002659 /* If we reached the end of the list,
2660 * or only a single resource dump was requested,
2661 * we are done. */
2662 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2663 goto out;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002664 volume = 0;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002665 tconn = pos;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002666 goto next_tconn;
2667 }
2668 }
2669
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002670 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2671 cb->nlh->nlmsg_seq, &drbd_genl_family,
2672 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2673 if (!dh)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002674 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002675
Lars Ellenberg543cc102011-03-10 22:18:18 +01002676 if (!mdev) {
2677 /* this is a tconn without a single volume */
2678 dh->minor = -1U;
2679 dh->ret_code = NO_ERROR;
2680 if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2681 genlmsg_cancel(skb, dh);
2682 else
2683 genlmsg_end(skb, dh);
2684 goto out;
2685 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002686
Lars Ellenberg543cc102011-03-10 22:18:18 +01002687 D_ASSERT(mdev->vnr == volume);
2688 D_ASSERT(mdev->tconn == tconn);
2689
2690 dh->minor = mdev_to_minor(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002691 dh->ret_code = NO_ERROR;
2692
2693 if (nla_put_status_info(skb, mdev, NULL)) {
2694 genlmsg_cancel(skb, dh);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002695 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002696 }
2697 genlmsg_end(skb, dh);
2698 }
2699
Lars Ellenberg543cc102011-03-10 22:18:18 +01002700out:
Philipp Reisneref356262011-04-13 14:21:29 -07002701 up_read(&drbd_cfg_rwsem);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002702 /* where to start the next iteration */
2703 cb->args[0] = (long)pos;
2704 cb->args[1] = (pos == tconn) ? volume + 1 : 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002705
Lars Ellenberg543cc102011-03-10 22:18:18 +01002706	/* Finding no more tconns/volumes/minors results in an empty skb,
2707	 * which will terminate the dump. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002708 return skb->len;
2709}
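
/* A reduced sketch, not driver code, of the dump cursor mechanism used by
 * get_one_status(): any state needed to resume has to live in cb->args[],
 * because the callback is called again for every reply skb until it adds
 * nothing and the then-empty skb terminates the dump.  The fixed bound of
 * four below is only a stand-in for iterating real objects. */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_genlmsghdr *dh;
	long i;

	for (i = cb->args[0]; i < 4; i++) {
		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				 cb->nlh->nlmsg_seq, &drbd_genl_family,
				 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			break;	/* skb full, continue in the next one */
		dh->minor = -1U;
		dh->ret_code = NO_ERROR;
		genlmsg_end(skb, dh);
	}
	cb->args[0] = i;	/* resume point for the next invocation */
	return skb->len;	/* an empty skb (len 0) ends the dump */
}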
2710
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002711/*
2712 * Request status of all resources, or of all volumes within a single resource.
2713 *
2714 * This is a dump, as the answer may not fit in a single reply skb otherwise,
2715 * which means we cannot use the family->attrbuf or other such members, because
2716 * dump is NOT protected by the genl_lock(). During dump, we only have access
2717 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2718 *
2719 * Once things are set up properly, we call into get_one_status().
2720 */
2721int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2722{
2723 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2724 struct nlattr *nla;
2725 const char *conn_name;
2726 struct drbd_tconn *tconn;
2727
2728 /* Is this a followup call? */
2729 if (cb->args[0]) {
2730 /* ... of a single resource dump,
2731 * and the resource iterator has been advanced already? */
2732 if (cb->args[2] && cb->args[2] != cb->args[0])
2733 return 0; /* DONE. */
2734 goto dump;
2735 }
2736
2737 /* First call (from netlink_dump_start). We need to figure out
2738 * which resource(s) the user wants us to dump. */
2739 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2740 nlmsg_attrlen(cb->nlh, hdrlen),
2741 DRBD_NLA_CFG_CONTEXT);
2742
2743 /* No explicit context given. Dump all. */
2744 if (!nla)
2745 goto dump;
2746 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
2747 /* context given, but no name present? */
2748 if (!nla)
2749 return -EINVAL;
2750 conn_name = nla_data(nla);
2751 tconn = conn_by_name(conn_name);
2752 if (!tconn)
2753 return -ENODEV;
2754
2755 /* prime iterators, and set "filter" mode mark:
2756 * only dump this tconn. */
2757 cb->args[0] = (long)tconn;
2758 /* cb->args[1] = 0; passed in this way. */
2759 cb->args[2] = (long)tconn;
2760
2761dump:
2762 return get_one_status(skb, cb);
2763}
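
/* Sketch, not driver code: locating an attribute by hand in a genetlink
 * request when info->attrs is not available, exactly the situation
 * drbd_adm_get_status_all() is in during a dump.  hdrlen accounts for the
 * genetlink header plus the family-specific header. */
static struct nlattr *example_find_attr(const struct nlmsghdr *nlh, int attrtype)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;

	return nla_find(nlmsg_attrdata(nlh, hdrlen),
			nlmsg_attrlen(nlh, hdrlen), attrtype);
}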
2764
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002765int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2766{
2767 enum drbd_ret_code retcode;
2768 struct timeout_parms tp;
2769 int err;
2770
2771 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2772 if (!adm_ctx.reply_skb)
2773 return retcode;
2774 if (retcode != NO_ERROR)
2775 goto out;
2776
2777 tp.timeout_type =
2778 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2779 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2780 UT_DEFAULT;
2781
2782 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2783 if (err) {
2784 nlmsg_free(adm_ctx.reply_skb);
2785 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002786 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002787out:
2788 drbd_adm_finish(info, retcode);
2789 return 0;
2790}
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002791
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002792int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2793{
2794 struct drbd_conf *mdev;
2795 enum drbd_ret_code retcode;
2796
2797 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2798 if (!adm_ctx.reply_skb)
2799 return retcode;
2800 if (retcode != NO_ERROR)
2801 goto out;
2802
2803 mdev = adm_ctx.mdev;
2804 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2805 /* resume from last known position, if possible */
2806 struct start_ov_parms parms =
2807 { .ov_start_sector = mdev->ov_start_sector };
Lars Ellenbergf3990022011-03-23 14:31:09 +01002808 int err = start_ov_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002809 if (err) {
2810 retcode = ERR_MANDATORY_TAG;
2811 drbd_msg_put_info(from_attrs_err_to_txt(err));
2812 goto out;
2813 }
2814 /* w_make_ov_request expects position to be aligned */
2815 		mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
2816 }
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002817	/* If there is still bitmap IO pending, e.g. from a previous resync or verify
2818	 * that is just finishing, wait for it before requesting a new resync. */
2819 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002820	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2821out:
2822 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002823 return 0;
2824}
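
/* Illustrative sketch, not used by the driver: the alignment applied to
 * ov_start_sector above, assuming BM_SECT_PER_BIT is a power of two (with
 * 4 KiB per bitmap bit it is 8 sectors).  Masking with ~(BM_SECT_PER_BIT - 1)
 * rounds a sector down to the first sector covered by its bitmap bit,
 * e.g. sector 21 becomes 16 for BM_SECT_PER_BIT == 8. */
static inline sector_t example_align_to_bm_bit(sector_t sector)
{
	return sector & ~(sector_t)(BM_SECT_PER_BIT - 1);
}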
2825
2826
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002827int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002828{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002829 struct drbd_conf *mdev;
2830 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002831 int skip_initial_sync = 0;
2832 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002833 struct new_c_uuid_parms args;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002834
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002835 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2836 if (!adm_ctx.reply_skb)
2837 return retcode;
2838 if (retcode != NO_ERROR)
2839 goto out_nolock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002840
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002841 mdev = adm_ctx.mdev;
2842 memset(&args, 0, sizeof(args));
2843 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002844 err = new_c_uuid_parms_from_attrs(&args, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002845 if (err) {
2846 retcode = ERR_MANDATORY_TAG;
2847 drbd_msg_put_info(from_attrs_err_to_txt(err));
2848 goto out_nolock;
2849 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002850 }
2851
Philipp Reisner8410da82011-02-11 20:11:10 +01002852 mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002853
2854 if (!get_ldev(mdev)) {
2855 retcode = ERR_NO_DISK;
2856 goto out;
2857 }
2858
2859 	/* this is "skip initial sync", assume the data to be clean */
Philipp Reisner31890f42011-01-19 14:12:51 +01002860 if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07002861 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2862 dev_info(DEV, "Preparing to skip initial sync\n");
2863 skip_initial_sync = 1;
2864 } else if (mdev->state.conn != C_STANDALONE) {
2865 retcode = ERR_CONNECTED;
2866 goto out_dec;
2867 }
2868
2869 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2870 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2871
2872 if (args.clear_bm) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002873 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2874 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002875 if (err) {
2876 dev_err(DEV, "Writing bitmap failed with %d\n",err);
2877 retcode = ERR_IO_MD_DISK;
2878 }
2879 if (skip_initial_sync) {
2880 drbd_send_uuids_skip_initial_sync(mdev);
2881 _drbd_uuid_set(mdev, UI_BITMAP, 0);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002882 drbd_print_uuids(mdev, "cleared bitmap UUID");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002883 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002884 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2885 CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002886 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002887 }
2888 }
2889
2890 drbd_md_sync(mdev);
2891out_dec:
2892 put_ldev(mdev);
2893out:
Philipp Reisner8410da82011-02-11 20:11:10 +01002894 mutex_unlock(mdev->state_mutex);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002895out_nolock:
2896 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002897 return 0;
2898}
2899
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002900static enum drbd_ret_code
2901drbd_check_conn_name(const char *name)
Philipp Reisner774b3052011-02-22 02:07:03 -05002902{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002903 if (!name || !name[0]) {
2904 drbd_msg_put_info("connection name missing");
2905 return ERR_MANDATORY_TAG;
2906 }
2907 /* if we want to use these in sysfs/configfs/debugfs some day,
2908 * we must not allow slashes */
2909 if (strchr(name, '/')) {
2910 drbd_msg_put_info("invalid connection name");
2911 return ERR_INVALID_REQUEST;
2912 }
2913 return NO_ERROR;
2914}
Philipp Reisner774b3052011-02-22 02:07:03 -05002915
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002916int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2917{
2918 enum drbd_ret_code retcode;
2919
2920 retcode = drbd_adm_prepare(skb, info, 0);
2921 if (!adm_ctx.reply_skb)
2922 return retcode;
2923 if (retcode != NO_ERROR)
2924 goto out;
2925
2926 retcode = drbd_check_conn_name(adm_ctx.conn_name);
2927 if (retcode != NO_ERROR)
2928 goto out;
2929
2930 if (adm_ctx.tconn) {
Lars Ellenberg38f19612011-03-14 13:22:35 +01002931 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2932 retcode = ERR_INVALID_REQUEST;
2933 drbd_msg_put_info("connection exists");
2934 }
2935 /* else: still NO_ERROR */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002936 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05002937 }
2938
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002939 if (!conn_create(adm_ctx.conn_name))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002940 retcode = ERR_NOMEM;
2941out:
2942 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05002943 return 0;
2944}
2945
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002946int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05002947{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002948 struct drbd_genlmsghdr *dh = info->userhdr;
2949 enum drbd_ret_code retcode;
Philipp Reisner774b3052011-02-22 02:07:03 -05002950
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002951 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2952 if (!adm_ctx.reply_skb)
2953 return retcode;
2954 if (retcode != NO_ERROR)
2955 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05002956
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002957 /* FIXME drop minor_count parameter, limit to MINORMASK */
2958 if (dh->minor >= minor_count) {
2959 drbd_msg_put_info("requested minor out of range");
2960 retcode = ERR_INVALID_REQUEST;
2961 goto out;
2962 }
Andreas Gruenbacher0c8e36d2011-03-30 16:00:17 +02002963 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002964 drbd_msg_put_info("requested volume id out of range");
2965 retcode = ERR_INVALID_REQUEST;
2966 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05002967 }
2968
Lars Ellenberg38f19612011-03-14 13:22:35 +01002969 /* drbd_adm_prepare made sure already
2970 * that mdev->tconn and mdev->vnr match the request. */
2971 if (adm_ctx.mdev) {
2972 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
2973 retcode = ERR_MINOR_EXISTS;
2974 /* else: still NO_ERROR */
2975 goto out;
2976 }
2977
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07002978 down_write(&drbd_cfg_rwsem);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002979 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07002980 up_write(&drbd_cfg_rwsem);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002981out:
2982 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05002983 return 0;
2984}
2985
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002986static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
2987{
2988 if (mdev->state.disk == D_DISKLESS &&
2989 /* no need to be mdev->state.conn == C_STANDALONE &&
2990 * we may want to delete a minor from a live replication group.
2991 */
2992 mdev->state.role == R_SECONDARY) {
Philipp Reisnerff370e52011-04-11 21:10:11 -07002993 drbd_delete_device(mdev);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002994 return NO_ERROR;
2995 } else
2996 return ERR_MINOR_CONFIGURED;
2997}
2998
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002999int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003000{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003001 enum drbd_ret_code retcode;
3002
3003 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3004 if (!adm_ctx.reply_skb)
3005 return retcode;
3006 if (retcode != NO_ERROR)
3007 goto out;
3008
Philipp Reisneref356262011-04-13 14:21:29 -07003009 down_write(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003010 retcode = adm_delete_minor(adm_ctx.mdev);
Philipp Reisneref356262011-04-13 14:21:29 -07003011 up_write(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003012out:
3013 drbd_adm_finish(info, retcode);
3014 return 0;
3015}
3016
3017int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3018{
3019 enum drbd_ret_code retcode;
3020 enum drbd_state_rv rv;
3021 struct drbd_conf *mdev;
3022 unsigned i;
3023
3024 retcode = drbd_adm_prepare(skb, info, 0);
3025 if (!adm_ctx.reply_skb)
3026 return retcode;
3027 if (retcode != NO_ERROR)
3028 goto out;
3029
3030 if (!adm_ctx.tconn) {
3031 retcode = ERR_CONN_NOT_KNOWN;
3032 goto out;
3033 }
3034
Philipp Reisneref356262011-04-13 14:21:29 -07003035 down_read(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003036 /* demote */
3037 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3038 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3039 if (retcode < SS_SUCCESS) {
3040 drbd_msg_put_info("failed to demote");
3041 goto out_unlock;
3042 }
3043 }
3044
3045 /* disconnect */
3046 rv = conn_try_disconnect(adm_ctx.tconn, 0);
3047 if (rv < SS_SUCCESS) {
3048 retcode = rv; /* enum type mismatch! */
3049 drbd_msg_put_info("failed to disconnect");
3050 goto out_unlock;
3051 }
3052
3053 /* detach */
3054 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3055 rv = adm_detach(mdev);
3056 if (rv < SS_SUCCESS) {
3057 retcode = rv; /* enum type mismatch! */
3058 drbd_msg_put_info("failed to detach");
3059 goto out_unlock;
3060 }
3061 }
Philipp Reisneref356262011-04-13 14:21:29 -07003062 up_read(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003063
3064 /* delete volumes */
Philipp Reisneref356262011-04-13 14:21:29 -07003065 down_write(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003066 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3067 retcode = adm_delete_minor(mdev);
3068 if (retcode != NO_ERROR) {
3069 /* "can not happen" */
3070 drbd_msg_put_info("failed to delete volume");
Philipp Reisneref356262011-04-13 14:21:29 -07003071 up_write(&drbd_cfg_rwsem);
3072 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003073 }
3074 }
3075
3076 /* stop all threads */
3077 conn_reconfig_done(adm_ctx.tconn);
3078
3079 /* delete connection */
3080 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003081 list_del(&adm_ctx.tconn->all_tconn);
3082 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3083
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003084 retcode = NO_ERROR;
3085 } else {
3086 /* "can not happen" */
3087 retcode = ERR_CONN_IN_USE;
3088 drbd_msg_put_info("failed to delete connection");
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003089 }
Philipp Reisneref356262011-04-13 14:21:29 -07003090
3091 up_write(&drbd_cfg_rwsem);
3092 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003093out_unlock:
Philipp Reisneref356262011-04-13 14:21:29 -07003094 up_read(&drbd_cfg_rwsem);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003095out:
3096 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003097 return 0;
3098}
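
/* A small sketch, not driver code, of the idr_for_each_entry() walk used in
 * drbd_adm_down() above: the volumes of a tconn live in an idr keyed by
 * volume number, and the caller is expected to hold drbd_cfg_rwsem around
 * the whole walk so no mdev can disappear while it is being looked at. */
static void example_walk_volumes(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	unsigned i;

	idr_for_each_entry(&tconn->volumes, mdev, i)
		dev_info(DEV, "volume %u is minor %u\n", i, mdev_to_minor(mdev));
}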
3099
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003100int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003101{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003102 enum drbd_ret_code retcode;
3103
3104 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
3105 if (!adm_ctx.reply_skb)
3106 return retcode;
3107 if (retcode != NO_ERROR)
3108 goto out;
3109
Philipp Reisneref356262011-04-13 14:21:29 -07003110 down_write(&drbd_cfg_rwsem);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003111 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003112 list_del(&adm_ctx.tconn->all_tconn);
3113 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3114
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003115 retcode = NO_ERROR;
Philipp Reisner774b3052011-02-22 02:07:03 -05003116 } else {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003117 retcode = ERR_CONN_IN_USE;
Philipp Reisner774b3052011-02-22 02:07:03 -05003118 }
Philipp Reisneref356262011-04-13 14:21:29 -07003119 up_write(&drbd_cfg_rwsem);
Philipp Reisner774b3052011-02-22 02:07:03 -05003120
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003121out:
3122 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003123 return 0;
3124}
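
/* A self-contained sketch, not part of the driver, of the kref pattern
 * behind the kref_put(&tconn->kref, &conn_destroy) calls above: the object
 * embeds a struct kref, and the release callback passed to kref_put() runs
 * exactly once, when the last reference is dropped.  The struct and
 * function names are made up for the example. */
struct example_obj {
	struct kref kref;
};

static void example_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	kfree(obj);
}

static struct example_obj *example_create(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->kref);	/* reference count starts at 1 */
	return obj;
}

static void example_put(struct example_obj *obj)
{
	kref_put(&obj->kref, example_release);
}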
3125
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003126void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003127{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003128 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3129 struct sk_buff *msg;
3130 struct drbd_genlmsghdr *d_out;
3131 unsigned seq;
3132 int err = -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003133
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003134 seq = atomic_inc_return(&drbd_genl_seq);
3135 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3136 if (!msg)
3137 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003138
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003139 err = -EMSGSIZE;
3140 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3141 	if (!d_out) /* cannot happen, but anyway. */
3142 goto nla_put_failure;
3143 d_out->minor = mdev_to_minor(mdev);
3144 d_out->ret_code = 0;
Philipp Reisner9f5180e2009-10-06 09:30:14 +02003145
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003146 if (nla_put_status_info(msg, mdev, sib))
3147 goto nla_put_failure;
3148 genlmsg_end(msg, d_out);
3149 err = drbd_genl_multicast_events(msg, 0);
3150 /* msg has been consumed or freed in netlink_broadcast() */
3151 if (err && err != -ESRCH)
3152 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003153
Philipp Reisnerb411b362009-09-25 16:07:19 -07003154 return;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003155
3156nla_put_failure:
3157 nlmsg_free(msg);
3158failed:
3159 dev_err(DEV, "Error %d while broadcasting event. "
3160 "Event seq:%u sib_reason:%u\n",
3161 err, seq, sib->sib_reason);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003162}