/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

/* used with blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail would be lack of space in the skb, and 4k are available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	    && security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* putting a few bytes into a fresh skb of >= 4k will always succeed.
	 * but check anyways */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc) {
		switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
	}
	rcu_read_unlock();
}

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
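
/* Note (explanatory, not upstream text): call_usermodehelper() with the
 * wait flag set returns a wait(2)-style status word, which is why the
 * code above extracts the helper's exit code as (ret >> 8) & 0xff.  A
 * handler script ending in "exit 5" yields ret == 0x500, i.e. exit code
 * 5; negative values mean the helper could not be run at all, and are
 * deliberately mapped to 0 before returning. */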

static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	down_read(&drbd_cfg_rwsem);
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		drbd_md_sync(mdev);
	up_read(&drbd_cfg_rwsem);
}

int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}
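
/* Note (explanatory, not upstream text): the max_t() above relies on
 * enum drbd_fencing_p ordering the policies from least to most strict,
 * so with several volumes on one connection the strictest fencing
 * policy of any attached, at-least-Consistent volume wins.  Volumes
 * without a usable local disk do not get a vote, which is why fp
 * starts out as FP_NOT_AVAIL. */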

bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
480
Philipp Reisner87f7be42010-06-11 13:56:33 +0200481static int _try_outdate_peer_async(void *data)
482{
Philipp Reisnercb703452011-03-24 11:03:07 +0100483 struct drbd_tconn *tconn = (struct drbd_tconn *)data;
Philipp Reisner87f7be42010-06-11 13:56:33 +0200484
Philipp Reisnercb703452011-03-24 11:03:07 +0100485 conn_try_outdate_peer(tconn);
Philipp Reisner87f7be42010-06-11 13:56:33 +0200486
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +0200487 kref_put(&tconn->kref, &conn_destroy);
Philipp Reisner87f7be42010-06-11 13:56:33 +0200488 return 0;
489}
490
Philipp Reisnercb703452011-03-24 11:03:07 +0100491void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
Philipp Reisner87f7be42010-06-11 13:56:33 +0200492{
493 struct task_struct *opa;
494
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +0200495 kref_get(&tconn->kref);
Philipp Reisnercb703452011-03-24 11:03:07 +0100496 opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +0200497 if (IS_ERR(opa)) {
Philipp Reisnercb703452011-03-24 11:03:07 +0100498 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +0200499 kref_put(&tconn->kref, &conn_destroy);
500 }
Philipp Reisner87f7be42010-06-11 13:56:33 +0200501}
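
/* Note (explanatory, not upstream text): the kref_get() above pins the
 * tconn for the lifetime of the helper thread.  The matching kref_put()
 * happens either in _try_outdate_peer_async() once fencing is done, or
 * right here on the error path if the thread could not be spawned, so
 * the connection object cannot go away underneath the async fencing. */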

enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->want_lose = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage is done in the after-state-change work already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
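
/* Note (explanatory, not upstream text): the least significant bit of
 * the current UUID doubles as a role marker, which is why promotion
 * above does uuid[UI_CURRENT] |= (u64)1 while demotion does &= ~(u64)1;
 * after a reconnect the UUID exchange can then tell which side had been
 * writable when the peers were apart. */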

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
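
/* Note (explanatory, not upstream text): drbd_adm_set_role() shows the
 * skeleton shared by the drbd_adm_* handlers in this file:
 *
 *	retcode = drbd_adm_prepare(skb, info, <required context flags>);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;		// no reply skb; nothing to send
 *	if (retcode != NO_ERROR)
 *		goto out;		// reply carries the error code
 *	... parse optional parms, do the actual work ...
 * out:
 *	drbd_adm_finish(info, retcode);
 *	return 0;
 *
 * drbd_adm_finish() always sends the prepared reply skb back, so even
 * failed requests produce a netlink reply with a drbd return code. */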

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}
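
/* Rough sizing sketch (explanatory, not upstream text): for internal
 * meta data the bitmap portion scales with the backing device, one bit
 * per 4 KiB of data, rounded up to extent and alignment granularity by
 * the ALIGN()/BM_SECT_TO_EXT() dance above.  Assuming that usual 4 KiB
 * bitmap granularity, this works out to roughly 32 KiB of bitmap per
 * 1 GiB of storage, plus the fixed activity log and super block space
 * accounted for via MD_BM_OFFSET. */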

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
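
/* Examples (explanatory, not upstream text): the input is in KB, so
 * ppsize(buf, 2048) yields "2048 KB" (below the 10000 threshold the
 * unit is kept), while ppsize(buf, 1048576) yields "1024 MB": one shift
 * by 10 with round-to-nearest via the (size & (1<<9)) test, then the
 * next unit character is picked. */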

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns the outcome (unchanged, shrunk or grew), or dev_size_error on failure.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
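
/* Worked example (explanatory, not upstream text): with a 100 GiB local
 * backing device (m_size) and an 80 GiB peer (p_size), both known, the
 * agreed size is min(p_size, m_size) = 80 GiB.  If the peer size is not
 * known (e.g. never connected), the last agreed size la_size serves as
 * the starting point instead, capped by whichever sizes are known.  An
 * explicit user size u_size may only select a smaller result; asking
 * for more than is available just logs an error. */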

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Since drbd 8.3.8 the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
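
/* Note (explanatory, not upstream text): the effective maximum bio size
 * is min(local, peer), where the peer value depends on the negotiated
 * protocol: below version 94 the peer's last reported value is kept,
 * exactly 94 is capped at DRBD_MAX_SIZE_H80_PACKET, and anything newer
 * may use the full DRBD_MAX_BIO_SIZE, since such peers can split a
 * single peer_request into multiple BIOs. */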

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn);
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}
1112
Lars Ellenbergf3990022011-03-23 14:31:09 +01001113int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1114{
1115 enum drbd_ret_code retcode;
1116 struct drbd_conf *mdev;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001117 struct disk_conf *new_disk_conf, *old_disk_conf;
Philipp Reisner813472c2011-05-03 16:47:02 +02001118 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001119 int err, fifo_size;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001120
1121 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1122 if (!adm_ctx.reply_skb)
1123 return retcode;
1124 if (retcode != NO_ERROR)
1125 goto out;
1126
1127 mdev = adm_ctx.mdev;
1128
1129 /* we also need a disk
1130 * to change the options on */
1131 if (!get_ldev(mdev)) {
1132 retcode = ERR_NO_DISK;
1133 goto out;
1134 }
1135
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001136 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001137 if (!new_disk_conf) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001138 retcode = ERR_NOMEM;
1139 goto fail;
1140 }
1141
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001142 mutex_lock(&mdev->tconn->conf_update);
1143 old_disk_conf = mdev->ldev->disk_conf;
1144 *new_disk_conf = *old_disk_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001145 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001146 set_disk_conf_defaults(new_disk_conf);
Lars Ellenberg5979e362011-04-27 21:09:55 +02001147
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001148 err = disk_conf_from_attrs_for_change(new_disk_conf, info);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001149 if (err) {
1150 retcode = ERR_MANDATORY_TAG;
1151 drbd_msg_put_info(from_attrs_err_to_txt(err));
1152 }
1153
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001154 if (!expect(new_disk_conf->resync_rate >= 1))
1155 new_disk_conf->resync_rate = 1;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001156
Philipp Reisnerd589a212011-05-04 10:06:52 +02001157 enforce_disk_conf_limits(new_disk_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001158
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001159 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
Philipp Reisner9958c852011-05-03 16:19:31 +02001160 if (fifo_size != mdev->rs_plan_s->size) {
Philipp Reisner813472c2011-05-03 16:47:02 +02001161 new_plan = fifo_alloc(fifo_size);
1162 if (!new_plan) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001163 dev_err(DEV, "kmalloc of fifo_buffer failed");
1164 retcode = ERR_NOMEM;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001165 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001166 }
1167 }
1168
Lars Ellenbergf3990022011-03-23 14:31:09 +01001169 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1170 drbd_al_shrink(mdev);
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001171 err = drbd_check_al_size(mdev, new_disk_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001172 lc_unlock(mdev->act_log);
1173 wake_up(&mdev->al_wait);
1174
1175 if (err) {
1176 retcode = ERR_NOMEM;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001177 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001178 }
1179
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001180 write_lock_irq(&global_state_lock);
1181 retcode = drbd_sync_after_valid(mdev, new_disk_conf->resync_after);
1182 if (retcode == NO_ERROR) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001183 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001184 drbd_sync_after_changed(mdev);
1185 }
1186 write_unlock_irq(&global_state_lock);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001187
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001188 if (retcode != NO_ERROR)
1189 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001190
Philipp Reisner813472c2011-05-03 16:47:02 +02001191 if (new_plan) {
1192 old_plan = mdev->rs_plan_s;
1193 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
Philipp Reisner9958c852011-05-03 16:19:31 +02001194 }
Philipp Reisner9958c852011-05-03 16:19:31 +02001195
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001196 drbd_md_sync(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001197
1198 if (mdev->state.conn >= C_CONNECTED)
1199 drbd_send_sync_param(mdev);
1200
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001201 mutex_unlock(&mdev->tconn->conf_update);
1202 synchronize_rcu();
1203 kfree(old_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02001204 kfree(old_plan);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001205 goto success;
1206
1207fail_unlock:
1208 mutex_unlock(&mdev->tconn->conf_update);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001209 fail:
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001210 kfree(new_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02001211 kfree(new_plan);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001212success:
1213 put_ldev(mdev);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001214 out:
1215 drbd_adm_finish(info, retcode);
1216 return 0;
1217}
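
/* Note (explanatory, not upstream text): drbd_adm_disk_opts() is the
 * RCU publish pattern used throughout this file for option structs:
 *
 *	new = kmalloc(...);			// build off to the side
 *	mutex_lock(&tconn->conf_update);	// serialize updaters
 *	*new = *old;  ...modify new...;
 *	rcu_assign_pointer(mdev->ldev->disk_conf, new);
 *	mutex_unlock(&tconn->conf_update);
 *	synchronize_rcu();			// wait out all readers
 *	kfree(old);
 *
 * Readers only ever take rcu_read_lock() and rcu_dereference() the
 * pointer, so they see either the complete old or the complete new
 * configuration, never a half-updated one. */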
1218
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001219int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001220{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001221 struct drbd_conf *mdev;
1222 int err;
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01001223 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001224 enum determine_dev_size dd;
1225 sector_t max_possible_sectors;
1226 sector_t min_md_device_sectors;
1227 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001228 struct disk_conf *new_disk_conf = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001229 struct block_device *bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001230 struct lru_cache *resync_lru = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001231 struct fifo_buffer *new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001232 union drbd_state ns, os;
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001233 enum drbd_state_rv rv;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001234 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001235 int cp_discovered = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001236
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001237 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1238 if (!adm_ctx.reply_skb)
1239 return retcode;
1240 if (retcode != NO_ERROR)
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001241 goto finish;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001242
1243 mdev = adm_ctx.mdev;
Philipp Reisner0e29d162011-02-18 14:23:11 +01001244 conn_reconfig_start(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001245
1246 /* if you want to reconfigure, please tear down first */
1247 if (mdev->state.disk > D_DISKLESS) {
1248 retcode = ERR_DISK_CONFIGURED;
1249 goto fail;
1250 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001251 /* It may just now have detached because of IO error. Make sure
1252 * drbd_ldev_destroy is done already, we may end up here very fast,
1253 * e.g. if someone calls attach from the on-io-error handler,
1254 * to realize a "hot spare" feature (not that I'd recommend that) */
1255 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001256
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001257 /* allocation not in the IO path, drbdsetup context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001258 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1259 if (!nbc) {
1260 retcode = ERR_NOMEM;
1261 goto fail;
1262 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001263 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1264 if (!new_disk_conf) {
1265 retcode = ERR_NOMEM;
1266 goto fail;
1267 }
1268 nbc->disk_conf = new_disk_conf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001270 set_disk_conf_defaults(new_disk_conf);
1271 err = disk_conf_from_attrs(new_disk_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001272 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001273 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001274 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001275 goto fail;
1276 }
1277
Philipp Reisnerd589a212011-05-04 10:06:52 +02001278 enforce_disk_conf_limits(new_disk_conf);
1279
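	/* Size the resync-rate controller fifo to one slot per worker tick
	 * of the plan-ahead window: c_plan_ahead is in 0.1s units, and with
	 * SLEEP_TIME presumably HZ/10 (as in drbd_worker.c) this works out
	 * to c_plan_ahead slots. */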
Philipp Reisner9958c852011-05-03 16:19:31 +02001280 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1281 if (!new_plan) {
1282 retcode = ERR_NOMEM;
1283 goto fail;
1284 }
1285
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001286 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001287 retcode = ERR_MD_IDX_INVALID;
1288 goto fail;
1289 }
1290
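	/* Protocol A acknowledges writes before the peer has them on stable
	 * storage; that would defeat the guarantees stonith-based fencing is
	 * meant to provide, so refuse the combination up front. */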
Philipp Reisner44ed1672011-04-19 17:10:19 +02001291 rcu_read_lock();
1292 nc = rcu_dereference(mdev->tconn->net_conf);
1293 if (nc) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001294 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001295 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001296 retcode = ERR_STONITH_AND_PROT_A;
1297 goto fail;
1298 }
1299 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001300 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001301
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001302 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001303 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
Tejun Heoe525fd82010-11-13 11:55:17 +01001304 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001305 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001306 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001307 retcode = ERR_OPEN_DISK;
1308 goto fail;
1309 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001310 nbc->backing_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001311
Tejun Heoe525fd82010-11-13 11:55:17 +01001312 /*
1313 * meta_dev_idx >= 0: external fixed size, possibly multiple
1314	 * drbd devices sharing one meta device. TODO in that case, paranoia
1315 * check that [md_bdev, meta_dev_idx] is not yet used by some
1316 * other drbd minor! (if you use drbd.conf + drbdadm, that
1317 * should check it for you already; but if you don't, or
1318 * someone fooled it, we need to double check here)
1319 */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001320 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001321 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001322 (new_disk_conf->meta_dev_idx < 0) ?
Tejun Heod4d77622010-11-13 11:55:18 +01001323 (void *)mdev : (void *)drbd_m_holder);
Tejun Heoe525fd82010-11-13 11:55:17 +01001324 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001325 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001326 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001327 retcode = ERR_OPEN_MD_DISK;
1328 goto fail;
1329 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001330 nbc->md_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001331
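	/* "Internal" meta data lives on the backing device itself, so the
	 * two bdevs may be identical if and only if the index says internal
	 * (fixed or flexible). */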
Tejun Heoe525fd82010-11-13 11:55:17 +01001332 if ((nbc->backing_bdev == nbc->md_bdev) !=
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001333 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1334 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001335 retcode = ERR_MD_IDX_INVALID;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001336 goto fail;
1337 }
1338
1339 resync_lru = lc_create("resync", drbd_bm_ext_cache,
Lars Ellenberg46a15bc2011-02-21 13:21:01 +01001340 1, 61, sizeof(struct bm_extent),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001341 offsetof(struct bm_extent, lce));
1342 if (!resync_lru) {
1343 retcode = ERR_NOMEM;
Tejun Heoe525fd82010-11-13 11:55:17 +01001344 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001345 }
1346
1347 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1348 drbd_md_set_sector_offsets(mdev, nbc);
1349
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001350 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001351 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1352 (unsigned long long) drbd_get_max_capacity(nbc),
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001353 (unsigned long long) new_disk_conf->disk_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001354 retcode = ERR_DISK_TO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001355 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001356 }
1357
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001358 if (new_disk_conf->meta_dev_idx < 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001359 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1360 /* at least one MB, otherwise it does not make sense */
1361 min_md_device_sectors = (2<<10);
1362 } else {
1363 max_possible_sectors = DRBD_MAX_SECTORS;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001364 min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001365 }
1366
Philipp Reisnerb411b362009-09-25 16:07:19 -07001367 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1368 retcode = ERR_MD_DISK_TO_SMALL;
1369 dev_warn(DEV, "refusing attach: md-device too small, "
1370 "at least %llu sectors needed for this meta-disk type\n",
1371 (unsigned long long) min_md_device_sectors);
Tejun Heoe525fd82010-11-13 11:55:17 +01001372 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001373 }
1374
1375 /* Make sure the new disk is big enough
1376 * (we may currently be R_PRIMARY with no local disk...) */
1377 if (drbd_get_max_capacity(nbc) <
1378 drbd_get_capacity(mdev->this_bdev)) {
1379 retcode = ERR_DISK_TO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001380 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001381 }
1382
1383 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1384
Lars Ellenberg13529942009-10-12 19:07:49 +02001385 if (nbc->known_size > max_possible_sectors) {
1386 dev_warn(DEV, "==> truncating very big lower level device "
1387 "to currently maximum possible %llu sectors <==\n",
1388 (unsigned long long) max_possible_sectors);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001389 if (new_disk_conf->meta_dev_idx >= 0)
Lars Ellenberg13529942009-10-12 19:07:49 +02001390 dev_warn(DEV, "==>> using internal or flexible "
1391 "meta data may help <<==\n");
1392 }
1393
Philipp Reisnerb411b362009-09-25 16:07:19 -07001394 drbd_suspend_io(mdev);
1395 /* also wait for the last barrier ack. */
Philipp Reisner2aebfab2011-03-28 16:48:11 +02001396 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001397 /* and for any other previously queued work */
Philipp Reisnera21e9292011-02-08 15:08:49 +01001398 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001399
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001400 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1401 retcode = rv; /* FIXME: Type mismatch. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001402 drbd_resume_io(mdev);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001403 if (rv < SS_SUCCESS)
Tejun Heoe525fd82010-11-13 11:55:17 +01001404 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001405
1406 if (!get_ldev_if_state(mdev, D_ATTACHING))
1407 goto force_diskless;
1408
1409 drbd_md_set_sector_offsets(mdev, nbc);
1410
1411 if (!mdev->bitmap) {
1412 if (drbd_bm_init(mdev)) {
1413 retcode = ERR_NOMEM;
1414 goto force_diskless_dec;
1415 }
1416 }
1417
1418 retcode = drbd_md_read(mdev, nbc);
1419 if (retcode != NO_ERROR)
1420 goto force_diskless_dec;
1421
1422 if (mdev->state.conn < C_CONNECTED &&
1423 mdev->state.role == R_PRIMARY &&
1424 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1425 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1426 (unsigned long long)mdev->ed_uuid);
1427 retcode = ERR_DATA_NOT_CURRENT;
1428 goto force_diskless_dec;
1429 }
1430
1431 /* Since we are diskless, fix the activity log first... */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001432 if (drbd_check_al_size(mdev, new_disk_conf)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433 retcode = ERR_NOMEM;
1434 goto force_diskless_dec;
1435 }
1436
1437	/* Prevent shrinking of consistent devices! */
1438 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001439 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001440 dev_warn(DEV, "refusing to truncate a consistent device\n");
1441 retcode = ERR_DISK_TO_SMALL;
1442 goto force_diskless_dec;
1443 }
1444
1445 if (!drbd_al_read_log(mdev, nbc)) {
1446 retcode = ERR_IO_MD_DISK;
1447 goto force_diskless_dec;
1448 }
1449
Philipp Reisnerb411b362009-09-25 16:07:19 -07001450 /* Reset the "barriers don't work" bits here, then force meta data to
1451 * be written, to ensure we determine if barriers are supported. */
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001452 if (new_disk_conf->md_flushes)
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001453 clear_bit(MD_NO_FUA, &mdev->flags);
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001454 else
1455 set_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001456
1457 /* Point of no return reached.
1458 * Devices and memory are no longer released by error cleanup below.
1459	 * From now on, mdev takes over responsibility, and the state engine should
1460 * clean it up somewhere. */
1461 D_ASSERT(mdev->ldev == NULL);
1462 mdev->ldev = nbc;
1463 mdev->resync = resync_lru;
Philipp Reisner9958c852011-05-03 16:19:31 +02001464 mdev->rs_plan_s = new_plan;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001465 nbc = NULL;
1466 resync_lru = NULL;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001467 new_disk_conf = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001468 new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001469
Philipp Reisner2451fc32010-08-24 13:43:11 +02001470 mdev->write_ordering = WO_bdev_flush;
1471 drbd_bump_write_ordering(mdev, WO_bdev_flush);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001472
1473 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1474 set_bit(CRASHED_PRIMARY, &mdev->flags);
1475 else
1476 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1477
Philipp Reisner894c6a92010-06-18 16:03:20 +02001478 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02001479 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001480 set_bit(CRASHED_PRIMARY, &mdev->flags);
1481 cp_discovered = 1;
1482 }
1483
1484 mdev->send_cnt = 0;
1485 mdev->recv_cnt = 0;
1486 mdev->read_cnt = 0;
1487 mdev->writ_cnt = 0;
1488
Philipp Reisner99432fc2011-05-20 16:39:13 +02001489 drbd_reconsider_max_bio_size(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001490
1491 /* If I am currently not R_PRIMARY,
1492 * but meta data primary indicator is set,
1493 * I just now recover from a hard crash,
1494 * and have been R_PRIMARY before that crash.
1495 *
1496 * Now, if I had no connection before that crash
1497 * (have been degraded R_PRIMARY), chances are that
1498 * I won't find my peer now either.
1499 *
1500 * In that case, and _only_ in that case,
1501 * we use the degr-wfc-timeout instead of the default,
1502 * so we can automatically recover from a crash of a
1503 * degraded but active "cluster" after a certain timeout.
1504 */
1505 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1506 if (mdev->state.role != R_PRIMARY &&
1507 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1508 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1509 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1510
Bart Van Assche24c48302011-05-21 18:32:29 +02001511 dd = drbd_determine_dev_size(mdev, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512 if (dd == dev_size_error) {
1513 retcode = ERR_NOMEM_BITMAP;
1514 goto force_diskless_dec;
1515 } else if (dd == grew)
1516 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1517
1518 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1519 dev_info(DEV, "Assuming that all blocks are out of sync "
1520 "(aka FullSync)\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001521 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1522 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001523 retcode = ERR_IO_MD_DISK;
1524 goto force_diskless_dec;
1525 }
1526 } else {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001527 if (drbd_bitmap_io(mdev, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01001528 "read from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001529 retcode = ERR_IO_MD_DISK;
1530 goto force_diskless_dec;
1531 }
1532 }
1533
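	/* A crashed primary may have writes recorded only in the activity
	 * log; mark the covered extents out of sync in the bitmap and write
	 * the bitmap back before going operational. */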
1534 if (cp_discovered) {
1535 drbd_al_apply_to_bm(mdev);
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001536 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1537 "crashed primary apply AL", BM_LOCKED_MASK)) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001538 retcode = ERR_IO_MD_DISK;
1539 goto force_diskless_dec;
1540 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001541 }
1542
Philipp Reisner07782862010-08-31 12:00:50 +02001543 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1544 drbd_suspend_al(mdev); /* IO is still suspended here... */
1545
Philipp Reisner87eeee42011-01-19 14:16:30 +01001546 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02001547 os = drbd_read_state(mdev);
1548 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001549 /* If MDF_CONSISTENT is not set go into inconsistent state,
1550	   otherwise investigate MDF_WAS_UP_TO_DATE...
1551 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1552 otherwise into D_CONSISTENT state.
1553 */
1554 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1555 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1556 ns.disk = D_CONSISTENT;
1557 else
1558 ns.disk = D_OUTDATED;
1559 } else {
1560 ns.disk = D_INCONSISTENT;
1561 }
1562
1563 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1564 ns.pdsk = D_OUTDATED;
1565
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001566 rcu_read_lock();
1567 if (ns.disk == D_CONSISTENT &&
1568 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001569 ns.disk = D_UP_TO_DATE;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001570 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001571
1572 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1573 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1574 this point, because drbd_request_state() modifies these
1575 flags. */
1576
1577 /* In case we are C_CONNECTED postpone any decision on the new disk
1578	   state until after the negotiation phase. */
1579 if (mdev->state.conn == C_CONNECTED) {
1580 mdev->new_state_tmp.i = ns.i;
1581 ns.i = os.i;
1582 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02001583
1584 /* We expect to receive up-to-date UUIDs soon.
1585 To avoid a race in receive_state, free p_uuid while
1586 holding req_lock. I.e. atomic with the state change */
1587 kfree(mdev->p_uuid);
1588 mdev->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001589 }
1590
1591 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001592 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593
1594 if (rv < SS_SUCCESS)
1595 goto force_diskless_dec;
1596
1597 if (mdev->state.role == R_PRIMARY)
1598 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1599 else
1600 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1601
1602 drbd_md_mark_dirty(mdev);
1603 drbd_md_sync(mdev);
1604
1605 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1606 put_ldev(mdev);
Philipp Reisner0e29d162011-02-18 14:23:11 +01001607 conn_reconfig_done(mdev->tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001608 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001609 return 0;
1610
1611 force_diskless_dec:
1612 put_ldev(mdev);
1613 force_diskless:
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001614 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001615 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001616 fail:
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001617 conn_reconfig_done(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001618 if (nbc) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001619 if (nbc->backing_bdev)
1620 blkdev_put(nbc->backing_bdev,
1621 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1622 if (nbc->md_bdev)
1623 blkdev_put(nbc->md_bdev,
1624 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001625 kfree(nbc);
1626 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001627 kfree(new_disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001628 lc_destroy(resync_lru);
Philipp Reisner9958c852011-05-03 16:19:31 +02001629 kfree(new_plan);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001630
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001631 finish:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001632 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001633 return 0;
1634}
1635
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001636static int adm_detach(struct drbd_conf *mdev)
1637{
Philipp Reisner19f83c72011-03-29 14:21:03 +02001638 enum drbd_state_rv retcode;
Lars Ellenberg009ba892011-05-02 11:51:31 +02001639 int ret;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001640 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
Lars Ellenberg009ba892011-05-02 11:51:31 +02001641 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1642 /* D_FAILED will transition to DISKLESS. */
1643 ret = wait_event_interruptible(mdev->misc_wait,
1644 mdev->state.disk != D_FAILED);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001645 drbd_resume_io(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001646 if ((int)retcode == (int)SS_IS_DISKLESS)
1647 retcode = SS_NOTHING_TO_DO;
1648 if (ret)
1649 retcode = ERR_INTR;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001650 return retcode;
1651}
1652
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001653/* Detaching the disk is a process in multiple stages. First we need to lock
1654 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1655 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1656 * internal references as well.
1657 * Only then we have finally detached. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001658int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001659{
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001660 enum drbd_ret_code retcode;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001661
1662 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1663 if (!adm_ctx.reply_skb)
1664 return retcode;
1665 if (retcode != NO_ERROR)
1666 goto out;
1667
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001668 retcode = adm_detach(adm_ctx.mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001669out:
1670 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001671 return 0;
1672}
1673
Lars Ellenbergf3990022011-03-23 14:31:09 +01001674static bool conn_resync_running(struct drbd_tconn *tconn)
1675{
1676 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001677 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001678 int vnr;
1679
Philipp Reisner695d08f2011-04-11 22:53:32 -07001680 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001681 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1682 if (mdev->state.conn == C_SYNC_SOURCE ||
1683 mdev->state.conn == C_SYNC_TARGET ||
1684 mdev->state.conn == C_PAUSED_SYNC_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001685 mdev->state.conn == C_PAUSED_SYNC_T) {
1686 rv = true;
1687 break;
1688 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001689 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001690 rcu_read_unlock();
1691
1692 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001693}
1694
1695static bool conn_ov_running(struct drbd_tconn *tconn)
1696{
1697 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001698 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001699 int vnr;
1700
Philipp Reisner695d08f2011-04-11 22:53:32 -07001701 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001702 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1703 if (mdev->state.conn == C_VERIFY_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001704 mdev->state.conn == C_VERIFY_T) {
1705 rv = true;
1706 break;
1707 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001708 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001709 rcu_read_unlock();
1710
1711 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001712}
1713
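/* Validate @new_conf against the current connection state and all attached
 * volumes. Called under rcu_read_lock() (see check_net_options()), so it
 * must not block. */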
Philipp Reisnercd643972011-04-13 18:00:59 -07001714static enum drbd_ret_code
Philipp Reisner44ed1672011-04-19 17:10:19 +02001715_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
Philipp Reisnercd643972011-04-13 18:00:59 -07001716{
1717 struct drbd_conf *mdev;
1718 int i;
1719
Philipp Reisner44ed1672011-04-19 17:10:19 +02001720 if (old_conf && tconn->agreed_pro_version < 100 &&
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001721 tconn->cstate == C_WF_REPORT_PARAMS &&
Philipp Reisner44ed1672011-04-19 17:10:19 +02001722 new_conf->wire_protocol != old_conf->wire_protocol)
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001723 return ERR_NEED_APV_100;
1724
Philipp Reisnercd643972011-04-13 18:00:59 -07001725 if (new_conf->two_primaries &&
1726 (new_conf->wire_protocol != DRBD_PROT_C))
1727 return ERR_NOT_PROTO_C;
1728
Philipp Reisnercd643972011-04-13 18:00:59 -07001729 idr_for_each_entry(&tconn->volumes, mdev, i) {
1730 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001731 enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
Philipp Reisnercd643972011-04-13 18:00:59 -07001732 put_ldev(mdev);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001733 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
Philipp Reisnercd643972011-04-13 18:00:59 -07001734 return ERR_STONITH_AND_PROT_A;
Philipp Reisnercd643972011-04-13 18:00:59 -07001735 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001736 if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
Philipp Reisnercd643972011-04-13 18:00:59 -07001737 return ERR_DISCARD;
Philipp Reisnercd643972011-04-13 18:00:59 -07001738 }
Philipp Reisnercd643972011-04-13 18:00:59 -07001739
1740 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1741 return ERR_CONG_NOT_PROTO_A;
1742
1743 return NO_ERROR;
1744}
1745
Philipp Reisner44ed1672011-04-19 17:10:19 +02001746static enum drbd_ret_code
1747check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1748{
1749	enum drbd_ret_code rv;
1750 struct drbd_conf *mdev;
1751 int i;
1752
1753 rcu_read_lock();
1754 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1755 rcu_read_unlock();
1756
1757 /* tconn->volumes protected by genl_lock() here */
1758 idr_for_each_entry(&tconn->volumes, mdev, i) {
1759 if (!mdev->bitmap) {
1760			if (drbd_bm_init(mdev))
1761 return ERR_NOMEM;
1762 }
1763 }
1764
1765 return rv;
1766}
1767
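/* Scratch pad for freshly allocated crypto transforms and digest buffers:
 * filled in by alloc_crypto(); whatever has not been handed over to the
 * tconn is released again via free_crypto() on the error paths. */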
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001768struct crypto {
1769 struct crypto_hash *verify_tfm;
1770 struct crypto_hash *csums_tfm;
1771 struct crypto_hash *cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001772 struct crypto_hash *integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001773 void *int_dig_in;
1774 void *int_dig_vv;
1775};
1776
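/* Allocate a hash transform for @tfm_name. An empty name means "not
 * configured" and succeeds with *tfm left NULL; an allocation failure is
 * reported as the caller-supplied @err_alg code. */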
1777static int
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001778alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001779{
1780 if (!tfm_name[0])
1781 return NO_ERROR;
1782
1783 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1784 if (IS_ERR(*tfm)) {
1785 *tfm = NULL;
1786 return err_alg;
1787 }
1788
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001789 return NO_ERROR;
1790}
1791
1792static enum drbd_ret_code
1793alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1794{
1795 char hmac_name[CRYPTO_MAX_ALG_NAME];
1796 enum drbd_ret_code rv;
1797 int hash_size;
1798
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001799 rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1800 ERR_CSUMS_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001801 if (rv != NO_ERROR)
1802 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001803 rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1804 ERR_VERIFY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001805 if (rv != NO_ERROR)
1806 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001807 rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1808 ERR_INTEGRITY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001809 if (rv != NO_ERROR)
1810 return rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001811 if (new_conf->cram_hmac_alg[0] != 0) {
1812 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1813 new_conf->cram_hmac_alg);
1814
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001815 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1816 ERR_AUTH_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001817 }
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001818 if (crypto->integrity_tfm) {
1819 hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001820 crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
1821 if (!crypto->int_dig_in)
1822 return ERR_NOMEM;
1823 crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
1824 if (!crypto->int_dig_vv)
1825 return ERR_NOMEM;
1826 }
1827
1828 return rv;
1829}
1830
1831static void free_crypto(struct crypto *crypto)
1832{
1833 kfree(crypto->int_dig_in);
1834 kfree(crypto->int_dig_vv);
1835 crypto_free_hash(crypto->cram_hmac_tfm);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001836 crypto_free_hash(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001837 crypto_free_hash(crypto->csums_tfm);
1838 crypto_free_hash(crypto->verify_tfm);
1839}
1840
Lars Ellenbergf3990022011-03-23 14:31:09 +01001841int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1842{
1843 enum drbd_ret_code retcode;
1844 struct drbd_tconn *tconn;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001845 struct net_conf *old_conf, *new_conf = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001846 int err;
1847 int ovr; /* online verify running */
1848 int rsr; /* re-sync running */
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001849 struct crypto crypto = { };
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001850 bool change_integrity_alg;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001851
1852 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1853 if (!adm_ctx.reply_skb)
1854 return retcode;
1855 if (retcode != NO_ERROR)
1856 goto out;
1857
1858 tconn = adm_ctx.tconn;
1859
1860 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1861 if (!new_conf) {
1862 retcode = ERR_NOMEM;
1863 goto out;
1864 }
1865
Lars Ellenbergf3990022011-03-23 14:31:09 +01001866 conn_reconfig_start(tconn);
1867
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001868 mutex_lock(&tconn->data.mutex);
Philipp Reisnera0095502011-05-03 13:14:15 +02001869 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001870 old_conf = tconn->net_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001871
1872 if (!old_conf) {
1873 drbd_msg_put_info("net conf missing, try connect");
1874 retcode = ERR_INVALID_REQUEST;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001875 goto fail;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001876 }
1877
1878 *new_conf = *old_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001879 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001880 set_net_conf_defaults(new_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001881
Lars Ellenbergf3990022011-03-23 14:31:09 +01001882 err = net_conf_from_attrs_for_change(new_conf, info);
1883 if (err) {
1884 retcode = ERR_MANDATORY_TAG;
1885 drbd_msg_put_info(from_attrs_err_to_txt(err));
1886 goto fail;
1887 }
1888
Philipp Reisnercd643972011-04-13 18:00:59 -07001889 retcode = check_net_options(tconn, new_conf);
1890 if (retcode != NO_ERROR)
1891 goto fail;
1892
Lars Ellenbergf3990022011-03-23 14:31:09 +01001893 /* re-sync running */
1894 rsr = conn_resync_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001895 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001896 retcode = ERR_CSUMS_RESYNC_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001897 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001898 }
1899
Lars Ellenbergf3990022011-03-23 14:31:09 +01001900 /* online verify running */
1901 ovr = conn_ov_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001902 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1903 retcode = ERR_VERIFY_RUNNING;
1904 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001905 }
1906
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001907 change_integrity_alg = strcmp(old_conf->integrity_alg,
1908 new_conf->integrity_alg);
1909
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001910 retcode = alloc_crypto(&crypto, new_conf);
1911 if (retcode != NO_ERROR)
1912 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001913
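	/* Publish the new configuration. Readers access tconn->net_conf only
	 * under rcu_read_lock(); the old object is freed further down, once
	 * synchronize_rcu() guarantees all those readers are done. */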
Philipp Reisner44ed1672011-04-19 17:10:19 +02001914 rcu_assign_pointer(tconn->net_conf, new_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001915
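	/* Swap in the new checksum resp. verify transforms only while no
	 * resync resp. online verify is still using the old ones. */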
1916 if (!rsr) {
1917 crypto_free_hash(tconn->csums_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001918 tconn->csums_tfm = crypto.csums_tfm;
1919 crypto.csums_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001920 }
1921 if (!ovr) {
1922 crypto_free_hash(tconn->verify_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001923 tconn->verify_tfm = crypto.verify_tfm;
1924 crypto.verify_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001925 }
1926
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001927 kfree(tconn->int_dig_in);
1928 tconn->int_dig_in = crypto.int_dig_in;
1929 kfree(tconn->int_dig_vv);
1930 tconn->int_dig_vv = crypto.int_dig_vv;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001931 crypto_free_hash(tconn->integrity_tfm);
1932 tconn->integrity_tfm = crypto.integrity_tfm;
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001933 if (change_integrity_alg) {
1934 /* Do this without trying to take tconn->data.mutex again. */
1935 if (__drbd_send_protocol(tconn))
1936 goto fail;
1937 }
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001938
1939 /* FIXME Changing cram_hmac while the connection is established is useless */
1940 crypto_free_hash(tconn->cram_hmac_tfm);
1941 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
1942
Philipp Reisnera0095502011-05-03 13:14:15 +02001943 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001944 mutex_unlock(&tconn->data.mutex);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001945 synchronize_rcu();
1946 kfree(old_conf);
1947
Lars Ellenbergf3990022011-03-23 14:31:09 +01001948 if (tconn->cstate >= C_WF_REPORT_PARAMS)
1949 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
1950
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001951 goto done;
1952
Lars Ellenbergf3990022011-03-23 14:31:09 +01001953 fail:
Philipp Reisnera0095502011-05-03 13:14:15 +02001954 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001955 mutex_unlock(&tconn->data.mutex);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001956 free_crypto(&crypto);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001957 kfree(new_conf);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001958 done:
Lars Ellenbergf3990022011-03-23 14:31:09 +01001959 conn_reconfig_done(tconn);
1960 out:
1961 drbd_adm_finish(info, retcode);
1962 return 0;
1963}
1964
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001965int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001966{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001967 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001968 struct net_conf *old_conf, *new_conf = NULL;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001969 struct crypto crypto = { };
Philipp Reisner80883192011-02-18 14:56:45 +01001970 struct drbd_tconn *oconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001971 struct drbd_tconn *tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001972 struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001973 enum drbd_ret_code retcode;
1974 int i;
1975 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001976
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001977 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1978 if (!adm_ctx.reply_skb)
1979 return retcode;
1980 if (retcode != NO_ERROR)
1981 goto out;
1982
1983 tconn = adm_ctx.tconn;
Philipp Reisner80883192011-02-18 14:56:45 +01001984 conn_reconfig_start(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001985
Philipp Reisner80883192011-02-18 14:56:45 +01001986 if (tconn->cstate > C_STANDALONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001987 retcode = ERR_NET_CONFIGURED;
1988 goto fail;
1989 }
1990
1991	/* allocation not in the IO path, drbdsetup / netlink context */
Lars Ellenberg5979e362011-04-27 21:09:55 +02001992 new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001993 if (!new_conf) {
1994 retcode = ERR_NOMEM;
1995 goto fail;
1996 }
1997
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001998 set_net_conf_defaults(new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001999
Lars Ellenbergf3990022011-03-23 14:31:09 +01002000 err = net_conf_from_attrs(new_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002001 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002002 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002003 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002004 goto fail;
2005 }
2006
Philipp Reisnercd643972011-04-13 18:00:59 -07002007 retcode = check_net_options(tconn, new_conf);
2008 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002009 goto fail;
Philipp Reisner422028b2010-10-27 11:12:07 +02002010
Philipp Reisnerb411b362009-09-25 16:07:19 -07002011 retcode = NO_ERROR;
2012
2013 new_my_addr = (struct sockaddr *)&new_conf->my_addr;
2014 new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002015
Philipp Reisneref356262011-04-13 14:21:29 -07002016 /* No need to take drbd_cfg_rwsem here. All reconfiguration is
Lars Ellenberg543cc102011-03-10 22:18:18 +01002017 * strictly serialized on genl_lock(). We are protected against
2018 * concurrent reconfiguration/addition/deletion */
Philipp Reisner80883192011-02-18 14:56:45 +01002019 list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02002020 struct net_conf *nc;
Philipp Reisner80883192011-02-18 14:56:45 +01002021 if (oconn == tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002022 continue;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002023
2024 rcu_read_lock();
2025 nc = rcu_dereference(oconn->net_conf);
2026 if (nc) {
2027 taken_addr = (struct sockaddr *)&nc->my_addr;
2028 if (new_conf->my_addr_len == nc->my_addr_len &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07002029 !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
2030 retcode = ERR_LOCAL_ADDR;
2031
Philipp Reisner44ed1672011-04-19 17:10:19 +02002032 taken_addr = (struct sockaddr *)&nc->peer_addr;
2033 if (new_conf->peer_addr_len == nc->peer_addr_len &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07002034 !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
2035 retcode = ERR_PEER_ADDR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002036 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002037 rcu_read_unlock();
2038 if (retcode != NO_ERROR)
2039 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002040 }
2041
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002042 retcode = alloc_crypto(&crypto, new_conf);
2043 if (retcode != NO_ERROR)
2044 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002045
Philipp Reisnerb411b362009-09-25 16:07:19 -07002046 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2047
Philipp Reisner80883192011-02-18 14:56:45 +01002048 conn_flush_workqueue(tconn);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002049
Philipp Reisnera0095502011-05-03 13:14:15 +02002050 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002051 old_conf = tconn->net_conf;
2052 if (old_conf) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002053 retcode = ERR_NET_CONFIGURED;
Philipp Reisnera0095502011-05-03 13:14:15 +02002054 mutex_unlock(&tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002055 goto fail;
2056 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002057 rcu_assign_pointer(tconn->net_conf, new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002058
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002059 conn_free_crypto(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002060 tconn->int_dig_in = crypto.int_dig_in;
2061 tconn->int_dig_vv = crypto.int_dig_vv;
2062 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002063 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002064 tconn->csums_tfm = crypto.csums_tfm;
2065 tconn->verify_tfm = crypto.verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002066
Philipp Reisnera0095502011-05-03 13:14:15 +02002067 mutex_unlock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002068
Philipp Reisner695d08f2011-04-11 22:53:32 -07002069 rcu_read_lock();
Philipp Reisner80883192011-02-18 14:56:45 +01002070 idr_for_each_entry(&tconn->volumes, mdev, i) {
2071 mdev->send_cnt = 0;
2072 mdev->recv_cnt = 0;
Philipp Reisner80883192011-02-18 14:56:45 +01002073 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07002074 rcu_read_unlock();
Lars Ellenberg5ee743e2011-04-26 16:22:25 +02002075
2076 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2077
Philipp Reisner80883192011-02-18 14:56:45 +01002078 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002079 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002080 return 0;
2081
2082fail:
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002083 free_crypto(&crypto);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002084 kfree(new_conf);
2085
Philipp Reisner80883192011-02-18 14:56:45 +01002086 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002087out:
2088 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002089 return 0;
2090}
2091
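/* Take the connection down towards C_STANDALONE: handle the "outdate the
 * peer" resp. "peer outdates us" negotiation, then wait for the receiver
 * thread to really terminate. */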
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002092static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2093{
2094 enum drbd_state_rv rv;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002095
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002096 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2097 force ? CS_HARD : 0);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002098
2099 switch (rv) {
2100 case SS_NOTHING_TO_DO:
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002101 break;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002102 case SS_ALREADY_STANDALONE:
2103 return SS_SUCCESS;
2104 case SS_PRIMARY_NOP:
2105 /* Our state checking code wants to see the peer outdated. */
2106 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002107 pdsk, D_OUTDATED), CS_VERBOSE);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002108 break;
2109 case SS_CW_FAILED_BY_PEER:
2110 /* The peer probably wants to see us outdated. */
2111 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2112 disk, D_OUTDATED), 0);
2113 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002114 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2115 CS_HARD);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002116 }
2117 break;
2118 default:;
2119 /* no special handling necessary */
2120 }
2121
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002122 if (rv >= SS_SUCCESS) {
2123 enum drbd_state_rv rv2;
2124 /* No one else can reconfigure the network while I am here.
2125 * The state handling only uses drbd_thread_stop_nowait(),
2126 * we want to really wait here until the receiver is no more.
2127 */
2128 drbd_thread_stop(&adm_ctx.tconn->receiver);
2129
2130 /* Race breaker. This additional state change request may be
2131 * necessary, if this was a forced disconnect during a receiver
2132 * restart. We may have "killed" the receiver thread just
2133 * after drbdd_init() returned. Typically, we should be
2134 * C_STANDALONE already, now, and this becomes a no-op.
2135 */
2136 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2137 CS_VERBOSE | CS_HARD);
2138 if (rv2 < SS_SUCCESS)
2139 conn_err(tconn,
2140 "unexpected rv2=%d in conn_try_disconnect()\n",
2141 rv2);
2142 }
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002143 return rv;
2144}
2145
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002146int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002147{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002148 struct disconnect_parms parms;
2149 struct drbd_tconn *tconn;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002150 enum drbd_state_rv rv;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002151 enum drbd_ret_code retcode;
2152 int err;
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002153
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002154 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2155 if (!adm_ctx.reply_skb)
2156 return retcode;
2157 if (retcode != NO_ERROR)
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002158 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002159
2160 tconn = adm_ctx.tconn;
2161 memset(&parms, 0, sizeof(parms));
2162 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002163 err = disconnect_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002164 if (err) {
2165 retcode = ERR_MANDATORY_TAG;
2166 drbd_msg_put_info(from_attrs_err_to_txt(err));
2167 goto fail;
2168 }
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002169 }
2170
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002171 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2172 if (rv < SS_SUCCESS)
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002173 retcode = rv; /* FIXME: Type mismatch. */
2174 else
2175 retcode = NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002176 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002177 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002178 return 0;
2179}
2180
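/* After an online grow, decide which side resyncs the new area: the primary
 * if the roles differ, otherwise whoever won the concurrent-write
 * arbitration (DISCARD_CONCURRENT). */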
2181void resync_after_online_grow(struct drbd_conf *mdev)
2182{
2183 int iass; /* I am sync source */
2184
2185 dev_info(DEV, "Resync of new storage after online grow\n");
2186 if (mdev->state.role != mdev->state.peer)
2187 iass = (mdev->state.role == R_PRIMARY);
2188 else
Philipp Reisner25703f82011-02-07 14:35:25 +01002189 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002190
2191 if (iass)
2192 drbd_start_resync(mdev, C_SYNC_SOURCE);
2193 else
2194 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2195}
2196
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002197int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002198{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002199 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002200 struct resize_parms rs;
2201 struct drbd_conf *mdev;
2202 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002203 enum determine_dev_size dd;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002204 enum dds_flags ddsf;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002205 sector_t u_size;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002206 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002207
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002208 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2209 if (!adm_ctx.reply_skb)
2210 return retcode;
2211 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002212 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002213
2214 memset(&rs, 0, sizeof(struct resize_parms));
2215 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002216 err = resize_parms_from_attrs(&rs, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002217 if (err) {
2218 retcode = ERR_MANDATORY_TAG;
2219 drbd_msg_put_info(from_attrs_err_to_txt(err));
2220 goto fail;
2221 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002222 }
2223
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002224 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002225 if (mdev->state.conn > C_CONNECTED) {
2226 retcode = ERR_RESIZE_RESYNC;
2227 goto fail;
2228 }
2229
2230 if (mdev->state.role == R_SECONDARY &&
2231 mdev->state.peer == R_SECONDARY) {
2232 retcode = ERR_NO_PRIMARY;
2233 goto fail;
2234 }
2235
2236 if (!get_ldev(mdev)) {
2237 retcode = ERR_NO_DISK;
2238 goto fail;
2239 }
2240
Philipp Reisner31890f42011-01-19 14:12:51 +01002241 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002242 retcode = ERR_NEED_APV_93;
2243 goto fail;
2244 }
2245
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002246 rcu_read_lock();
2247 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2248 rcu_read_unlock();
2249 if (u_size != (sector_t)rs.resize_size) {
2250 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2251 if (!new_disk_conf) {
2252 retcode = ERR_NOMEM;
2253 goto fail;
2254 }
2255 }
2256
Philipp Reisner087c2492010-03-26 13:49:56 +01002257 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002258 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002259
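	/* Copy-update-publish of the user-requested size: writers serialize
	 * on conf_update, readers use RCU, and the old disk_conf is freed
	 * only after a grace period has elapsed. */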
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002260 if (new_disk_conf) {
2261 mutex_lock(&mdev->tconn->conf_update);
2262 old_disk_conf = mdev->ldev->disk_conf;
2263 *new_disk_conf = *old_disk_conf;
2264 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2265 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2266 mutex_unlock(&mdev->tconn->conf_update);
2267 synchronize_rcu();
2268 kfree(old_disk_conf);
2269 }
2270
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002271 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
Bart Van Assche24c48302011-05-21 18:32:29 +02002272 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002273 drbd_md_sync(mdev);
2274 put_ldev(mdev);
2275 if (dd == dev_size_error) {
2276 retcode = ERR_NOMEM_BITMAP;
2277 goto fail;
2278 }
2279
Philipp Reisner087c2492010-03-26 13:49:56 +01002280 if (mdev->state.conn == C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002281 if (dd == grew)
2282 set_bit(RESIZE_PENDING, &mdev->flags);
2283
2284 drbd_send_uuids(mdev);
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002285 drbd_send_sizes(mdev, 1, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002286 }
2287
2288 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002289 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002290 return 0;
2291}
2292
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002293void drbd_set_res_opts_defaults(struct res_opts *r)
2294{
2295	set_res_opts_defaults(r);
2296}
2297
Lars Ellenbergf3990022011-03-23 14:31:09 +01002298int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002299{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002300 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002301 cpumask_var_t new_cpu_mask;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002302 struct drbd_tconn *tconn;
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002303 struct res_opts res_opts;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002304 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002305
Lars Ellenbergf3990022011-03-23 14:31:09 +01002306 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002307 if (!adm_ctx.reply_skb)
2308 return retcode;
2309 if (retcode != NO_ERROR)
2310 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002311 tconn = adm_ctx.tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002312
Philipp Reisnerb411b362009-09-25 16:07:19 -07002313 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2314 retcode = ERR_NOMEM;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002315 drbd_msg_put_info("unable to allocate cpumask");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002316 goto fail;
2317 }
2318
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002319 res_opts = tconn->res_opts;
Lars Ellenberg5979e362011-04-27 21:09:55 +02002320 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002321 set_res_opts_defaults(&res_opts);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002322
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002323 err = res_opts_from_attrs(&res_opts, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002324 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002325 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002326 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002327 goto fail;
2328 }
2329
Philipp Reisnerb411b362009-09-25 16:07:19 -07002330 /* silently ignore cpu mask on UP kernel */
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002331 if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
2332 err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
Philipp Reisnerb411b362009-09-25 16:07:19 -07002333 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2334 if (err) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002335 conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002336 retcode = ERR_CPU_MASK_PARSE;
2337 goto fail;
2338 }
2339 }
2340
Lars Ellenberg7ad651b2011-02-21 13:21:03 +01002341
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002342 tconn->res_opts = res_opts;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002343
Lars Ellenbergf3990022011-03-23 14:31:09 +01002344 if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2345 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2346 drbd_calc_cpu_mask(tconn);
2347 tconn->receiver.reset_cpu_mask = 1;
2348 tconn->asender.reset_cpu_mask = 1;
2349 tconn->worker.reset_cpu_mask = 1;
Philipp Reisner778f2712010-07-06 11:14:00 +02002350 }
2351
Philipp Reisnerb411b362009-09-25 16:07:19 -07002352fail:
2353 free_cpumask_var(new_cpu_mask);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002354
2355 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002356 return 0;
2357}
2358
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002359int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002360{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002361 struct drbd_conf *mdev;
2362	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2363
2364 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2365 if (!adm_ctx.reply_skb)
2366 return retcode;
2367 if (retcode != NO_ERROR)
2368 goto out;
2369
2370 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002371
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002372 /* If there is still bitmap IO pending, probably because of a previous
2373	 * resync that has just finished, wait for it before requesting a new resync. */
2374 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
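	/* Try the ordered transition first; while it fails with
	 * SS_NEED_CONNECTION we are (or just became) unconnected, so simply
	 * mark the local disk D_INCONSISTENT instead. */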
2375
Philipp Reisnerb411b362009-09-25 16:07:19 -07002376 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2377
2378 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2379 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2380
2381 while (retcode == SS_NEED_CONNECTION) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01002382 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002383 if (mdev->state.conn < C_CONNECTED)
2384 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002385 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002386
2387 if (retcode != SS_NEED_CONNECTION)
2388 break;
2389
2390 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2391 }
2392
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002393out:
2394 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002395 return 0;
2396}
2397
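/* Bitmap-IO helper: set all bits and write the bitmap out, then suspend
 * activity-log updates. */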
Philipp Reisner07782862010-08-31 12:00:50 +02002398static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2399{
2400 int rv;
2401
2402 rv = drbd_bmio_set_n_write(mdev);
2403 drbd_suspend_al(mdev);
2404 return rv;
2405}
2406
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002407static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2408 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002409{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002410 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002411
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002412 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2413 if (!adm_ctx.reply_skb)
2414 return retcode;
2415 if (retcode != NO_ERROR)
2416 goto out;
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002417
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002418 retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2419out:
2420 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002421 return 0;
2422}
2423
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002424int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002425{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002426 return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2427}
Philipp Reisnerb411b362009-09-25 16:07:19 -07002428
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002429int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2430{
2431 enum drbd_ret_code retcode;
2432
2433 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2434 if (!adm_ctx.reply_skb)
2435 return retcode;
2436 if (retcode != NO_ERROR)
2437 goto out;
2438
2439 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002440 retcode = ERR_PAUSE_IS_SET;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002441out:
2442 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002443 return 0;
2444}
2445
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002446int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002447{
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02002448 union drbd_dev_state s;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002449 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002450
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002451 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2452 if (!adm_ctx.reply_skb)
2453 return retcode;
2454 if (retcode != NO_ERROR)
2455 goto out;
2456
2457 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2458 s = adm_ctx.mdev->state;
Philipp Reisnercd88d032011-01-20 11:46:41 +01002459 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2460 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2461 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2462 } else {
2463 retcode = ERR_PAUSE_IS_CLEAR;
2464 }
2465 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002466
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002467out:
2468 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002469 return 0;
2470}
2471
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002472int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002473{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002474 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002475}
2476
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002477int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002478{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002479 struct drbd_conf *mdev;
2480 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2481
2482 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2483 if (!adm_ctx.reply_skb)
2484 return retcode;
2485 if (retcode != NO_ERROR)
2486 goto out;
2487
2488 mdev = adm_ctx.mdev;
Philipp Reisner43a51822010-06-11 11:26:34 +02002489 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2490 drbd_uuid_new_current(mdev);
2491 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02002492 }
Philipp Reisner265be2d2010-05-31 10:14:17 +02002493 drbd_suspend_io(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002494 retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2495 if (retcode == SS_SUCCESS) {
Philipp Reisner265be2d2010-05-31 10:14:17 +02002496 if (mdev->state.conn < C_CONNECTED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002497 tl_clear(mdev->tconn);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002498 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002499 tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
Philipp Reisner265be2d2010-05-31 10:14:17 +02002500 }
2501 drbd_resume_io(mdev);
2502
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002503out:
2504 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002505 return 0;
2506}
2507
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002508int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002509{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002510 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002511}
2512
Lars Ellenberg543cc102011-03-10 22:18:18 +01002513int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2514{
2515 struct nlattr *nla;
2516 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2517 if (!nla)
2518 goto nla_put_failure;
2519 if (vnr != VOLUME_UNSPECIFIED)
2520 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2521 NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2522 nla_nest_end(skb, nla);
2523 return 0;
2524
2525nla_put_failure:
2526 if (nla)
2527 nla_nest_cancel(skb, nla);
2528 return -EMSGSIZE;
2529}
2530
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002531int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2532 const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002533{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002534 struct state_info *si = NULL; /* for sizeof(si->member); */
Philipp Reisner44ed1672011-04-19 17:10:19 +02002535 struct net_conf *nc;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002536 struct nlattr *nla;
2537 int got_ldev;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002538 int err = 0;
2539 int exclude_sensitive;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002540
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002541 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
 2542	 * to. So we had better exclude sensitive information.
2543 *
2544 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2545 * in the context of the requesting user process. Exclude sensitive
 2546	 * information, unless current has the CAP_SYS_ADMIN capability.
2547 *
2548 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2549 * relies on the current implementation of netlink_dump(), which
2550 * executes the dump callback successively from netlink_recvmsg(),
2551 * always in the context of the receiving process */
2552 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002553
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002554 got_ldev = get_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002555
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002556	/* We still need to add the connection name and volume number here.
 2557	 * The minor number is already in drbd_genlmsghdr. */
Lars Ellenberg543cc102011-03-10 22:18:18 +01002558 if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002559 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002560
Lars Ellenbergf3990022011-03-23 14:31:09 +01002561 if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2562 goto nla_put_failure;
2563
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002564 rcu_read_lock();
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002565 if (got_ldev)
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002566 if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002567 goto nla_put_failure;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002568
Philipp Reisner44ed1672011-04-19 17:10:19 +02002569 nc = rcu_dereference(mdev->tconn->net_conf);
2570 if (nc)
2571 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2572 rcu_read_unlock();
2573 if (err)
2574 goto nla_put_failure;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002575
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002576 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2577 if (!nla)
2578 goto nla_put_failure;
2579 NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2580 NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2581 NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2582 NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002583
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002584 if (got_ldev) {
2585 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2586 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2587 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2588 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2589 if (C_SYNC_SOURCE <= mdev->state.conn &&
2590 C_PAUSED_SYNC_T >= mdev->state.conn) {
2591 NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2592 NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002593 }
2594 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002595
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002596 if (sib) {
 2597	 switch (sib->sib_reason) {
2598 case SIB_SYNC_PROGRESS:
2599 case SIB_GET_STATUS_REPLY:
2600 break;
2601 case SIB_STATE_CHANGE:
2602 NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2603 NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2604 break;
2605 case SIB_HELPER_POST:
2606 NLA_PUT_U32(skb,
2607 T_helper_exit_code, sib->helper_exit_code);
2608 /* fall through */
2609 case SIB_HELPER_PRE:
2610 NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2611 break;
2612 }
2613 }
2614 nla_nest_end(skb, nla);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002615
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002616 if (0)
2617nla_put_failure:
2618 err = -EMSGSIZE;
2619 if (got_ldev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002620 put_ldev(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002621 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002622}
2623
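/*
 * A note on the "if (0) nla_put_failure:" construct above: on the success
 * path the statement under the label is skipped, while the NLA_PUT_*()
 * macros goto nla_put_failure when the skb runs out of room, landing on
 * that label and setting err = -EMSGSIZE; both paths then share the
 * put_ldev() and return below.  A minimal sketch of the same idiom, with
 * hypothetical attribute names, purely for illustration:
 */
#if 0	/* illustration only, not compiled */
static int fill_example(struct sk_buff *skb, u32 value)
{
	int err = 0;

	NLA_PUT_U32(skb, T_example_value, value);	/* may goto nla_put_failure */

	if (0)
nla_put_failure:
		err = -EMSGSIZE;	/* only reached via the goto above */

	/* common cleanup for both paths would go here */
	return err;
}
#endif
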
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002624int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002625{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002626 enum drbd_ret_code retcode;
2627 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002628
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002629 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2630 if (!adm_ctx.reply_skb)
2631 return retcode;
2632 if (retcode != NO_ERROR)
2633 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002634
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002635 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2636 if (err) {
2637 nlmsg_free(adm_ctx.reply_skb);
2638 return err;
2639 }
2640out:
2641 drbd_adm_finish(info, retcode);
2642 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002643}
2644
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002645int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002646{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002647 struct drbd_conf *mdev;
2648 struct drbd_genlmsghdr *dh;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002649 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2650 struct drbd_tconn *tconn = NULL;
2651 struct drbd_tconn *tmp;
2652 unsigned volume = cb->args[1];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002653
Lars Ellenberg543cc102011-03-10 22:18:18 +01002654	/* Open-coded, deferred iteration:
2655 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2656 * idr_for_each_entry(&tconn->volumes, mdev, i) {
2657 * ...
2658 * }
2659 * }
2660 * where tconn is cb->args[0];
2661 * and i is cb->args[1];
2662 *
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002663 * cb->args[2] indicates if we shall loop over all resources,
2664 * or just dump all volumes of a single resource.
2665 *
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002666 * This may miss entries inserted after this dump started,
2667 * or entries deleted before they are reached.
Lars Ellenberg543cc102011-03-10 22:18:18 +01002668 *
2669 * We need to make sure the mdev won't disappear while
2670 * we are looking at it, and revalidate our iterators
2671 * on each iteration.
2672 */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002673
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002674 /* synchronize with conn_create()/conn_destroy() */
Philipp Reisneref356262011-04-13 14:21:29 -07002675 down_read(&drbd_cfg_rwsem);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002676 /* revalidate iterator position */
2677 list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2678 if (pos == NULL) {
2679 /* first iteration */
2680 pos = tmp;
2681 tconn = pos;
2682 break;
2683 }
2684 if (tmp == pos) {
2685 tconn = pos;
2686 break;
2687 }
2688 }
2689 if (tconn) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002690next_tconn:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002691 mdev = idr_get_next(&tconn->volumes, &volume);
2692 if (!mdev) {
2693 /* No more volumes to dump on this tconn.
2694 * Advance tconn iterator. */
2695 pos = list_entry(tconn->all_tconn.next,
2696 struct drbd_tconn, all_tconn);
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002697 /* Did we dump any volume on this tconn yet? */
Lars Ellenberg543cc102011-03-10 22:18:18 +01002698 if (volume != 0) {
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002699 /* If we reached the end of the list,
2700 * or only a single resource dump was requested,
2701 * we are done. */
2702 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2703 goto out;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002704 volume = 0;
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002705 tconn = pos;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002706 goto next_tconn;
2707 }
2708 }
2709
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002710 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2711 cb->nlh->nlmsg_seq, &drbd_genl_family,
2712 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2713 if (!dh)
Lars Ellenberg543cc102011-03-10 22:18:18 +01002714 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002715
Lars Ellenberg543cc102011-03-10 22:18:18 +01002716 if (!mdev) {
2717 /* this is a tconn without a single volume */
2718 dh->minor = -1U;
2719 dh->ret_code = NO_ERROR;
2720 if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2721 genlmsg_cancel(skb, dh);
2722 else
2723 genlmsg_end(skb, dh);
2724 goto out;
2725 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002726
Lars Ellenberg543cc102011-03-10 22:18:18 +01002727 D_ASSERT(mdev->vnr == volume);
2728 D_ASSERT(mdev->tconn == tconn);
2729
2730 dh->minor = mdev_to_minor(mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002731 dh->ret_code = NO_ERROR;
2732
2733 if (nla_put_status_info(skb, mdev, NULL)) {
2734 genlmsg_cancel(skb, dh);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002735 goto out;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002736 }
2737 genlmsg_end(skb, dh);
2738 }
2739
Lars Ellenberg543cc102011-03-10 22:18:18 +01002740out:
Philipp Reisneref356262011-04-13 14:21:29 -07002741 up_read(&drbd_cfg_rwsem);
Lars Ellenberg543cc102011-03-10 22:18:18 +01002742 /* where to start the next iteration */
2743 cb->args[0] = (long)pos;
2744 cb->args[1] = (pos == tconn) ? volume + 1 : 0;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002745
Lars Ellenberg543cc102011-03-10 22:18:18 +01002746	/* Finding no more tconns/volumes/minors results in an empty skb,
 2747	 * which will terminate the dump. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002748 return skb->len;
2749}
2750
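/*
 * Dump-callback background (illustration only): netlink invokes a dump
 * callback repeatedly, once per reply skb, until the callback produces an
 * empty skb or returns 0.  Anything that must survive between invocations
 * is stashed in the long cb->args[] slots, exactly as get_one_status()
 * does with its tconn pointer and volume number.  A hypothetical minimal
 * dump callback looks roughly like this:
 */
#if 0	/* illustration only, not compiled */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long next = cb->args[0];	/* 0 on the first invocation */

	/* ... look up the item at position 'next' and put it into skb ... */

	cb->args[0] = next + 1;		/* where to resume on the next call */
	return skb->len;		/* 0 (nothing added) terminates the dump */
}
#endif
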
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002751/*
2752 * Request status of all resources, or of all volumes within a single resource.
2753 *
2754 * This is a dump, as the answer may not fit in a single reply skb otherwise.
2755 * Which means we cannot use the family->attrbuf or other such members, because
2756 * dump is NOT protected by the genl_lock(). During dump, we only have access
 2757	 * to the incoming skb, and need to open-code "parsing" of the nlattr payload.
2758 *
2759 * Once things are setup properly, we call into get_one_status().
2760 */
2761int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2762{
2763 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2764 struct nlattr *nla;
2765 const char *conn_name;
2766 struct drbd_tconn *tconn;
2767
2768 /* Is this a followup call? */
2769 if (cb->args[0]) {
2770 /* ... of a single resource dump,
2771 * and the resource iterator has been advanced already? */
2772 if (cb->args[2] && cb->args[2] != cb->args[0])
2773 return 0; /* DONE. */
2774 goto dump;
2775 }
2776
2777 /* First call (from netlink_dump_start). We need to figure out
2778 * which resource(s) the user wants us to dump. */
2779 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2780 nlmsg_attrlen(cb->nlh, hdrlen),
2781 DRBD_NLA_CFG_CONTEXT);
2782
2783 /* No explicit context given. Dump all. */
2784 if (!nla)
2785 goto dump;
2786 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
2787 /* context given, but no name present? */
2788 if (!nla)
2789 return -EINVAL;
2790 conn_name = nla_data(nla);
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002791 tconn = conn_get_by_name(conn_name);
2792
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002793 if (!tconn)
2794 return -ENODEV;
2795
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002796 kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2797
Lars Ellenberg71932ef2011-04-18 09:43:25 +02002798 /* prime iterators, and set "filter" mode mark:
2799 * only dump this tconn. */
2800 cb->args[0] = (long)tconn;
2801 /* cb->args[1] = 0; passed in this way. */
2802 cb->args[2] = (long)tconn;
2803
2804dump:
2805 return get_one_status(skb, cb);
2806}
2807
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002808int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2809{
2810 enum drbd_ret_code retcode;
2811 struct timeout_parms tp;
2812 int err;
2813
2814 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2815 if (!adm_ctx.reply_skb)
2816 return retcode;
2817 if (retcode != NO_ERROR)
2818 goto out;
2819
2820 tp.timeout_type =
2821 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2822 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2823 UT_DEFAULT;
2824
2825 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2826 if (err) {
2827 nlmsg_free(adm_ctx.reply_skb);
2828 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002829 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002830out:
2831 drbd_adm_finish(info, retcode);
2832 return 0;
2833}
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002834
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002835int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2836{
2837 struct drbd_conf *mdev;
2838 enum drbd_ret_code retcode;
2839
2840 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2841 if (!adm_ctx.reply_skb)
2842 return retcode;
2843 if (retcode != NO_ERROR)
2844 goto out;
2845
2846 mdev = adm_ctx.mdev;
2847 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2848 /* resume from last known position, if possible */
2849 struct start_ov_parms parms =
2850 { .ov_start_sector = mdev->ov_start_sector };
Lars Ellenbergf3990022011-03-23 14:31:09 +01002851 int err = start_ov_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002852 if (err) {
2853 retcode = ERR_MANDATORY_TAG;
2854 drbd_msg_put_info(from_attrs_err_to_txt(err));
2855 goto out;
2856 }
2857 /* w_make_ov_request expects position to be aligned */
2858 mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
2859 }
Lars Ellenberg873b0d52011-01-21 22:53:48 +01002860	/* If there is still bitmap IO pending, e.g. a previous resync or verify
 2861	 * that is just finishing, wait for it before requesting a new resync. */
2862 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002863	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2864out:
2865 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002866 return 0;
2867}
2868
2869
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002870int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002871{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002872 struct drbd_conf *mdev;
2873 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002874 int skip_initial_sync = 0;
2875 int err;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002876 struct new_c_uuid_parms args;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002877
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002878 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2879 if (!adm_ctx.reply_skb)
2880 return retcode;
2881 if (retcode != NO_ERROR)
2882 goto out_nolock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002883
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002884 mdev = adm_ctx.mdev;
2885 memset(&args, 0, sizeof(args));
2886 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002887 err = new_c_uuid_parms_from_attrs(&args, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002888 if (err) {
2889 retcode = ERR_MANDATORY_TAG;
2890 drbd_msg_put_info(from_attrs_err_to_txt(err));
2891 goto out_nolock;
2892 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002893 }
2894
Philipp Reisner8410da82011-02-11 20:11:10 +01002895 mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002896
2897 if (!get_ldev(mdev)) {
2898 retcode = ERR_NO_DISK;
2899 goto out;
2900 }
2901
 2902	 /* this is "skip initial sync", assumed to be clean */
Philipp Reisner31890f42011-01-19 14:12:51 +01002903 if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07002904 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2905 dev_info(DEV, "Preparing to skip initial sync\n");
2906 skip_initial_sync = 1;
2907 } else if (mdev->state.conn != C_STANDALONE) {
2908 retcode = ERR_CONNECTED;
2909 goto out_dec;
2910 }
2911
2912 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2913 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2914
2915 if (args.clear_bm) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002916 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2917 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002918 if (err) {
2919 dev_err(DEV, "Writing bitmap failed with %d\n",err);
2920 retcode = ERR_IO_MD_DISK;
2921 }
2922 if (skip_initial_sync) {
2923 drbd_send_uuids_skip_initial_sync(mdev);
2924 _drbd_uuid_set(mdev, UI_BITMAP, 0);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002925 drbd_print_uuids(mdev, "cleared bitmap UUID");
Philipp Reisner87eeee42011-01-19 14:16:30 +01002926 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002927 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2928 CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002929 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002930 }
2931 }
2932
2933 drbd_md_sync(mdev);
2934out_dec:
2935 put_ldev(mdev);
2936out:
Philipp Reisner8410da82011-02-11 20:11:10 +01002937 mutex_unlock(mdev->state_mutex);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002938out_nolock:
2939 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002940 return 0;
2941}
2942
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002943static enum drbd_ret_code
2944drbd_check_conn_name(const char *name)
Philipp Reisner774b3052011-02-22 02:07:03 -05002945{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002946 if (!name || !name[0]) {
2947 drbd_msg_put_info("connection name missing");
2948 return ERR_MANDATORY_TAG;
2949 }
2950 /* if we want to use these in sysfs/configfs/debugfs some day,
2951 * we must not allow slashes */
2952 if (strchr(name, '/')) {
2953 drbd_msg_put_info("invalid connection name");
2954 return ERR_INVALID_REQUEST;
2955 }
2956 return NO_ERROR;
2957}
Philipp Reisner774b3052011-02-22 02:07:03 -05002958
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002959int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2960{
2961 enum drbd_ret_code retcode;
2962
2963 retcode = drbd_adm_prepare(skb, info, 0);
2964 if (!adm_ctx.reply_skb)
2965 return retcode;
2966 if (retcode != NO_ERROR)
2967 goto out;
2968
2969 retcode = drbd_check_conn_name(adm_ctx.conn_name);
2970 if (retcode != NO_ERROR)
2971 goto out;
2972
2973 if (adm_ctx.tconn) {
Lars Ellenberg38f19612011-03-14 13:22:35 +01002974 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2975 retcode = ERR_INVALID_REQUEST;
2976 drbd_msg_put_info("connection exists");
2977 }
2978 /* else: still NO_ERROR */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002979 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05002980 }
2981
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002982 if (!conn_create(adm_ctx.conn_name))
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002983 retcode = ERR_NOMEM;
2984out:
2985 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05002986 return 0;
2987}
2988
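/*
 * Note on the NLM_F_EXCL check above (and the ERR_MINOR_EXISTS case in
 * drbd_adm_add_minor() below): creating an object that already exists is
 * treated as success unless the requester explicitly set NLM_F_EXCL in the
 * netlink message header, mirroring the open(2) O_CREAT/O_EXCL convention.
 */
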
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002989int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05002990{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002991 struct drbd_genlmsghdr *dh = info->userhdr;
2992 enum drbd_ret_code retcode;
Philipp Reisner774b3052011-02-22 02:07:03 -05002993
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002994 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2995 if (!adm_ctx.reply_skb)
2996 return retcode;
2997 if (retcode != NO_ERROR)
2998 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05002999
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003000 /* FIXME drop minor_count parameter, limit to MINORMASK */
3001 if (dh->minor >= minor_count) {
3002 drbd_msg_put_info("requested minor out of range");
3003 retcode = ERR_INVALID_REQUEST;
3004 goto out;
3005 }
Andreas Gruenbacher0c8e36d2011-03-30 16:00:17 +02003006 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003007 drbd_msg_put_info("requested volume id out of range");
3008 retcode = ERR_INVALID_REQUEST;
3009 goto out;
Philipp Reisner774b3052011-02-22 02:07:03 -05003010 }
3011
Lars Ellenberg38f19612011-03-14 13:22:35 +01003012 /* drbd_adm_prepare made sure already
3013 * that mdev->tconn and mdev->vnr match the request. */
3014 if (adm_ctx.mdev) {
3015 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3016 retcode = ERR_MINOR_EXISTS;
3017 /* else: still NO_ERROR */
3018 goto out;
3019 }
3020
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07003021 down_write(&drbd_cfg_rwsem);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003022 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
Philipp Reisnerd3fcb492011-04-13 14:46:05 -07003023 up_write(&drbd_cfg_rwsem);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003024out:
3025 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003026 return 0;
3027}
3028
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003029static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
3030{
3031 if (mdev->state.disk == D_DISKLESS &&
 3032	 /* no need to require mdev->state.conn == C_STANDALONE here as well;
 3033	 * we may want to delete a minor from a live replication group.
3034 */
3035 mdev->state.role == R_SECONDARY) {
Philipp Reisner81fa2e62011-05-04 15:10:30 +02003036 idr_remove(&mdev->tconn->volumes, mdev->vnr);
3037 idr_remove(&minors, mdev_to_minor(mdev));
3038 del_gendisk(mdev->vdisk);
3039 synchronize_rcu();
3040 kref_put(&mdev->kref, &drbd_minor_destroy);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003041 return NO_ERROR;
3042 } else
3043 return ERR_MINOR_CONFIGURED;
3044}
3045
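/*
 * The teardown order above follows the usual pattern for objects that are
 * reachable through lockless lookups and pinned by a kref: first remove
 * the object from all lookup structures, then wait for an RCU grace period
 * so that lookups which may still hold a pointer have finished, and only
 * then drop the reference that may free it.  Schematically (hypothetical
 * names, illustration only):
 */
#if 0	/* illustration only, not compiled */
	idr_remove(&lookup_idr, obj->id);	/* no new lookups can find obj */
	synchronize_rcu();			/* in-flight lookups have completed */
	kref_put(&obj->kref, example_release);	/* may free obj now */
#endif
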
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003046int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003047{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003048 enum drbd_ret_code retcode;
3049
3050 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3051 if (!adm_ctx.reply_skb)
3052 return retcode;
3053 if (retcode != NO_ERROR)
3054 goto out;
3055
Philipp Reisneref356262011-04-13 14:21:29 -07003056 down_write(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003057 retcode = adm_delete_minor(adm_ctx.mdev);
Philipp Reisneref356262011-04-13 14:21:29 -07003058 up_write(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003059out:
3060 drbd_adm_finish(info, retcode);
3061 return 0;
3062}
3063
3064int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3065{
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003066 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003067 struct drbd_conf *mdev;
3068 unsigned i;
3069
3070 retcode = drbd_adm_prepare(skb, info, 0);
3071 if (!adm_ctx.reply_skb)
3072 return retcode;
3073 if (retcode != NO_ERROR)
3074 goto out;
3075
3076 if (!adm_ctx.tconn) {
3077 retcode = ERR_CONN_NOT_KNOWN;
3078 goto out;
3079 }
3080
Philipp Reisneref356262011-04-13 14:21:29 -07003081 down_read(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003082 /* demote */
3083 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3084 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3085 if (retcode < SS_SUCCESS) {
3086 drbd_msg_put_info("failed to demote");
3087 goto out_unlock;
3088 }
3089 }
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003090 up_read(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003091
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003092 /* disconnect; may stop the receiver;
3093 * must not hold the drbd_cfg_rwsem */
3094 retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3095 if (retcode < SS_SUCCESS) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003096 drbd_msg_put_info("failed to disconnect");
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003097 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003098 }
3099
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003100 down_read(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003101 /* detach */
3102 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003103 retcode = adm_detach(mdev);
3104 if (retcode < SS_SUCCESS) {
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003105 drbd_msg_put_info("failed to detach");
3106 goto out_unlock;
3107 }
3108 }
Philipp Reisneref356262011-04-13 14:21:29 -07003109 up_read(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003110
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02003111 /* If we reach this, all volumes (of this tconn) are Secondary,
3112 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
 3113	 * actually stopped; state handling only does drbd_thread_stop_nowait().
3114 * This needs to be done without holding drbd_cfg_rwsem. */
3115 drbd_thread_stop(&adm_ctx.tconn->worker);
3116
3117 /* Now, nothing can fail anymore */
3118
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003119 /* delete volumes */
Philipp Reisneref356262011-04-13 14:21:29 -07003120 down_write(&drbd_cfg_rwsem);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003121 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3122 retcode = adm_delete_minor(mdev);
3123 if (retcode != NO_ERROR) {
3124 /* "can not happen" */
3125 drbd_msg_put_info("failed to delete volume");
Philipp Reisneref356262011-04-13 14:21:29 -07003126 up_write(&drbd_cfg_rwsem);
3127 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003128 }
3129 }
3130
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003131 /* delete connection */
3132 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003133 list_del(&adm_ctx.tconn->all_tconn);
3134 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3135
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003136 retcode = NO_ERROR;
3137 } else {
3138 /* "can not happen" */
3139 retcode = ERR_CONN_IN_USE;
3140 drbd_msg_put_info("failed to delete connection");
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003141 }
Philipp Reisneref356262011-04-13 14:21:29 -07003142 up_write(&drbd_cfg_rwsem);
3143 goto out;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01003144out_unlock:
Philipp Reisneref356262011-04-13 14:21:29 -07003145 up_read(&drbd_cfg_rwsem);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003146out:
3147 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003148 return 0;
3149}
3150
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003151int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
Philipp Reisner774b3052011-02-22 02:07:03 -05003152{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003153 enum drbd_ret_code retcode;
3154
3155 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
3156 if (!adm_ctx.reply_skb)
3157 return retcode;
3158 if (retcode != NO_ERROR)
3159 goto out;
3160
Philipp Reisneref356262011-04-13 14:21:29 -07003161 down_write(&drbd_cfg_rwsem);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003162 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02003163 list_del(&adm_ctx.tconn->all_tconn);
3164 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3165
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003166 retcode = NO_ERROR;
Philipp Reisner774b3052011-02-22 02:07:03 -05003167 } else {
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003168 retcode = ERR_CONN_IN_USE;
Philipp Reisner774b3052011-02-22 02:07:03 -05003169 }
Philipp Reisneref356262011-04-13 14:21:29 -07003170 up_write(&drbd_cfg_rwsem);
Philipp Reisner774b3052011-02-22 02:07:03 -05003171
Lars Ellenberg992d6e92011-05-02 11:47:18 +02003172 if (retcode == NO_ERROR)
3173 drbd_thread_stop(&adm_ctx.tconn->worker);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003174out:
3175 drbd_adm_finish(info, retcode);
Philipp Reisner774b3052011-02-22 02:07:03 -05003176 return 0;
3177}
3178
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003179void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003180{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003181 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3182 struct sk_buff *msg;
3183 struct drbd_genlmsghdr *d_out;
3184 unsigned seq;
3185 int err = -ENOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003186
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003187 seq = atomic_inc_return(&drbd_genl_seq);
3188 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3189 if (!msg)
3190 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003191
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003192 err = -EMSGSIZE;
3193 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3194 if (!d_out) /* cannot happen, but anyways. */
3195 goto nla_put_failure;
3196 d_out->minor = mdev_to_minor(mdev);
3197 d_out->ret_code = 0;
Philipp Reisner9f5180e2009-10-06 09:30:14 +02003198
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003199 if (nla_put_status_info(msg, mdev, sib))
3200 goto nla_put_failure;
3201 genlmsg_end(msg, d_out);
3202 err = drbd_genl_multicast_events(msg, 0);
3203 /* msg has been consumed or freed in netlink_broadcast() */
3204 if (err && err != -ESRCH)
3205 goto failed;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003206
Philipp Reisnerb411b362009-09-25 16:07:19 -07003207 return;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003208
3209nla_put_failure:
3210 nlmsg_free(msg);
3211failed:
3212 dev_err(DEV, "Error %d while broadcasting event. "
3213 "Event seq:%u sib_reason:%u\n",
3214 err, seq, sib->sib_reason);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003215}
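/*
 * Event-broadcast summary (descriptive note): the sequence above is the
 * usual genetlink multicast recipe -- genlmsg_new() to allocate the skb,
 * genlmsg_put() to start the message, nla_put_status_info() to fill the
 * payload, genlmsg_end() to close it, and a multicast send that consumes
 * or frees the skb.  -ESRCH from the send merely means that no listener
 * is currently subscribed, so it is deliberately not treated as an error.
 */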