/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

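/* Finalize the netlink message in @skb and send it back to the
 * requester identified by @info. */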
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
			   adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			   adm_ctx.minor, adm_ctx.volume,
			   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

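/* Export the peer's address family and address to the userspace helper
 * environment via DRBD_PEER_AF and DRBD_PEER_ADDRESS. */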
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc) {
		switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
	}
	rcu_read_unlock();
}

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

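/* Write out the meta data of all volumes of this connection. */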
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_md_sync(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

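/* Like drbd_khelper(), but invoked for a whole connection rather than
 * a single minor. */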
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

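/* Return the strictest fencing policy configured on any of the
 * connection's volumes that are at least Consistent. */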
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

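/* Run the fence-peer helper and map its exit code onto a state change.
 * Returns true if the peer's disks can now be considered outdated (or worse). */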
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

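/* Change the role of the device to @new_role, retrying a few times if
 * the peer has to be outdated or fenced first. */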
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

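/* Pick the new device size (in sectors) from the local size, the peer's
 * size and a possible user-requested size. */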
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
					e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

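/* Adjust the request queue limits, taking the backing device's limits
 * into account but never exceeding @max_bio_size. */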
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because new from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn);
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}

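/* Netlink handler: change the disk options of an already attached device. */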
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	enforce_disk_conf_limits(new_disk_conf);

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);
	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	goto success;

fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

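/* Netlink handler: attach a backing device and meta data device to a minor. */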
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001228int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001229{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001230 struct drbd_conf *mdev;
1231 int err;
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01001232 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001233 enum determine_dev_size dd;
1234 sector_t max_possible_sectors;
1235 sector_t min_md_device_sectors;
1236 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001237 struct disk_conf *new_disk_conf = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001238 struct block_device *bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001239 struct lru_cache *resync_lru = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001240 struct fifo_buffer *new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001241 union drbd_state ns, os;
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001242 enum drbd_state_rv rv;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001243 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001244 int cp_discovered = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001245
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001246 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1247 if (!adm_ctx.reply_skb)
1248 return retcode;
1249 if (retcode != NO_ERROR)
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001250 goto finish;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001251
1252 mdev = adm_ctx.mdev;
Philipp Reisner0e29d162011-02-18 14:23:11 +01001253 conn_reconfig_start(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001254
1255 /* if you want to reconfigure, please tear down first */
1256 if (mdev->state.disk > D_DISKLESS) {
1257 retcode = ERR_DISK_CONFIGURED;
1258 goto fail;
1259 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001260 /* It may just now have detached because of IO error. Make sure
1261 * drbd_ldev_destroy is done already, we may end up here very fast,
1262 * e.g. if someone calls attach from the on-io-error handler,
1263 * to realize a "hot spare" feature (not that I'd recommend that) */
1264 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001265
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001266 /* allocation not in the IO path, drbdsetup context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001267 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1268 if (!nbc) {
1269 retcode = ERR_NOMEM;
1270 goto fail;
1271 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001272 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1273 if (!new_disk_conf) {
1274 retcode = ERR_NOMEM;
1275 goto fail;
1276 }
1277 nbc->disk_conf = new_disk_conf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001278
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001279 set_disk_conf_defaults(new_disk_conf);
1280 err = disk_conf_from_attrs(new_disk_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001281 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001282 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001283 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001284 goto fail;
1285 }
1286
Philipp Reisnerd589a212011-05-04 10:06:52 +02001287 enforce_disk_conf_limits(new_disk_conf);
1288
Philipp Reisner9958c852011-05-03 16:19:31 +02001289 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1290 if (!new_plan) {
1291 retcode = ERR_NOMEM;
1292 goto fail;
1293 }
1294
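
/*
 * Illustrative sketch (user-space C, not part of this file): how the
 * fifo_alloc() size above works out.  c_plan_ahead is configured in
 * units of 0.1 seconds; assuming SLEEP_TIME is HZ/10 (the resync
 * worker period in drbd_int.h), the fifo gets one slot per worker
 * tick over the whole plan-ahead window.
 */
#include <stdio.h>

static unsigned int plan_fifo_slots(unsigned int c_plan_ahead, unsigned int hz)
{
	unsigned int sleep_time = hz / 10;	/* assumed worker period, in jiffies */

	return (c_plan_ahead * 10 * sleep_time) / hz;
}

int main(void)
{
	/* 20 * 0.1s = 2s of plan-ahead -> 20 slots, independent of HZ */
	printf("%u\n", plan_fifo_slots(20, 250));
	printf("%u\n", plan_fifo_slots(20, 1000));
	return 0;
}
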
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001295 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001296 retcode = ERR_MD_IDX_INVALID;
1297 goto fail;
1298 }
1299
Philipp Reisner44ed1672011-04-19 17:10:19 +02001300 rcu_read_lock();
1301 nc = rcu_dereference(mdev->tconn->net_conf);
1302 if (nc) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001303 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001304 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001305 retcode = ERR_STONITH_AND_PROT_A;
1306 goto fail;
1307 }
1308 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001309 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001310
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001311 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001312 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
Tejun Heoe525fd82010-11-13 11:55:17 +01001313 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001314 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001315 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001316 retcode = ERR_OPEN_DISK;
1317 goto fail;
1318 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001319 nbc->backing_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001320
Tejun Heoe525fd82010-11-13 11:55:17 +01001321 /*
1322 * meta_dev_idx >= 0: external fixed size, possibly multiple
1323 * drbd sharing one meta device. TODO in that case, paranoia
1324 * check that [md_bdev, meta_dev_idx] is not yet used by some
1325 * other drbd minor! (if you use drbd.conf + drbdadm, that
1326 * should check it for you already; but if you don't, or
1327 * someone fooled it, we need to double check here)
1328 */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001329 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
Tejun Heod4d77622010-11-13 11:55:18 +01001330 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001331 (new_disk_conf->meta_dev_idx < 0) ?
Tejun Heod4d77622010-11-13 11:55:18 +01001332 (void *)mdev : (void *)drbd_m_holder);
Tejun Heoe525fd82010-11-13 11:55:17 +01001333 if (IS_ERR(bdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001334 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
Tejun Heoe525fd82010-11-13 11:55:17 +01001335 PTR_ERR(bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001336 retcode = ERR_OPEN_MD_DISK;
1337 goto fail;
1338 }
Tejun Heoe525fd82010-11-13 11:55:17 +01001339 nbc->md_bdev = bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001340
Tejun Heoe525fd82010-11-13 11:55:17 +01001341 if ((nbc->backing_bdev == nbc->md_bdev) !=
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001342 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1343 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001344 retcode = ERR_MD_IDX_INVALID;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001345 goto fail;
1346 }
1347
1348 resync_lru = lc_create("resync", drbd_bm_ext_cache,
Lars Ellenberg46a15bc2011-02-21 13:21:01 +01001349 1, 61, sizeof(struct bm_extent),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001350 offsetof(struct bm_extent, lce));
1351 if (!resync_lru) {
1352 retcode = ERR_NOMEM;
Tejun Heoe525fd82010-11-13 11:55:17 +01001353 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001354 }
1355
1356 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1357 drbd_md_set_sector_offsets(mdev, nbc);
1358
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001359 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001360 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1361 (unsigned long long) drbd_get_max_capacity(nbc),
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001362 (unsigned long long) new_disk_conf->disk_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001363 retcode = ERR_DISK_TO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001364 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001365 }
1366
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001367 if (new_disk_conf->meta_dev_idx < 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001368 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1369 /* at least one MB, otherwise it does not make sense */
1370 min_md_device_sectors = (2<<10);
1371 } else {
1372 max_possible_sectors = DRBD_MAX_SECTORS;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001373 min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001374 }
1375
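
/*
 * Illustrative sketch (user-space C): the minimum meta-device sizes
 * computed above.  Internal/flexible meta data needs at least 1 MiB
 * (2<<10 sectors of 512 bytes); for a fixed external index, each slot
 * reserves MD_RESERVED_SECT (assumed to be 128 MiB, i.e. 128 << 11
 * sectors, per drbd_int.h), and index N needs N+1 slots.
 */
#include <stdio.h>

#define EX_MD_RESERVED_SECT	(128ULL << 11)	/* assumed value */

int main(void)
{
	int idx;

	printf("internal/flexible: %u sectors\n", 2 << 10);
	for (idx = 0; idx <= 2; idx++)
		printf("meta_dev_idx=%d: %llu sectors\n",
		       idx, EX_MD_RESERVED_SECT * (idx + 1));
	return 0;
}
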
Philipp Reisnerb411b362009-09-25 16:07:19 -07001376 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1377 retcode = ERR_MD_DISK_TO_SMALL;
1378 dev_warn(DEV, "refusing attach: md-device too small, "
1379 "at least %llu sectors needed for this meta-disk type\n",
1380 (unsigned long long) min_md_device_sectors);
Tejun Heoe525fd82010-11-13 11:55:17 +01001381 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001382 }
1383
1384 /* Make sure the new disk is big enough
1385 * (we may currently be R_PRIMARY with no local disk...) */
1386 if (drbd_get_max_capacity(nbc) <
1387 drbd_get_capacity(mdev->this_bdev)) {
1388 retcode = ERR_DISK_TO_SMALL;
Tejun Heoe525fd82010-11-13 11:55:17 +01001389 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001390 }
1391
1392 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1393
Lars Ellenberg13529942009-10-12 19:07:49 +02001394 if (nbc->known_size > max_possible_sectors) {
 1395 dev_warn(DEV, "==> truncating very large lower-level device "
 1396 "to the maximum currently possible %llu sectors <==\n",
1397 (unsigned long long) max_possible_sectors);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001398 if (new_disk_conf->meta_dev_idx >= 0)
Lars Ellenberg13529942009-10-12 19:07:49 +02001399 dev_warn(DEV, "==>> using internal or flexible "
1400 "meta data may help <<==\n");
1401 }
1402
Philipp Reisnerb411b362009-09-25 16:07:19 -07001403 drbd_suspend_io(mdev);
1404 /* also wait for the last barrier ack. */
Philipp Reisner2aebfab2011-03-28 16:48:11 +02001405 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001406 /* and for any other previously queued work */
Philipp Reisnera21e9292011-02-08 15:08:49 +01001407 drbd_flush_workqueue(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001408
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001409 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1410 retcode = rv; /* FIXME: Type mismatch. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001411 drbd_resume_io(mdev);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001412 if (rv < SS_SUCCESS)
Tejun Heoe525fd82010-11-13 11:55:17 +01001413 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001414
1415 if (!get_ldev_if_state(mdev, D_ATTACHING))
1416 goto force_diskless;
1417
1418 drbd_md_set_sector_offsets(mdev, nbc);
1419
1420 if (!mdev->bitmap) {
1421 if (drbd_bm_init(mdev)) {
1422 retcode = ERR_NOMEM;
1423 goto force_diskless_dec;
1424 }
1425 }
1426
1427 retcode = drbd_md_read(mdev, nbc);
1428 if (retcode != NO_ERROR)
1429 goto force_diskless_dec;
1430
1431 if (mdev->state.conn < C_CONNECTED &&
1432 mdev->state.role == R_PRIMARY &&
1433 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1434 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1435 (unsigned long long)mdev->ed_uuid);
1436 retcode = ERR_DATA_NOT_CURRENT;
1437 goto force_diskless_dec;
1438 }
1439
1440 /* Since we are diskless, fix the activity log first... */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001441 if (drbd_check_al_size(mdev, new_disk_conf)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001442 retcode = ERR_NOMEM;
1443 goto force_diskless_dec;
1444 }
1445
 1446 /* Prevent shrinking of consistent devices! */
1447 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001448 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001449 dev_warn(DEV, "refusing to truncate a consistent device\n");
1450 retcode = ERR_DISK_TO_SMALL;
1451 goto force_diskless_dec;
1452 }
1453
1454 if (!drbd_al_read_log(mdev, nbc)) {
1455 retcode = ERR_IO_MD_DISK;
1456 goto force_diskless_dec;
1457 }
1458
Philipp Reisnerb411b362009-09-25 16:07:19 -07001459 /* Reset the "barriers don't work" bits here, then force meta data to
1460 * be written, to ensure we determine if barriers are supported. */
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001461 if (new_disk_conf->md_flushes)
Philipp Reisnera8a4e512010-08-25 10:21:04 +02001462 clear_bit(MD_NO_FUA, &mdev->flags);
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001463 else
1464 set_bit(MD_NO_FUA, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001465
1466 /* Point of no return reached.
1467 * Devices and memory are no longer released by error cleanup below.
 1468 * Now mdev takes over responsibility, and the state engine should
1469 * clean it up somewhere. */
1470 D_ASSERT(mdev->ldev == NULL);
1471 mdev->ldev = nbc;
1472 mdev->resync = resync_lru;
Philipp Reisner9958c852011-05-03 16:19:31 +02001473 mdev->rs_plan_s = new_plan;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001474 nbc = NULL;
1475 resync_lru = NULL;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001476 new_disk_conf = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001477 new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001478
Philipp Reisner2451fc32010-08-24 13:43:11 +02001479 mdev->write_ordering = WO_bdev_flush;
1480 drbd_bump_write_ordering(mdev, WO_bdev_flush);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001481
1482 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1483 set_bit(CRASHED_PRIMARY, &mdev->flags);
1484 else
1485 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1486
Philipp Reisner894c6a92010-06-18 16:03:20 +02001487 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02001488 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001489 set_bit(CRASHED_PRIMARY, &mdev->flags);
1490 cp_discovered = 1;
1491 }
1492
1493 mdev->send_cnt = 0;
1494 mdev->recv_cnt = 0;
1495 mdev->read_cnt = 0;
1496 mdev->writ_cnt = 0;
1497
Philipp Reisner99432fc2011-05-20 16:39:13 +02001498 drbd_reconsider_max_bio_size(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001499
1500 /* If I am currently not R_PRIMARY,
1501 * but meta data primary indicator is set,
1502 * I just now recover from a hard crash,
1503 * and have been R_PRIMARY before that crash.
1504 *
1505 * Now, if I had no connection before that crash
1506 * (have been degraded R_PRIMARY), chances are that
1507 * I won't find my peer now either.
1508 *
1509 * In that case, and _only_ in that case,
1510 * we use the degr-wfc-timeout instead of the default,
1511 * so we can automatically recover from a crash of a
1512 * degraded but active "cluster" after a certain timeout.
1513 */
1514 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1515 if (mdev->state.role != R_PRIMARY &&
1516 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1517 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1518 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1519
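
/*
 * Illustrative sketch (user-space C): the flag juggling above reduces
 * to "we crashed while we were a degraded primary", expressed here as
 * a standalone predicate (hypothetical helper, not a drbd function).
 */
#include <stdbool.h>
#include <stdio.h>

static bool use_degr_wfc_timeout(bool role_primary,
				 bool md_primary_ind,
				 bool md_connected_ind)
{
	/* was primary before the crash, not primary again yet,
	 * and had no connected peer at crash time */
	return !role_primary && md_primary_ind && !md_connected_ind;
}

int main(void)
{
	printf("%d\n", use_degr_wfc_timeout(false, true, false));	/* 1 */
	return 0;
}
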
Bart Van Assche24c48302011-05-21 18:32:29 +02001520 dd = drbd_determine_dev_size(mdev, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001521 if (dd == dev_size_error) {
1522 retcode = ERR_NOMEM_BITMAP;
1523 goto force_diskless_dec;
1524 } else if (dd == grew)
1525 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1526
1527 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1528 dev_info(DEV, "Assuming that all blocks are out of sync "
1529 "(aka FullSync)\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001530 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1531 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001532 retcode = ERR_IO_MD_DISK;
1533 goto force_diskless_dec;
1534 }
1535 } else {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001536 if (drbd_bitmap_io(mdev, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01001537 "read from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001538 retcode = ERR_IO_MD_DISK;
1539 goto force_diskless_dec;
1540 }
1541 }
1542
1543 if (cp_discovered) {
1544 drbd_al_apply_to_bm(mdev);
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001545 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1546 "crashed primary apply AL", BM_LOCKED_MASK)) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001547 retcode = ERR_IO_MD_DISK;
1548 goto force_diskless_dec;
1549 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001550 }
1551
Philipp Reisner07782862010-08-31 12:00:50 +02001552 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1553 drbd_suspend_al(mdev); /* IO is still suspended here... */
1554
Philipp Reisner87eeee42011-01-19 14:16:30 +01001555 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner78bae592011-03-28 15:40:12 +02001556 os = drbd_read_state(mdev);
1557 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001558 /* If MDF_CONSISTENT is not set, go into D_INCONSISTENT state;
 1559 otherwise investigate MDF_WAS_UP_TO_DATE...
 1560 If MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state,
 1561 otherwise into D_CONSISTENT state.
 1562 */
1563 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1564 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1565 ns.disk = D_CONSISTENT;
1566 else
1567 ns.disk = D_OUTDATED;
1568 } else {
1569 ns.disk = D_INCONSISTENT;
1570 }
1571
1572 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1573 ns.pdsk = D_OUTDATED;
1574
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001575 rcu_read_lock();
1576 if (ns.disk == D_CONSISTENT &&
1577 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001578 ns.disk = D_UP_TO_DATE;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001579 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001580
1581 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1582 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1583 this point, because drbd_request_state() modifies these
1584 flags. */
1585
 1586 /* In case we are C_CONNECTED, postpone any decision on the new disk
 1587 state until after the negotiation phase. */
1588 if (mdev->state.conn == C_CONNECTED) {
1589 mdev->new_state_tmp.i = ns.i;
1590 ns.i = os.i;
1591 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02001592
1593 /* We expect to receive up-to-date UUIDs soon.
1594 To avoid a race in receive_state, free p_uuid while
1595 holding req_lock. I.e. atomic with the state change */
1596 kfree(mdev->p_uuid);
1597 mdev->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001598 }
1599
1600 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001601 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001602
1603 if (rv < SS_SUCCESS)
1604 goto force_diskless_dec;
1605
1606 if (mdev->state.role == R_PRIMARY)
1607 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1608 else
1609 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1610
1611 drbd_md_mark_dirty(mdev);
1612 drbd_md_sync(mdev);
1613
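
/*
 * Illustrative sketch (user-space C): bit 0 of the current UUID is
 * used as a "primary" role tag above, which is why the ed_uuid
 * comparison earlier in this function masks it out with ~((u64)1).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t uuid   = 0x89ab4567cdef0122ULL;	/* example value */
	uint64_t tagged = uuid | 1;			/* role became primary */

	/* same data generation, despite the differing role bit */
	printf("match: %d\n", (tagged & ~(uint64_t)1) == (uuid & ~(uint64_t)1));
	return 0;
}
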
1614 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1615 put_ldev(mdev);
Philipp Reisner0e29d162011-02-18 14:23:11 +01001616 conn_reconfig_done(mdev->tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001617 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001618 return 0;
1619
1620 force_diskless_dec:
1621 put_ldev(mdev);
1622 force_diskless:
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001623 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001624 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001625 fail:
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001626 conn_reconfig_done(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001627 if (nbc) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001628 if (nbc->backing_bdev)
1629 blkdev_put(nbc->backing_bdev,
1630 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1631 if (nbc->md_bdev)
1632 blkdev_put(nbc->md_bdev,
1633 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001634 kfree(nbc);
1635 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001636 kfree(new_disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001637 lc_destroy(resync_lru);
Philipp Reisner9958c852011-05-03 16:19:31 +02001638 kfree(new_plan);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001639
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001640 finish:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001641 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001642 return 0;
1643}
1644
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001645static int adm_detach(struct drbd_conf *mdev)
1646{
Philipp Reisner19f83c72011-03-29 14:21:03 +02001647 enum drbd_state_rv retcode;
Lars Ellenberg009ba892011-05-02 11:51:31 +02001648 int ret;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001649 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
Lars Ellenberg009ba892011-05-02 11:51:31 +02001650 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1651 /* D_FAILED will transition to DISKLESS. */
1652 ret = wait_event_interruptible(mdev->misc_wait,
1653 mdev->state.disk != D_FAILED);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001654 drbd_resume_io(mdev);
Lars Ellenberg009ba892011-05-02 11:51:31 +02001655 if ((int)retcode == (int)SS_IS_DISKLESS)
1656 retcode = SS_NOTHING_TO_DO;
1657 if (ret)
1658 retcode = ERR_INTR;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001659 return retcode;
1660}
1661
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001662/* Detaching the disk is a process in multiple stages. First we need to lock
1663 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1664 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1665 * internal references as well.
 1666 * Only then have we finally detached. */
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001667int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001668{
Lars Ellenberg9a0d9d02011-05-02 11:51:31 +02001669 enum drbd_ret_code retcode;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001670
1671 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1672 if (!adm_ctx.reply_skb)
1673 return retcode;
1674 if (retcode != NO_ERROR)
1675 goto out;
1676
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01001677 retcode = adm_detach(adm_ctx.mdev);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001678out:
1679 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001680 return 0;
1681}
1682
Lars Ellenbergf3990022011-03-23 14:31:09 +01001683static bool conn_resync_running(struct drbd_tconn *tconn)
1684{
1685 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001686 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001687 int vnr;
1688
Philipp Reisner695d08f2011-04-11 22:53:32 -07001689 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001690 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1691 if (mdev->state.conn == C_SYNC_SOURCE ||
1692 mdev->state.conn == C_SYNC_TARGET ||
1693 mdev->state.conn == C_PAUSED_SYNC_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001694 mdev->state.conn == C_PAUSED_SYNC_T) {
1695 rv = true;
1696 break;
1697 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001698 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001699 rcu_read_unlock();
1700
1701 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001702}
1703
1704static bool conn_ov_running(struct drbd_tconn *tconn)
1705{
1706 struct drbd_conf *mdev;
Philipp Reisner695d08f2011-04-11 22:53:32 -07001707 bool rv = false;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001708 int vnr;
1709
Philipp Reisner695d08f2011-04-11 22:53:32 -07001710 rcu_read_lock();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001711 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1712 if (mdev->state.conn == C_VERIFY_S ||
Philipp Reisner695d08f2011-04-11 22:53:32 -07001713 mdev->state.conn == C_VERIFY_T) {
1714 rv = true;
1715 break;
1716 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001717 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07001718 rcu_read_unlock();
1719
1720 return rv;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001721}
1722
Philipp Reisnercd643972011-04-13 18:00:59 -07001723static enum drbd_ret_code
Philipp Reisner44ed1672011-04-19 17:10:19 +02001724_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
Philipp Reisnercd643972011-04-13 18:00:59 -07001725{
1726 struct drbd_conf *mdev;
1727 int i;
1728
Philipp Reisnerdcb20d12011-05-16 14:30:24 +02001729 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1730 if (new_conf->wire_protocol != old_conf->wire_protocol)
1731 return ERR_NEED_APV_100;
1732
1733 if (new_conf->two_primaries != old_conf->two_primaries)
1734 return ERR_NEED_APV_100;
1735
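	/* note: "!new != !old" is true iff exactly one of the two
	 * strings is set, i.e. integrity checking was switched on or off */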
1736 if (!new_conf->integrity_alg != !old_conf->integrity_alg)
1737 return ERR_NEED_APV_100;
1738
1739 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1740 return ERR_NEED_APV_100;
1741 }
1742
1743 if (!new_conf->two_primaries &&
1744 conn_highest_role(tconn) == R_PRIMARY &&
1745 conn_highest_peer(tconn) == R_PRIMARY)
1746 return ERR_NEED_ALLOW_TWO_PRI;
Philipp Reisnerb032b6f2011-04-13 18:16:10 -07001747
Philipp Reisnercd643972011-04-13 18:00:59 -07001748 if (new_conf->two_primaries &&
1749 (new_conf->wire_protocol != DRBD_PROT_C))
1750 return ERR_NOT_PROTO_C;
1751
Philipp Reisnercd643972011-04-13 18:00:59 -07001752 idr_for_each_entry(&tconn->volumes, mdev, i) {
1753 if (get_ldev(mdev)) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001754 enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
Philipp Reisnercd643972011-04-13 18:00:59 -07001755 put_ldev(mdev);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001756 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
Philipp Reisnercd643972011-04-13 18:00:59 -07001757 return ERR_STONITH_AND_PROT_A;
Philipp Reisnercd643972011-04-13 18:00:59 -07001758 }
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02001759 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
Philipp Reisnercd643972011-04-13 18:00:59 -07001760 return ERR_DISCARD;
Philipp Reisnercd643972011-04-13 18:00:59 -07001761 }
Philipp Reisnercd643972011-04-13 18:00:59 -07001762
1763 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1764 return ERR_CONG_NOT_PROTO_A;
1765
1766 return NO_ERROR;
1767}
1768
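
/*
 * Illustrative sketch (user-space C): two of the connection-level
 * invariants enforced above, collapsed into standalone predicates.
 */
#include <stdbool.h>
#include <stdio.h>

enum proto { PROT_A = 1, PROT_B, PROT_C };

/* dual-primary requires synchronous replication (protocol C) */
static bool two_primaries_ok(bool two_primaries, enum proto p)
{
	return !two_primaries || p == PROT_C;
}

/* stonith fencing is not allowed with fully asynchronous protocol A */
static bool fencing_ok(bool stonith, enum proto p)
{
	return !stonith || p != PROT_A;
}

int main(void)
{
	printf("%d %d\n", two_primaries_ok(true, PROT_C),
	       fencing_ok(true, PROT_A));	/* 1 0 */
	return 0;
}
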
Philipp Reisner44ed1672011-04-19 17:10:19 +02001769static enum drbd_ret_code
1770check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1771{
1772 static enum drbd_ret_code rv;
1773 struct drbd_conf *mdev;
1774 int i;
1775
1776 rcu_read_lock();
1777 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1778 rcu_read_unlock();
1779
1780 /* tconn->volumes protected by genl_lock() here */
1781 idr_for_each_entry(&tconn->volumes, mdev, i) {
1782 if (!mdev->bitmap) {
 1783 if (drbd_bm_init(mdev))
1784 return ERR_NOMEM;
1785 }
1786 }
1787
1788 return rv;
1789}
1790
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001791struct crypto {
1792 struct crypto_hash *verify_tfm;
1793 struct crypto_hash *csums_tfm;
1794 struct crypto_hash *cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001795 struct crypto_hash *integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001796 void *int_dig_in;
1797 void *int_dig_vv;
1798};
1799
1800static int
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001801alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001802{
1803 if (!tfm_name[0])
1804 return NO_ERROR;
1805
1806 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1807 if (IS_ERR(*tfm)) {
1808 *tfm = NULL;
1809 return err_alg;
1810 }
1811
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001812 return NO_ERROR;
1813}
1814
1815static enum drbd_ret_code
1816alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1817{
1818 char hmac_name[CRYPTO_MAX_ALG_NAME];
1819 enum drbd_ret_code rv;
1820 int hash_size;
1821
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001822 rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1823 ERR_CSUMS_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001824 if (rv != NO_ERROR)
1825 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001826 rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1827 ERR_VERIFY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001828 if (rv != NO_ERROR)
1829 return rv;
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001830 rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1831 ERR_INTEGRITY_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001832 if (rv != NO_ERROR)
1833 return rv;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001834 if (new_conf->cram_hmac_alg[0] != 0) {
1835 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1836 new_conf->cram_hmac_alg);
1837
Andreas Gruenbacher4b6ad6d2011-04-29 10:20:08 +02001838 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1839 ERR_AUTH_ALG);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001840 }
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001841 if (crypto->integrity_tfm) {
1842 hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001843 crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
1844 if (!crypto->int_dig_in)
1845 return ERR_NOMEM;
1846 crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
1847 if (!crypto->int_dig_vv)
1848 return ERR_NOMEM;
1849 }
1850
1851 return rv;
1852}
1853
1854static void free_crypto(struct crypto *crypto)
1855{
1856 kfree(crypto->int_dig_in);
1857 kfree(crypto->int_dig_vv);
1858 crypto_free_hash(crypto->cram_hmac_tfm);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001859 crypto_free_hash(crypto->integrity_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001860 crypto_free_hash(crypto->csums_tfm);
1861 crypto_free_hash(crypto->verify_tfm);
1862}
1863
Lars Ellenbergf3990022011-03-23 14:31:09 +01001864int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1865{
1866 enum drbd_ret_code retcode;
1867 struct drbd_tconn *tconn;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001868 struct net_conf *old_conf, *new_conf = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001869 int err;
1870 int ovr; /* online verify running */
1871 int rsr; /* re-sync running */
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001872 struct crypto crypto = { };
Lars Ellenbergf3990022011-03-23 14:31:09 +01001873
1874 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1875 if (!adm_ctx.reply_skb)
1876 return retcode;
1877 if (retcode != NO_ERROR)
1878 goto out;
1879
1880 tconn = adm_ctx.tconn;
1881
1882 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1883 if (!new_conf) {
1884 retcode = ERR_NOMEM;
1885 goto out;
1886 }
1887
Lars Ellenbergf3990022011-03-23 14:31:09 +01001888 conn_reconfig_start(tconn);
1889
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001890 mutex_lock(&tconn->data.mutex);
Philipp Reisnera0095502011-05-03 13:14:15 +02001891 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001892 old_conf = tconn->net_conf;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001893
1894 if (!old_conf) {
1895 drbd_msg_put_info("net conf missing, try connect");
1896 retcode = ERR_INVALID_REQUEST;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001897 goto fail;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001898 }
1899
1900 *new_conf = *old_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001901 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001902 set_net_conf_defaults(new_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001903
Lars Ellenbergf3990022011-03-23 14:31:09 +01001904 err = net_conf_from_attrs_for_change(new_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02001905 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001906 retcode = ERR_MANDATORY_TAG;
1907 drbd_msg_put_info(from_attrs_err_to_txt(err));
1908 goto fail;
1909 }
1910
Philipp Reisnercd643972011-04-13 18:00:59 -07001911 retcode = check_net_options(tconn, new_conf);
1912 if (retcode != NO_ERROR)
1913 goto fail;
1914
Lars Ellenbergf3990022011-03-23 14:31:09 +01001915 /* re-sync running */
1916 rsr = conn_resync_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001917 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001918 retcode = ERR_CSUMS_RESYNC_RUNNING;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001919 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001920 }
1921
Lars Ellenbergf3990022011-03-23 14:31:09 +01001922 /* online verify running */
1923 ovr = conn_ov_running(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001924 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1925 retcode = ERR_VERIFY_RUNNING;
1926 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001927 }
1928
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001929 retcode = alloc_crypto(&crypto, new_conf);
1930 if (retcode != NO_ERROR)
1931 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001932
Philipp Reisner44ed1672011-04-19 17:10:19 +02001933 rcu_assign_pointer(tconn->net_conf, new_conf);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001934
1935 if (!rsr) {
1936 crypto_free_hash(tconn->csums_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001937 tconn->csums_tfm = crypto.csums_tfm;
1938 crypto.csums_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001939 }
1940 if (!ovr) {
1941 crypto_free_hash(tconn->verify_tfm);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001942 tconn->verify_tfm = crypto.verify_tfm;
1943 crypto.verify_tfm = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001944 }
1945
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001946 kfree(tconn->int_dig_in);
1947 tconn->int_dig_in = crypto.int_dig_in;
1948 kfree(tconn->int_dig_vv);
1949 tconn->int_dig_vv = crypto.int_dig_vv;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001950 crypto_free_hash(tconn->integrity_tfm);
1951 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02001952 if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001953 /* Do this without trying to take tconn->data.mutex again. */
Philipp Reisnerd659f2a2011-05-16 17:38:45 +02001954 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001955
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001956 crypto_free_hash(tconn->cram_hmac_tfm);
1957 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
1958
Philipp Reisnera0095502011-05-03 13:14:15 +02001959 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001960 mutex_unlock(&tconn->data.mutex);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001961 synchronize_rcu();
1962 kfree(old_conf);
1963
Lars Ellenbergf3990022011-03-23 14:31:09 +01001964 if (tconn->cstate >= C_WF_REPORT_PARAMS)
1965 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
1966
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001967 goto done;
1968
Lars Ellenbergf3990022011-03-23 14:31:09 +01001969 fail:
Philipp Reisnera0095502011-05-03 13:14:15 +02001970 mutex_unlock(&tconn->conf_update);
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001971 mutex_unlock(&tconn->data.mutex);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001972 free_crypto(&crypto);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001973 kfree(new_conf);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02001974 done:
Lars Ellenbergf3990022011-03-23 14:31:09 +01001975 conn_reconfig_done(tconn);
1976 out:
1977 drbd_adm_finish(info, retcode);
1978 return 0;
1979}
1980
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001981int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001982{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001983 struct drbd_conf *mdev;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001984 struct net_conf *old_conf, *new_conf = NULL;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02001985 struct crypto crypto = { };
Philipp Reisner80883192011-02-18 14:56:45 +01001986 struct drbd_tconn *oconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001987 struct drbd_tconn *tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001988 struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001989 enum drbd_ret_code retcode;
1990 int i;
1991 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001992
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001993 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1994 if (!adm_ctx.reply_skb)
1995 return retcode;
1996 if (retcode != NO_ERROR)
1997 goto out;
1998
1999 tconn = adm_ctx.tconn;
Philipp Reisner80883192011-02-18 14:56:45 +01002000 conn_reconfig_start(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002001
Philipp Reisner80883192011-02-18 14:56:45 +01002002 if (tconn->cstate > C_STANDALONE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002003 retcode = ERR_NET_CONFIGURED;
2004 goto fail;
2005 }
2006
 2007 /* allocation not in the IO path, drbdsetup / netlink process context */
Lars Ellenberg5979e362011-04-27 21:09:55 +02002008 new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002009 if (!new_conf) {
2010 retcode = ERR_NOMEM;
2011 goto fail;
2012 }
2013
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002014 set_net_conf_defaults(new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002015
Lars Ellenbergf3990022011-03-23 14:31:09 +01002016 err = net_conf_from_attrs(new_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002017 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002018 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002019 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002020 goto fail;
2021 }
2022
Philipp Reisnercd643972011-04-13 18:00:59 -07002023 retcode = check_net_options(tconn, new_conf);
2024 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002025 goto fail;
Philipp Reisner422028b2010-10-27 11:12:07 +02002026
Philipp Reisnerb411b362009-09-25 16:07:19 -07002027 retcode = NO_ERROR;
2028
2029 new_my_addr = (struct sockaddr *)&new_conf->my_addr;
2030 new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
Lars Ellenberg543cc102011-03-10 22:18:18 +01002031
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002032 /* No need for _rcu here. All reconfiguration is
Lars Ellenberg543cc102011-03-10 22:18:18 +01002033 * strictly serialized on genl_lock(). We are protected against
2034 * concurrent reconfiguration/addition/deletion */
Philipp Reisner80883192011-02-18 14:56:45 +01002035 list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02002036 struct net_conf *nc;
Philipp Reisner80883192011-02-18 14:56:45 +01002037 if (oconn == tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002038 continue;
Philipp Reisner44ed1672011-04-19 17:10:19 +02002039
2040 rcu_read_lock();
2041 nc = rcu_dereference(oconn->net_conf);
2042 if (nc) {
2043 taken_addr = (struct sockaddr *)&nc->my_addr;
2044 if (new_conf->my_addr_len == nc->my_addr_len &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07002045 !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
2046 retcode = ERR_LOCAL_ADDR;
2047
Philipp Reisner44ed1672011-04-19 17:10:19 +02002048 taken_addr = (struct sockaddr *)&nc->peer_addr;
2049 if (new_conf->peer_addr_len == nc->peer_addr_len &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07002050 !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
2051 retcode = ERR_PEER_ADDR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002052 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002053 rcu_read_unlock();
2054 if (retcode != NO_ERROR)
2055 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002056 }
2057
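
/*
 * Illustrative sketch (user-space C): the collision test used in the
 * loop above - two endpoints clash when both the address length and
 * the raw sockaddr bytes match.
 */
#include <string.h>
#include <stdio.h>

static int addr_eq(const void *a, int a_len, const void *b, int b_len)
{
	return a_len == b_len && memcmp(a, b, a_len) == 0;
}

int main(void)
{
	char x[] = "10.0.0.1:7789";	/* stand-ins for packed sockaddrs */
	char y[] = "10.0.0.1:7789";

	printf("collision: %d\n", addr_eq(x, sizeof(x), y, sizeof(y)));
	return 0;
}
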
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002058 retcode = alloc_crypto(&crypto, new_conf);
2059 if (retcode != NO_ERROR)
2060 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002061
Philipp Reisnerb411b362009-09-25 16:07:19 -07002062 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2063
Philipp Reisner80883192011-02-18 14:56:45 +01002064 conn_flush_workqueue(tconn);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002065
Philipp Reisnera0095502011-05-03 13:14:15 +02002066 mutex_lock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002067 old_conf = tconn->net_conf;
2068 if (old_conf) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002069 retcode = ERR_NET_CONFIGURED;
Philipp Reisnera0095502011-05-03 13:14:15 +02002070 mutex_unlock(&tconn->conf_update);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002071 goto fail;
2072 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02002073 rcu_assign_pointer(tconn->net_conf, new_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002074
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002075 conn_free_crypto(tconn);
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002076 tconn->int_dig_in = crypto.int_dig_in;
2077 tconn->int_dig_vv = crypto.int_dig_vv;
2078 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002079 tconn->integrity_tfm = crypto.integrity_tfm;
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002080 tconn->csums_tfm = crypto.csums_tfm;
2081 tconn->verify_tfm = crypto.verify_tfm;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002082
Philipp Reisnera0095502011-05-03 13:14:15 +02002083 mutex_unlock(&tconn->conf_update);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002084
Philipp Reisner695d08f2011-04-11 22:53:32 -07002085 rcu_read_lock();
Philipp Reisner80883192011-02-18 14:56:45 +01002086 idr_for_each_entry(&tconn->volumes, mdev, i) {
2087 mdev->send_cnt = 0;
2088 mdev->recv_cnt = 0;
Philipp Reisner80883192011-02-18 14:56:45 +01002089 }
Philipp Reisner695d08f2011-04-11 22:53:32 -07002090 rcu_read_unlock();
Lars Ellenberg5ee743e2011-04-26 16:22:25 +02002091
2092 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2093
Philipp Reisner80883192011-02-18 14:56:45 +01002094 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002095 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002096 return 0;
2097
2098fail:
Philipp Reisner0fd0ea02011-04-27 11:27:47 +02002099 free_crypto(&crypto);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002100 kfree(new_conf);
2101
Philipp Reisner80883192011-02-18 14:56:45 +01002102 conn_reconfig_done(tconn);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002103out:
2104 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002105 return 0;
2106}
2107
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002108static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2109{
2110 enum drbd_state_rv rv;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002111
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002112 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2113 force ? CS_HARD : 0);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002114
2115 switch (rv) {
2116 case SS_NOTHING_TO_DO:
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002117 break;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002118 case SS_ALREADY_STANDALONE:
2119 return SS_SUCCESS;
2120 case SS_PRIMARY_NOP:
2121 /* Our state checking code wants to see the peer outdated. */
2122 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002123 pdsk, D_OUTDATED), CS_VERBOSE);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002124 break;
2125 case SS_CW_FAILED_BY_PEER:
2126 /* The peer probably wants to see us outdated. */
2127 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2128 disk, D_OUTDATED), 0);
2129 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002130 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2131 CS_HARD);
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002132 }
2133 break;
2134 default:;
2135 /* no special handling necessary */
2136 }
2137
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002138 if (rv >= SS_SUCCESS) {
2139 enum drbd_state_rv rv2;
2140 /* No one else can reconfigure the network while I am here.
 2141 * The state handling only uses drbd_thread_stop_nowait();
 2142 * we really want to wait here until the receiver is gone.
2143 */
2144 drbd_thread_stop(&adm_ctx.tconn->receiver);
2145
2146 /* Race breaker. This additional state change request may be
2147 * necessary, if this was a forced disconnect during a receiver
2148 * restart. We may have "killed" the receiver thread just
2149 * after drbdd_init() returned. Typically, we should be
 2150 * C_STANDALONE by now, and this becomes a no-op.
2151 */
2152 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2153 CS_VERBOSE | CS_HARD);
2154 if (rv2 < SS_SUCCESS)
2155 conn_err(tconn,
2156 "unexpected rv2=%d in conn_try_disconnect()\n",
2157 rv2);
2158 }
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002159 return rv;
2160}
2161
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002162int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002163{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002164 struct disconnect_parms parms;
2165 struct drbd_tconn *tconn;
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002166 enum drbd_state_rv rv;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002167 enum drbd_ret_code retcode;
2168 int err;
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002169
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002170 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2171 if (!adm_ctx.reply_skb)
2172 return retcode;
2173 if (retcode != NO_ERROR)
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002174 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002175
2176 tconn = adm_ctx.tconn;
2177 memset(&parms, 0, sizeof(parms));
2178 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002179 err = disconnect_parms_from_attrs(&parms, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002180 if (err) {
2181 retcode = ERR_MANDATORY_TAG;
2182 drbd_msg_put_info(from_attrs_err_to_txt(err));
2183 goto fail;
2184 }
Philipp Reisner2561b9c2010-12-03 15:22:48 +01002185 }
2186
Lars Ellenberg85f75dd72011-03-15 16:26:37 +01002187 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2188 if (rv < SS_SUCCESS)
Lars Ellenbergf3dfa402011-05-02 10:45:05 +02002189 retcode = rv; /* FIXME: Type mismatch. */
2190 else
2191 retcode = NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002192 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002193 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002194 return 0;
2195}
2196
2197void resync_after_online_grow(struct drbd_conf *mdev)
2198{
2199 int iass; /* I am sync source */
2200
2201 dev_info(DEV, "Resync of new storage after online grow\n");
2202 if (mdev->state.role != mdev->state.peer)
2203 iass = (mdev->state.role == R_PRIMARY);
2204 else
Philipp Reisner25703f82011-02-07 14:35:25 +01002205 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002206
2207 if (iass)
2208 drbd_start_resync(mdev, C_SYNC_SOURCE);
2209 else
2210 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2211}
2212
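
/*
 * Illustrative sketch (user-space C): the sync-source election above.
 * If the roles differ, the primary resyncs the new space to the peer;
 * on a tie, the DISCARD_CONCURRENT bit (set on one side during the
 * connection handshake) breaks it.
 */
#include <stdbool.h>
#include <stdio.h>

static bool i_am_sync_source(bool i_am_primary, bool peer_is_primary,
			     bool discard_concurrent)
{
	if (i_am_primary != peer_is_primary)
		return i_am_primary;
	return discard_concurrent;
}

int main(void)
{
	printf("%d\n", i_am_sync_source(true, false, false));	/* 1 */
	printf("%d\n", i_am_sync_source(true, true, true));	/* 1 */
	return 0;
}
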
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002213int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002214{
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002215 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002216 struct resize_parms rs;
2217 struct drbd_conf *mdev;
2218 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002219 enum determine_dev_size dd;
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002220 enum dds_flags ddsf;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002221 sector_t u_size;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002222 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002223
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002224 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2225 if (!adm_ctx.reply_skb)
2226 return retcode;
2227 if (retcode != NO_ERROR)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002228 goto fail;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002229
2230 memset(&rs, 0, sizeof(struct resize_parms));
2231 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002232 err = resize_parms_from_attrs(&rs, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002233 if (err) {
2234 retcode = ERR_MANDATORY_TAG;
2235 drbd_msg_put_info(from_attrs_err_to_txt(err));
2236 goto fail;
2237 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002238 }
2239
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002240 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002241 if (mdev->state.conn > C_CONNECTED) {
2242 retcode = ERR_RESIZE_RESYNC;
2243 goto fail;
2244 }
2245
2246 if (mdev->state.role == R_SECONDARY &&
2247 mdev->state.peer == R_SECONDARY) {
2248 retcode = ERR_NO_PRIMARY;
2249 goto fail;
2250 }
2251
2252 if (!get_ldev(mdev)) {
2253 retcode = ERR_NO_DISK;
2254 goto fail;
2255 }
2256
Philipp Reisner31890f42011-01-19 14:12:51 +01002257 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002258 retcode = ERR_NEED_APV_93;
2259 goto fail;
2260 }
2261
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002262 rcu_read_lock();
2263 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2264 rcu_read_unlock();
2265 if (u_size != (sector_t)rs.resize_size) {
2266 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2267 if (!new_disk_conf) {
2268 retcode = ERR_NOMEM;
2269 goto fail;
2270 }
2271 }
2272
Philipp Reisner087c2492010-03-26 13:49:56 +01002273 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002274 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002275
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002276 if (new_disk_conf) {
2277 mutex_lock(&mdev->tconn->conf_update);
2278 old_disk_conf = mdev->ldev->disk_conf;
2279 *new_disk_conf = *old_disk_conf;
2280 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2281 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2282 mutex_unlock(&mdev->tconn->conf_update);
2283 synchronize_rcu();
2284 kfree(old_disk_conf);
2285 }
2286
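
/*
 * Illustrative sketch (user-space C11, a stand-in for the kernel RCU
 * pattern above): publish the new config with a release store (the
 * role of rcu_assign_pointer under conf_update), and reclaim the old
 * one only after all readers are done - which synchronize_rcu()
 * guarantees in the kernel.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conf { long disk_size; };

static _Atomic(struct conf *) cur_conf;

int main(void)
{
	struct conf *old, *new = malloc(sizeof(*new));

	atomic_store(&cur_conf, (struct conf *)calloc(1, sizeof(struct conf)));

	new->disk_size = 4096;
	old = atomic_load(&cur_conf);
	atomic_store_explicit(&cur_conf, new, memory_order_release);
	/* kernel: synchronize_rcu() here waits out all rcu_read_lock()
	 * sections before the old conf may be freed */
	free(old);

	printf("%ld\n", atomic_load(&cur_conf)->disk_size);
	return 0;
}
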
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002287 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
Bart Van Assche24c48302011-05-21 18:32:29 +02002288 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002289 drbd_md_sync(mdev);
2290 put_ldev(mdev);
2291 if (dd == dev_size_error) {
2292 retcode = ERR_NOMEM_BITMAP;
2293 goto fail;
2294 }
2295
Philipp Reisner087c2492010-03-26 13:49:56 +01002296 if (mdev->state.conn == C_CONNECTED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002297 if (dd == grew)
2298 set_bit(RESIZE_PENDING, &mdev->flags);
2299
2300 drbd_send_uuids(mdev);
Philipp Reisner6495d2c2010-03-24 16:07:04 +01002301 drbd_send_sizes(mdev, 1, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002302 }
2303
2304 fail:
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002305 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002306 return 0;
2307}
2308
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002309void drbd_set_res_opts_defaults(struct res_opts *r)
2310{
 2311 set_res_opts_defaults(r);
2312}
2313
Lars Ellenbergf3990022011-03-23 14:31:09 +01002314int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002315{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002316 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002317 cpumask_var_t new_cpu_mask;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002318 struct drbd_tconn *tconn;
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002319 struct res_opts res_opts;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002320 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002321
Lars Ellenbergf3990022011-03-23 14:31:09 +01002322 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002323 if (!adm_ctx.reply_skb)
2324 return retcode;
2325 if (retcode != NO_ERROR)
2326 goto fail;
Lars Ellenbergf3990022011-03-23 14:31:09 +01002327 tconn = adm_ctx.tconn;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002328
Philipp Reisnerb411b362009-09-25 16:07:19 -07002329 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2330 retcode = ERR_NOMEM;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002331 drbd_msg_put_info("unable to allocate cpumask");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002332 goto fail;
2333 }
2334
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002335 res_opts = tconn->res_opts;
Lars Ellenberg5979e362011-04-27 21:09:55 +02002336 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02002337 set_res_opts_defaults(&res_opts);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002338
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002339 err = res_opts_from_attrs(&res_opts, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02002340 if (err && err != -ENOMSG) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002341 retcode = ERR_MANDATORY_TAG;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002342 drbd_msg_put_info(from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002343 goto fail;
2344 }
2345
Philipp Reisnerb411b362009-09-25 16:07:19 -07002346 /* silently ignore cpu mask on UP kernel */
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002347 if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
2348 err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
Philipp Reisnerb411b362009-09-25 16:07:19 -07002349 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2350 if (err) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01002351 conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002352 retcode = ERR_CPU_MASK_PARSE;
2353 goto fail;
2354 }
2355 }
2356
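
/*
 * Illustrative sketch (user-space C): the cpu_mask string parsed above
 * is a hex bitmap, e.g. "f" pins the threads to CPUs 0-3 (a stand-in
 * for __bitmap_parse, which also handles masks wider than a long).
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long mask = strtoul("f", NULL, 16);
	int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		if (mask & (1UL << cpu))
			printf("cpu %d allowed\n", cpu);
	return 0;
}
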
Lars Ellenberg7ad651b2011-02-21 13:21:03 +01002357
Lars Ellenbergb57a1e22011-04-27 21:17:33 +02002358 tconn->res_opts = res_opts;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002359
Lars Ellenbergf3990022011-03-23 14:31:09 +01002360 if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2361 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2362 drbd_calc_cpu_mask(tconn);
2363 tconn->receiver.reset_cpu_mask = 1;
2364 tconn->asender.reset_cpu_mask = 1;
2365 tconn->worker.reset_cpu_mask = 1;
Philipp Reisner778f2712010-07-06 11:14:00 +02002366 }
2367
Philipp Reisnerb411b362009-09-25 16:07:19 -07002368fail:
2369 free_cpumask_var(new_cpu_mask);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002370
2371 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002372 return 0;
2373}
2374
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002375int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002376{
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002377 struct drbd_conf *mdev;
 2378 int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2379
2380 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2381 if (!adm_ctx.reply_skb)
2382 return retcode;
2383 if (retcode != NO_ERROR)
2384 goto out;
2385
2386 mdev = adm_ctx.mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002387
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002388 /* If there is still bitmap IO pending, probably because of a previous
2389 * resync just being finished, wait for it before requesting a new resync. */
2390 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2391
Philipp Reisnerb411b362009-09-25 16:07:19 -07002392 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2393
2394 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2395 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2396
2397 while (retcode == SS_NEED_CONNECTION) {
Philipp Reisner87eeee42011-01-19 14:16:30 +01002398 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002399 if (mdev->state.conn < C_CONNECTED)
2400 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002401 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002402
2403 if (retcode != SS_NEED_CONNECTION)
2404 break;
2405
2406 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2407 }
2408
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002409out:
2410 drbd_adm_finish(info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002411 return 0;
2412}
2413
Philipp Reisner07782862010-08-31 12:00:50 +02002414static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2415{
2416 int rv;
2417
2418 rv = drbd_bmio_set_n_write(mdev);
2419 drbd_suspend_al(mdev);
2420 return rv;
2421}
2422
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

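/* Nest a DRBD_NLA_CFG_CONTEXT attribute (connection name and, if specified,
 * volume number) into the given skb.  Returns -EMSGSIZE if the skb has no
 * room left. */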
int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

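/* Put everything a status reply or broadcast event carries into the skb:
 * configuration context, resource/disk/net options, current state, UUIDs,
 * capacity and resync counters.  Sensitive information is excluded for
 * broadcast events and for requests from unprivileged processes. */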
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

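/* netlink dump callback: emit the status of at most one volume (or one empty
 * resource) per invocation.  Iterator state lives in cb->args[]; an empty skb
 * (nothing more to dump) terminates the dump. */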
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* this is a tconn without a single volume */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock(). During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are set up properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *conn_name;
	struct drbd_tconn *tconn;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	conn_name = nla_data(nla);
	tconn = conn_get_by_name(conn_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

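/* Tell userspace which timeout applies while waiting for the connection:
 * UT_PEER_OUTDATED if the peer disk is already D_OUTDATED, UT_DEGRADED if
 * the USE_DEGR_WFC_T flag is set, UT_DEFAULT otherwise. */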
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned */
		mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}


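/* Generate a new current UUID.  With clear_bm set, the bitmap is cleared as
 * well; if additionally the peer is connected (protocol >= 90) and the
 * current UUID is still UUID_JUST_CREATED, the initial sync is skipped and
 * both disks are marked D_UP_TO_DATE. */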
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

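/* A connection (resource) name must be non-empty and must not contain a
 * slash, so it could later be used as a sysfs/configfs/debugfs name. */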
static enum drbd_ret_code
drbd_check_conn_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("connection name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid connection name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_conn_name(adm_ctx.conn_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("connection exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.conn_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

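/* Remove a single volume (minor) from its connection and from the global
 * minors idr.  Only allowed while the device is diskless and Secondary; it
 * may still belong to a live replication group.  synchronize_rcu() makes
 * sure concurrent readers are done before the final kref is dropped. */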
static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

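/* Tear down a whole resource in one call: demote all volumes to Secondary,
 * disconnect, detach all disks, then delete all volumes and finally the
 * connection itself.  A failing step aborts the teardown. */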
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_CONN_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped; state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_CONN_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
	goto out;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_CONN_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

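/* Broadcast a state info broadcast (sib) event to the drbd generic netlink
 * events multicast group.  -ESRCH (nobody listening) is not treated as an
 * error. */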
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
		"Event seq:%u sib_reason:%u\n",
		err, seq, sib->sib_reason);
}