/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

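/* The magic includes below expand the netlink command and attribute
 * definitions into the actual drbd_genl_family, its operations table,
 * and the *_from_attrs() attribute parsers used throughout this file. */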
#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used with blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

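/* Finalize the netlink message in skb and send it back to the sender of
 * the request referenced by info. */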
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and 4k are available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION	4
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
	}

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
				adm_ctx->minor, adm_ctx->resource->name,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx->minor, adm_ctx->volume,
				adm_ctx->device->vnr,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}

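/* Counterpart to drbd_adm_prepare(): drop the object references taken
 * there, fill in the final return code, and send the prepared reply. */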
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}

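/* Fill in the DRBD_PEER_AF and DRBD_PEER_ADDRESS environment entries
 * (envp[3] and envp[4]) handed to the user space helper. */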
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

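/* Synchronously run the configured user space helper (usermode_helper,
 * typically drbdadm) for one minor, broadcasting pre/post events. */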
int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

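/* Like drbd_khelper(), but runs the helper for a whole connection,
 * passing the resource name instead of a single minor. */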
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}

static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}

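/* Try to outdate the peer's disk, running the fence-peer helper if a
 * fencing policy is configured.  Returns true if the peer's disk is known
 * to be no better than Outdated afterwards (trivially so for
 * FP_DONT_CARE). */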
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}

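/* Kernel thread body: run conn_try_outdate_peer() and drop the connection
 * reference taken by conn_try_outdate_peer_async(). */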
static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may just have force_sig()'ed this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}

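/* Change the role of this device to Primary or Secondary, retrying a few
 * times and, depending on "force" and the fencing policy, outdating the
 * peer or even forcing the local disk UpToDate. */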
enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible.  */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(connection->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(device);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage is done in the after-state-change work already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}

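/* Map the errors returned by the generated *_from_attrs() parsers to a
 * message suitable for drbd_msg_put_info(). */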
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

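/* Generic netlink handler for DRBD_ADM_PRIMARY/DRBD_ADM_SECONDARY.  The
 * actual role change in drbd_set_role() may run user space helpers and
 * block, so the global genl_lock is dropped around it. */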
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	genl_unlock();
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	genl_lock();
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device flag, is triggered by drbd internals,
 * and should be short-lived. */
Philipp Reisner7dbb4382013-02-28 10:30:19 +0100886/* It needs to be a counter, since multiple threads might
887 independently suspend and resume IO. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200888void drbd_suspend_io(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700889{
Philipp Reisner7dbb4382013-02-28 10:30:19 +0100890 atomic_inc(&device->suspend_cnt);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200891 if (drbd_suspended(device))
Philipp Reisner265be2d2010-05-31 10:14:17 +0200892 return;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200893 wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
Philipp Reisnerb411b362009-09-25 16:07:19 -0700894}
895
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200896void drbd_resume_io(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700897{
Philipp Reisner7dbb4382013-02-28 10:30:19 +0100898 if (atomic_dec_and_test(&device->suspend_cnt))
899 wake_up(&device->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700900}
901
902/**
903 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200904 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -0700905 *
906 * Returns 0 on success, negative return values indicate errors.
907 * You should call drbd_md_sync() after calling this function.
908 */
Philipp Reisnerd752b262013-06-25 16:50:08 +0200909enum determine_dev_size
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200910drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700911{
Lars Ellenberg8011e242015-06-08 14:48:38 +0200912 struct md_offsets_and_sizes {
913 u64 last_agreed_sect;
914 u64 md_offset;
915 s32 al_offset;
916 s32 bm_offset;
917 u32 md_size_sect;
918
919 u32 al_stripes;
920 u32 al_stripe_size_4k;
921 } prev;
922 sector_t u_size, size;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200923 struct drbd_md *md = &device->ldev->md;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700924 char ppb[10];
Philipp Reisnerd752b262013-06-25 16:50:08 +0200925 void *buffer;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700926
927 int md_moved, la_size_changed;
Philipp Reisnere96c9632013-06-25 16:50:07 +0200928 enum determine_dev_size rv = DS_UNCHANGED;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700929
Lars Ellenberg5f7c0122015-06-08 15:18:45 +0200930 /* We may change the on-disk offsets of our meta data below. Lock out
931 * anything that may cause meta data IO, to avoid acting on incomplete
932 * layout changes or scribbling over meta data that is in the process
933 * of being moved.
Philipp Reisnerb411b362009-09-25 16:07:19 -0700934 *
Lars Ellenberg5f7c0122015-06-08 15:18:45 +0200935 * Move is not exactly correct, btw, currently we have all our meta
936 * data in core memory, to "move" it we just write it all out, there
937 * are no reads. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200938 drbd_suspend_io(device);
Lars Ellenberge37d2432014-04-01 23:53:30 +0200939 buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
Philipp Reisnerd752b262013-06-25 16:50:08 +0200940 if (!buffer) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200941 drbd_resume_io(device);
Philipp Reisnerd752b262013-06-25 16:50:08 +0200942 return DS_ERROR;
943 }
Philipp Reisnerb411b362009-09-25 16:07:19 -0700944
Lars Ellenberg8011e242015-06-08 14:48:38 +0200945 /* remember current offset and sizes */
946 prev.last_agreed_sect = md->la_size_sect;
947 prev.md_offset = md->md_offset;
948 prev.al_offset = md->al_offset;
949 prev.bm_offset = md->bm_offset;
950 prev.md_size_sect = md->md_size_sect;
951 prev.al_stripes = md->al_stripes;
952 prev.al_stripe_size_4k = md->al_stripe_size_4k;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700953
Philipp Reisnerd752b262013-06-25 16:50:08 +0200954 if (rs) {
955 /* rs is non NULL if we should change the AL layout only */
Philipp Reisnerd752b262013-06-25 16:50:08 +0200956 md->al_stripes = rs->al_stripes;
957 md->al_stripe_size_4k = rs->al_stripe_size / 4;
958 md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
959 }
960
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200961 drbd_md_set_sector_offsets(device, device->ldev);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700962
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +0200963 rcu_read_lock();
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200964 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +0200965 rcu_read_unlock();
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200966 size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700967
Lars Ellenberg8011e242015-06-08 14:48:38 +0200968 if (size < prev.last_agreed_sect) {
Philipp Reisnerd752b262013-06-25 16:50:08 +0200969 if (rs && u_size == 0) {
970 /* Remove "rs &&" later. This check should always be active, but
971 right now the receiver expects the permissive behavior */
Andreas Gruenbacherd0180172011-07-03 17:53:52 +0200972 drbd_warn(device, "Implicit shrink not allowed. "
Philipp Reisnerd752b262013-06-25 16:50:08 +0200973 "Use --size=%llus for explicit shrink.\n",
974 (unsigned long long)size);
975 rv = DS_ERROR_SHRINK;
976 }
977 if (u_size > size)
978 rv = DS_ERROR_SPACE_MD;
979 if (rv != DS_UNCHANGED)
980 goto err_out;
981 }
982
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200983 if (drbd_get_capacity(device->this_bdev) != size ||
984 drbd_bm_capacity(device) != size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -0700985 int err;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +0200986 err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
Philipp Reisnerb411b362009-09-25 16:07:19 -0700987 if (unlikely(err)) {
988 /* currently there is only one error: ENOMEM! */
Lars Ellenberg8011e242015-06-08 14:48:38 +0200989 size = drbd_bm_capacity(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700990 if (size == 0) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +0200991 drbd_err(device, "OUT OF MEMORY! "
Philipp Reisnerb411b362009-09-25 16:07:19 -0700992 "Could not allocate bitmap!\n");
993 } else {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +0200994 drbd_err(device, "BM resizing failed. "
Lars Ellenberg8011e242015-06-08 14:48:38 +0200995 "Leaving size unchanged\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -0700996 }
Philipp Reisnere96c9632013-06-25 16:50:07 +0200997 rv = DS_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700998 }
999 /* racy, see comments above. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001000 drbd_set_my_capacity(device, size);
Lars Ellenberg8011e242015-06-08 14:48:38 +02001001 md->la_size_sect = size;
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001002 drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001003 (unsigned long long)size>>1);
1004 }
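	/* Assuming the enum ordering declared in drbd_int.h (DS_ERROR_SHRINK <
	 * DS_ERROR_SPACE_MD < DS_ERROR < DS_UNCHANGED), the single test below
	 * catches plain DS_ERROR as well as the more specific error codes
	 * assigned above. */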
Philipp Reisnerd752b262013-06-25 16:50:08 +02001005 if (rv <= DS_ERROR)
1006 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001007
Lars Ellenberg8011e242015-06-08 14:48:38 +02001008 la_size_changed = (prev.last_agreed_sect != md->la_size_sect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001009
Lars Ellenberg8011e242015-06-08 14:48:38 +02001010 md_moved = prev.md_offset != md->md_offset
1011 || prev.md_size_sect != md->md_size_sect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001012
Philipp Reisnerd752b262013-06-25 16:50:08 +02001013 if (la_size_changed || md_moved || rs) {
1014 u32 prev_flags;
Andreas Gruenbacher24dccab2010-12-12 17:45:41 +01001015
Lars Ellenbergfcb09672014-01-27 16:04:14 +01001016 /* We do some synchronous IO below, which may take some time.
1017 * Clear the timer, to avoid scary "timer expired!" messages,
1018 * "Superblock" is written out at least twice below, anyways. */
1019 del_timer(&device->md_sync_timer);
Philipp Reisnerd752b262013-06-25 16:50:08 +02001020
Lars Ellenberg5f7c0122015-06-08 15:18:45 +02001021 /* We won't change the "al-extents" setting, we just may need
1022 * to move the on-disk location of the activity log ringbuffer.
1023 * Lock for transaction is good enough, it may well be "dirty"
1024 * or even "starving". */
1025 wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));
1026
1027 /* mark current on-disk bitmap and activity log as unreliable */
Philipp Reisnerd752b262013-06-25 16:50:08 +02001028 prev_flags = md->flags;
Lars Ellenberg5f7c0122015-06-08 15:18:45 +02001029 md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001030 drbd_md_write(device, buffer);
Philipp Reisnerd752b262013-06-25 16:50:08 +02001031
Lars Ellenberg5f7c0122015-06-08 15:18:45 +02001032 drbd_al_initialize(device, buffer);
1033
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001034 drbd_info(device, "Writing the whole bitmap, %s\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001035 la_size_changed && md_moved ? "size changed and md moved" :
1036 la_size_changed ? "size changed" : "md moved");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01001037 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001038 drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
Philipp Reisnerd752b262013-06-25 16:50:08 +02001039 "size changed", BM_LOCKED_MASK);
Philipp Reisnerd752b262013-06-25 16:50:08 +02001040
Lars Ellenberg5f7c0122015-06-08 15:18:45 +02001041	 /* on-disk bitmap and activity log are authoritative again
1042 * (unless there was an IO error meanwhile...) */
Philipp Reisnerd752b262013-06-25 16:50:08 +02001043 md->flags = prev_flags;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001044 drbd_md_write(device, buffer);
Philipp Reisnerd752b262013-06-25 16:50:08 +02001045
1046 if (rs)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001047 drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
1048 md->al_stripes, md->al_stripe_size_4k * 4);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001049 }
1050
Lars Ellenberg8011e242015-06-08 14:48:38 +02001051 if (size > prev.last_agreed_sect)
1052 rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
1053 if (size < prev.last_agreed_sect)
Philipp Reisnere96c9632013-06-25 16:50:07 +02001054 rv = DS_SHRUNK;
Philipp Reisnerd752b262013-06-25 16:50:08 +02001055
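	/* The "if (0)" below makes the error path reachable only through the
	 * goto err_out above; the success path falls through past it into the
	 * common cleanup. */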
1056 if (0) {
1057 err_out:
Lars Ellenberg8011e242015-06-08 14:48:38 +02001058 /* restore previous offset and sizes */
1059 md->la_size_sect = prev.last_agreed_sect;
1060 md->md_offset = prev.md_offset;
1061 md->al_offset = prev.al_offset;
1062 md->bm_offset = prev.bm_offset;
1063 md->md_size_sect = prev.md_size_sect;
1064 md->al_stripes = prev.al_stripes;
1065 md->al_stripe_size_4k = prev.al_stripe_size_4k;
1066 md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
Philipp Reisnerd752b262013-06-25 16:50:08 +02001067 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001068 lc_unlock(device->act_log);
1069 wake_up(&device->al_wait);
1070 drbd_md_put_buffer(device);
1071 drbd_resume_io(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001072
1073 return rv;
1074}
1075
1076sector_t
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001077drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
Philipp Reisneref5e44a2011-05-03 13:27:43 +02001078 sector_t u_size, int assume_peer_has_space)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001079{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001080 sector_t p_size = device->p_size; /* partner's disk size. */
Lars Ellenbergcccac982013-03-19 18:16:46 +01001081 sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001082 sector_t m_size; /* my size */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001083 sector_t size = 0;
1084
1085 m_size = drbd_get_max_capacity(bdev);
1086
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001087 if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001088 drbd_warn(device, "Resize while not connected was forced by the user!\n");
Philipp Reisnera393db62009-12-22 13:35:52 +01001089 p_size = m_size;
1090 }
1091
Philipp Reisnerb411b362009-09-25 16:07:19 -07001092 if (p_size && m_size) {
1093 size = min_t(sector_t, p_size, m_size);
1094 } else {
Lars Ellenbergcccac982013-03-19 18:16:46 +01001095 if (la_size_sect) {
1096 size = la_size_sect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001097 if (m_size && m_size < size)
1098 size = m_size;
1099 if (p_size && p_size < size)
1100 size = p_size;
1101 } else {
1102 if (m_size)
1103 size = m_size;
1104 if (p_size)
1105 size = p_size;
1106 }
1107 }
1108
1109 if (size == 0)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001110 drbd_err(device, "Both nodes diskless!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001111
1112 if (u_size) {
1113 if (u_size > size)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001114 drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001115 (unsigned long)u_size>>1, (unsigned long)size>>1);
1116 else
1117 size = u_size;
1118 }
1119
1120 return size;
1121}
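
/* Worked examples for the sizing rules above (illustrative numbers in
 * 512-byte sectors, not taken from any particular setup):
 *   connected, p_size = 2000, m_size = 1000, u_size = 0  -> size = 1000
 *   disconnected, la_size_sect = 1500, m_size = 1000     -> size = 1000
 *   disconnected, la_size_sect = 0, m_size = 1000 only   -> size = 1000
 *   any of the above with u_size = 800                   -> size = 800 */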
1122
1123/**
1124 * drbd_check_al_size() - Ensures that the AL is of the right size
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001125 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001126 *
1127	 * Returns -EBUSY if the current AL LRU is still in use, -ENOMEM when
1128	 * allocation failed, and 0 on success. You should call drbd_md_sync()
1129	 * after calling this function.
1130 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001131static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001132{
1133 struct lru_cache *n, *t;
1134 struct lc_element *e;
1135 unsigned int in_use;
1136 int i;
1137
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001138 if (device->act_log &&
1139 device->act_log->nr_elements == dc->al_extents)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001140 return 0;
1141
1142 in_use = 0;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001143 t = device->act_log;
Lars Ellenberg7ad651b2011-02-21 13:21:03 +01001144 n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
Lars Ellenbergf3990022011-03-23 14:31:09 +01001145 dc->al_extents, sizeof(struct lc_element), 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001146
1147 if (n == NULL) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001148 drbd_err(device, "Cannot allocate act_log lru!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001149 return -ENOMEM;
1150 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001151 spin_lock_irq(&device->al_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001152 if (t) {
1153 for (i = 0; i < t->nr_elements; i++) {
1154 e = lc_element_by_index(t, i);
1155 if (e->refcnt)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001156 drbd_err(device, "refcnt(%d)==%d\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001157 e->lc_number, e->refcnt);
1158 in_use += e->refcnt;
1159 }
1160 }
1161 if (!in_use)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001162 device->act_log = n;
1163 spin_unlock_irq(&device->al_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001164 if (in_use) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001165 drbd_err(device, "Activity log still in use!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001166 lc_destroy(n);
1167 return -EBUSY;
1168 } else {
Markus Elfringd01efce2014-11-19 13:33:32 +01001169 lc_destroy(t);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001170 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001171	 drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001172 return 0;
1173}
1174
Lars Ellenberg69ba1ee2016-06-14 00:26:21 +02001175static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
1176{
1177 q->limits.discard_granularity = granularity;
1178}
Lars Ellenberg9104d312016-06-14 00:26:31 +02001179
1180static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
1181{
1182 /* when we introduced REQ_WRITE_SAME support, we also bumped
1183 * our maximum supported batch bio size used for discards. */
1184 if (connection->agreed_features & DRBD_FF_WSAME)
1185 return DRBD_MAX_BBIO_SECTORS;
1186 /* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
1187 return AL_EXTENT_SIZE >> 9;
1188}
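
/* For scale: assuming AL_EXTENT_SIZE is 4 MiB (its value in drbd_int.h
 * at the time of writing), the legacy limit above works out to
 * 4 MiB >> 9 = 8192 sectors per discard request. */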
1189
Lars Ellenberg69ba1ee2016-06-14 00:26:21 +02001190static void decide_on_discard_support(struct drbd_device *device,
1191 struct request_queue *q,
1192 struct request_queue *b,
1193 bool discard_zeroes_if_aligned)
1194{
1195 /* q = drbd device queue (device->rq_queue)
1196 * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
1197 * or NULL if diskless
1198 */
1199 struct drbd_connection *connection = first_peer_device(device)->connection;
1200 bool can_do = b ? blk_queue_discard(b) : true;
1201
Lars Ellenberg9104d312016-06-14 00:26:31 +02001202 if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
Lars Ellenberg69ba1ee2016-06-14 00:26:21 +02001203 can_do = false;
1204 drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
1205 }
1206 if (can_do) {
1207 /* We don't care for the granularity, really.
1208 * Stacking limits below should fix it for the local
1209 * device. Whether or not it is a suitable granularity
1210 * on the remote device is not our problem, really. If
1211 * you care, you need to use devices with similar
1212 * topology on all peers. */
1213 blk_queue_discard_granularity(q, 512);
Lars Ellenberg9104d312016-06-14 00:26:31 +02001214 q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
Lars Ellenberg69ba1ee2016-06-14 00:26:21 +02001215 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
Christoph Hellwig45c21792017-04-05 19:21:22 +02001216 q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
Lars Ellenberg69ba1ee2016-06-14 00:26:21 +02001217 } else {
1218 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
1219 blk_queue_discard_granularity(q, 0);
1220 q->limits.max_discard_sectors = 0;
Christoph Hellwig45c21792017-04-05 19:21:22 +02001221 q->limits.max_write_zeroes_sectors = 0;
Lars Ellenberg69ba1ee2016-06-14 00:26:21 +02001222 }
1223}
1224
1225static void fixup_discard_if_not_supported(struct request_queue *q)
1226{
1227 /* To avoid confusion, if this queue does not support discard, clear
1228 * max_discard_sectors, which is what lsblk -D reports to the user.
1229 * Older kernels got this wrong in "stack limits".
1230	 */
1231 if (!blk_queue_discard(q)) {
1232 blk_queue_max_discard_sectors(q, 0);
1233 blk_queue_discard_granularity(q, 0);
1234 }
1235}
1236
Lars Ellenberg9104d312016-06-14 00:26:31 +02001237static void decide_on_write_same_support(struct drbd_device *device,
1238 struct request_queue *q,
1239 struct request_queue *b, struct o_qlim *o)
1240{
1241 struct drbd_peer_device *peer_device = first_peer_device(device);
1242 struct drbd_connection *connection = peer_device->connection;
1243 bool can_do = b ? b->limits.max_write_same_sectors : true;
1244
1245 if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
1246 can_do = false;
1247 drbd_info(peer_device, "peer does not support WRITE_SAME\n");
1248 }
1249
1250 if (o) {
1251 /* logical block size; queue_logical_block_size(NULL) is 512 */
1252 unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
1253 unsigned int me_lbs_b = queue_logical_block_size(b);
1254 unsigned int me_lbs = queue_logical_block_size(q);
1255
1256 if (me_lbs_b != me_lbs) {
1257 drbd_warn(device,
1258 "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
1259 me_lbs, me_lbs_b);
1260 /* rather disable write same than trigger some BUG_ON later in the scsi layer. */
1261 can_do = false;
1262 }
1263 if (me_lbs_b != peer_lbs) {
1264 drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
1265 me_lbs, peer_lbs);
1266 if (can_do) {
1267 drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
1268 can_do = false;
1269 }
1270 me_lbs = max(me_lbs, me_lbs_b);
1271 /* We cannot change the logical block size of an in-use queue.
1272 * We can only hope that access happens to be properly aligned.
1273 * If not, the peer will likely produce an IO error, and detach. */
1274 if (peer_lbs > me_lbs) {
1275 if (device->state.role != R_PRIMARY) {
1276 blk_queue_logical_block_size(q, peer_lbs);
1277 drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
1278 } else {
1279 drbd_warn(peer_device,
1280 "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
1281 me_lbs, peer_lbs);
1282 }
1283 }
1284 }
1285 if (can_do && !o->write_same_capable) {
1286 /* If we introduce an open-coded write-same loop on the receiving side,
1287 * the peer would present itself as "capable". */
1288 drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
1289 can_do = false;
1290 }
1291 }
1292
1293 blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
1294}
1295
Philipp Reisner8fe39aa2013-11-22 13:22:13 +01001296static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
Lars Ellenberg9104d312016-06-14 00:26:31 +02001297 unsigned int max_bio_size, struct o_qlim *o)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001298{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001299 struct request_queue * const q = device->rq_queue;
Lars Ellenbergdb141b22012-06-25 19:15:58 +02001300 unsigned int max_hw_sectors = max_bio_size >> 9;
1301 unsigned int max_segments = 0;
Philipp Reisnerc1b31562014-04-28 18:43:32 +02001302 struct request_queue *b = NULL;
Lars Ellenbergdd4f6992016-06-14 00:26:20 +02001303 struct disk_conf *dc;
1304 bool discard_zeroes_if_aligned = true;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001305
Philipp Reisner8fe39aa2013-11-22 13:22:13 +01001306 if (bdev) {
1307 b = bdev->backing_bdev->bd_disk->queue;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001308
1309 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001310 rcu_read_lock();
Lars Ellenbergdd4f6992016-06-14 00:26:20 +02001311 dc = rcu_dereference(device->ldev->disk_conf);
1312 max_segments = dc->max_bio_bvecs;
1313 discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001314 rcu_read_unlock();
Philipp Reisnerc1b31562014-04-28 18:43:32 +02001315
1316 blk_set_stacking_limits(&q->limits);
Philipp Reisner99432fc2011-05-20 16:39:13 +02001317 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001318
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01001319 blk_queue_max_hw_sectors(q, max_hw_sectors);
1320 /* This is the workaround for "bio would need to, but cannot, be split" */
1321 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001322 blk_queue_segment_boundary(q, PAGE_SIZE-1);
Lars Ellenberg69ba1ee2016-06-14 00:26:21 +02001323 decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
Lars Ellenberg9104d312016-06-14 00:26:31 +02001324 decide_on_write_same_support(device, q, b, o);
1325
Philipp Reisnerc1b31562014-04-28 18:43:32 +02001326 if (b) {
Philipp Reisner99432fc2011-05-20 16:39:13 +02001327 blk_queue_stack_limits(q, b);
1328
Jan Karadc3b17c2017-02-02 15:56:50 +01001329 if (q->backing_dev_info->ra_pages !=
1330 b->backing_dev_info->ra_pages) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001331 drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
Jan Karadc3b17c2017-02-02 15:56:50 +01001332 q->backing_dev_info->ra_pages,
1333 b->backing_dev_info->ra_pages);
1334 q->backing_dev_info->ra_pages =
1335 b->backing_dev_info->ra_pages;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001336 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001337 }
Lars Ellenberg69ba1ee2016-06-14 00:26:21 +02001338 fixup_discard_if_not_supported(q);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001339}
1340
Lars Ellenberg9104d312016-06-14 00:26:31 +02001341void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
Philipp Reisner99432fc2011-05-20 16:39:13 +02001342{
Lars Ellenbergdb141b22012-06-25 19:15:58 +02001343 unsigned int now, new, local, peer;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001344
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001345 now = queue_max_hw_sectors(device->rq_queue) << 9;
1346	 local = device->local_max_bio_size; /* possibly the last known value, from volatile memory */
1347	 peer = device->peer_max_bio_size; /* possibly the last known value, from meta data */
Philipp Reisner99432fc2011-05-20 16:39:13 +02001348
Philipp Reisner8fe39aa2013-11-22 13:22:13 +01001349 if (bdev) {
1350 local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001351 device->local_max_bio_size = local;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001352 }
Lars Ellenbergdb141b22012-06-25 19:15:58 +02001353 local = min(local, DRBD_MAX_BIO_SIZE);
Philipp Reisner99432fc2011-05-20 16:39:13 +02001354
1355 /* We may ignore peer limits if the peer is modern enough.
1356 Because new from 8.3.8 onwards the peer can use multiple
1357 BIOs for a single peer_request */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001358 if (device->state.conn >= C_WF_REPORT_PARAMS) {
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001359 if (first_peer_device(device)->connection->agreed_pro_version < 94)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001360 peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
Philipp Reisner68093842011-06-30 15:43:06 +02001361 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001362 else if (first_peer_device(device)->connection->agreed_pro_version == 94)
Philipp Reisner99432fc2011-05-20 16:39:13 +02001363 peer = DRBD_MAX_SIZE_H80_PACKET;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001364 else if (first_peer_device(device)->connection->agreed_pro_version < 100)
Philipp Reisner2ffca4f2011-06-30 15:43:06 +02001365 peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
1366 else
Philipp Reisner99432fc2011-05-20 16:39:13 +02001367 peer = DRBD_MAX_BIO_SIZE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001368
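	/* For reference, assuming the definitions in drbd_int.h:
	 * DRBD_MAX_SIZE_H80_PACKET = 32 KiB, DRBD_MAX_BIO_SIZE_P95 = 128 KiB,
	 * DRBD_MAX_BIO_SIZE = 1 MiB. */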
Lars Ellenbergfa090e72014-04-28 18:43:27 +02001369 /* We may later detach and re-attach on a disconnected Primary.
1370 * Avoid this setting to jump back in that case.
1371 * We want to store what we know the peer DRBD can handle,
1372 * not what the peer IO backend can handle. */
1373 if (peer > device->peer_max_bio_size)
1374 device->peer_max_bio_size = peer;
1375 }
Lars Ellenbergdb141b22012-06-25 19:15:58 +02001376 new = min(local, peer);
Philipp Reisner99432fc2011-05-20 16:39:13 +02001377
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001378 if (device->state.role == R_PRIMARY && new < now)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001379 drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
Philipp Reisner99432fc2011-05-20 16:39:13 +02001380
1381 if (new != now)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001382 drbd_info(device, "max BIO size = %u\n", new);
Philipp Reisner99432fc2011-05-20 16:39:13 +02001383
Lars Ellenberg9104d312016-06-14 00:26:31 +02001384 drbd_setup_queue_param(device, bdev, new, o);
Philipp Reisner99432fc2011-05-20 16:39:13 +02001385}
1386
Philipp Reisnera18e9d12011-04-24 11:09:55 +02001387/* Starts the worker thread */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001388static void conn_reconfig_start(struct drbd_connection *connection)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001389{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001390 drbd_thread_start(&connection->worker);
Andreas Gruenbacherb5043c52011-07-28 15:56:02 +02001391 drbd_flush_workqueue(&connection->sender_work);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001392}
1393
Philipp Reisnera18e9d12011-04-24 11:09:55 +02001394/* if still unconfigured, stops worker again. */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001395static void conn_reconfig_done(struct drbd_connection *connection)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001396{
Lars Ellenberg992d6e92011-05-02 11:47:18 +02001397 bool stop_threads;
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001398 spin_lock_irq(&connection->resource->req_lock);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001399 stop_threads = conn_all_vols_unconf(connection) &&
1400 connection->cstate == C_STANDALONE;
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001401 spin_unlock_irq(&connection->resource->req_lock);
Lars Ellenberg992d6e92011-05-02 11:47:18 +02001402 if (stop_threads) {
Philipp Reisner668700b2015-03-16 16:08:29 +01001403 /* ack_receiver thread and ack_sender workqueue are implicitly
1404 * stopped by receiver in conn_disconnect() */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001405 drbd_thread_stop(&connection->receiver);
1406 drbd_thread_stop(&connection->worker);
Lars Ellenberg992d6e92011-05-02 11:47:18 +02001407 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001408}
1409
Philipp Reisner07782862010-08-31 12:00:50 +02001410/* Make sure IO is suspended before calling this function. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001411static void drbd_suspend_al(struct drbd_device *device)
Philipp Reisner07782862010-08-31 12:00:50 +02001412{
1413 int s = 0;
1414
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001415 if (!lc_try_lock(device->act_log)) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001416 drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
Philipp Reisner07782862010-08-31 12:00:50 +02001417 return;
1418 }
1419
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001420 drbd_al_shrink(device);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001421 spin_lock_irq(&device->resource->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001422 if (device->state.conn < C_CONNECTED)
1423 s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001424 spin_unlock_irq(&device->resource->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001425 lc_unlock(device->act_log);
Philipp Reisner07782862010-08-31 12:00:50 +02001426
1427 if (s)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001428 drbd_info(device, "Suspended AL updates\n");
Philipp Reisner07782862010-08-31 12:00:50 +02001429}
1430
Lars Ellenberg5979e362011-04-27 21:09:55 +02001431
1432static bool should_set_defaults(struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433{
Lars Ellenberg5979e362011-04-27 21:09:55 +02001434 unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
1435 return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
1436}
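
/* DRBD_GENL_F_SET_DEFAULTS is set by drbdsetup when the user passes
 * --set-defaults: tunables not mentioned in the request are then reset
 * to their defaults instead of being left unchanged. */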
1437
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001438static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
Philipp Reisnerd589a212011-05-04 10:06:52 +02001439{
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001440 /* This is limited by 16 bit "slot" numbers,
1441 * and by available on-disk context storage.
1442 *
1443 * Also (u16)~0 is special (denotes a "free" extent).
1444 *
1445 * One transaction occupies one 4kB on-disk block,
1446 * we have n such blocks in the on disk ring buffer,
1447 * the "current" transaction may fail (n-1),
1448	 * and each transaction stores context information for 919 slot numbers.
1449 *
1450 * 72 transaction blocks amounts to more than 2**16 context slots,
1451 * so cap there first.
1452 */
1453 const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
1454 const unsigned int sufficient_on_disk =
1455 (max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
1456 /AL_CONTEXT_PER_TRANSACTION;
Philipp Reisnerd589a212011-05-04 10:06:52 +02001457
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001458 unsigned int al_size_4k = bdev->md.al_size_4k;
1459
1460 if (al_size_4k > sufficient_on_disk)
1461 return max_al_nr;
1462
1463 return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
Philipp Reisnerd589a212011-05-04 10:06:52 +02001464}
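
/* Sanity check of the cap above: 72 * 919 = 66168 context slots, which
 * exceeds 2^16 = 65536, while 71 * 919 = 65249 would not yet cover
 * DRBD_AL_EXTENTS_MAX (0xfffe, assuming drbd_limits.h); hence
 * sufficient_on_disk rounds up to 72 transaction blocks. */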
1465
Lars Ellenberg70df7092013-12-20 11:17:02 +01001466static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
1467{
1468 return a->disk_barrier != b->disk_barrier ||
1469 a->disk_flushes != b->disk_flushes ||
1470 a->disk_drain != b->disk_drain;
1471}
1472
Philipp Reisnera5ca66c2016-06-14 00:26:14 +02001473static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
1474 struct drbd_backing_dev *nbc)
Philipp Reisnerc5c23852016-06-14 00:26:12 +02001475{
Philipp Reisnera5ca66c2016-06-14 00:26:14 +02001476 struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
1477
Philipp Reisnerc5c23852016-06-14 00:26:12 +02001478 if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1479 disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1480 if (disk_conf->al_extents > drbd_al_extents_max(nbc))
1481 disk_conf->al_extents = drbd_al_extents_max(nbc);
Philipp Reisnera5ca66c2016-06-14 00:26:14 +02001482
Christoph Hellwig48920ff2017-04-05 19:21:23 +02001483 if (!blk_queue_discard(q)) {
Lars Ellenberg65f5be32016-06-14 00:26:29 +02001484 if (disk_conf->rs_discard_granularity) {
1485 disk_conf->rs_discard_granularity = 0; /* disable feature */
1486 drbd_info(device, "rs_discard_granularity feature disabled\n");
1487 }
Philipp Reisnera5ca66c2016-06-14 00:26:14 +02001488 }
1489
1490 if (disk_conf->rs_discard_granularity) {
1491 int orig_value = disk_conf->rs_discard_granularity;
1492 int remainder;
1493
1494 if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
1495 disk_conf->rs_discard_granularity = q->limits.discard_granularity;
1496
1497	 remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
1498	 if (remainder) disk_conf->rs_discard_granularity += q->limits.discard_granularity - remainder; /* round up to a multiple */
1499
1500 if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
1501 disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
1502
1503 if (disk_conf->rs_discard_granularity != orig_value)
1504 drbd_info(device, "rs_discard_granularity changed to %d\n",
1505 disk_conf->rs_discard_granularity);
1506 }
Philipp Reisnerc5c23852016-06-14 00:26:12 +02001507}
1508
Lars Ellenbergf3990022011-03-23 14:31:09 +01001509int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1510{
Lars Ellenberga910b122014-04-28 18:43:21 +02001511 struct drbd_config_context adm_ctx;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001512 enum drbd_ret_code retcode;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001513 struct drbd_device *device;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001514 struct disk_conf *new_disk_conf, *old_disk_conf;
Philipp Reisner813472c2011-05-03 16:47:02 +02001515 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001516 int err, fifo_size;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001517
Lars Ellenberga910b122014-04-28 18:43:21 +02001518 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001519 if (!adm_ctx.reply_skb)
1520 return retcode;
1521 if (retcode != NO_ERROR)
Lars Ellenberg9e276872014-04-28 18:43:22 +02001522 goto finish;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001523
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001524 device = adm_ctx.device;
Lars Ellenberg9e276872014-04-28 18:43:22 +02001525 mutex_lock(&adm_ctx.resource->adm_mutex);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001526
1527 /* we also need a disk
1528 * to change the options on */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001529 if (!get_ldev(device)) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001530 retcode = ERR_NO_DISK;
1531 goto out;
1532 }
1533
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001534 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001535 if (!new_disk_conf) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001536 retcode = ERR_NOMEM;
1537 goto fail;
1538 }
1539
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001540 mutex_lock(&device->resource->conf_update);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001541 old_disk_conf = device->ldev->disk_conf;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001542 *new_disk_conf = *old_disk_conf;
Lars Ellenberg5979e362011-04-27 21:09:55 +02001543 if (should_set_defaults(info))
Andreas Gruenbacherb966b5d2011-05-03 14:56:09 +02001544 set_disk_conf_defaults(new_disk_conf);
Lars Ellenberg5979e362011-04-27 21:09:55 +02001545
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001546 err = disk_conf_from_attrs_for_change(new_disk_conf, info);
Andreas Gruenbacherc75b9b12011-05-24 14:18:31 +02001547 if (err && err != -ENOMSG) {
Lars Ellenbergf3990022011-03-23 14:31:09 +01001548 retcode = ERR_MANDATORY_TAG;
Lars Ellenberga910b122014-04-28 18:43:21 +02001549 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
Philipp Reisner8e229432013-08-01 10:21:47 +02001550 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001551 }
1552
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001553 if (!expect(new_disk_conf->resync_rate >= 1))
1554 new_disk_conf->resync_rate = 1;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001555
Philipp Reisnera5ca66c2016-06-14 00:26:14 +02001556 sanitize_disk_conf(device, new_disk_conf, device->ldev);
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001557
1558 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1559 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001560
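	/* one fifo slot per SLEEP_TIME step of the plan-ahead window: with
	 * SLEEP_TIME == HZ/10 (assumed), this works out to exactly
	 * c_plan_ahead slots, c_plan_ahead being given in 0.1s units */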
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001561 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001562 if (fifo_size != device->rs_plan_s->size) {
Philipp Reisner813472c2011-05-03 16:47:02 +02001563 new_plan = fifo_alloc(fifo_size);
1564 if (!new_plan) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001565 drbd_err(device, "kmalloc of fifo_buffer failed");
Lars Ellenbergf3990022011-03-23 14:31:09 +01001566 retcode = ERR_NOMEM;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001567 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001568 }
1569 }
1570
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001571 drbd_suspend_io(device);
1572 wait_event(device->al_wait, lc_try_lock(device->act_log));
1573 drbd_al_shrink(device);
1574 err = drbd_check_al_size(device, new_disk_conf);
1575 lc_unlock(device->act_log);
1576 wake_up(&device->al_wait);
1577 drbd_resume_io(device);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001578
1579 if (err) {
1580 retcode = ERR_NOMEM;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001581 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001582 }
1583
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001584 lock_all_resources();
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001585 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001586 if (retcode == NO_ERROR) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001587 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
1588 drbd_resync_after_changed(device);
Philipp Reisnerdc97b702011-05-03 14:27:15 +02001589 }
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001590 unlock_all_resources();
Lars Ellenbergf3990022011-03-23 14:31:09 +01001591
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001592 if (retcode != NO_ERROR)
1593 goto fail_unlock;
Lars Ellenbergf3990022011-03-23 14:31:09 +01001594
Philipp Reisner813472c2011-05-03 16:47:02 +02001595 if (new_plan) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001596 old_plan = device->rs_plan_s;
1597 rcu_assign_pointer(device->rs_plan_s, new_plan);
Philipp Reisner9958c852011-05-03 16:19:31 +02001598 }
Philipp Reisner9958c852011-05-03 16:19:31 +02001599
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001600 mutex_unlock(&device->resource->conf_update);
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001601
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001602 if (new_disk_conf->al_updates)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001603 device->ldev->md.flags &= ~MDF_AL_DISABLED;
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001604 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001605 device->ldev->md.flags |= MDF_AL_DISABLED;
Philipp Reisner9a51ab12012-02-20 21:53:28 +01001606
Lars Ellenberg691631c2012-10-26 00:41:50 +02001607 if (new_disk_conf->md_flushes)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001608 clear_bit(MD_NO_FUA, &device->flags);
Lars Ellenberg691631c2012-10-26 00:41:50 +02001609 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001610 set_bit(MD_NO_FUA, &device->flags);
Lars Ellenberg691631c2012-10-26 00:41:50 +02001611
Lars Ellenberg70df7092013-12-20 11:17:02 +01001612 if (write_ordering_changed(old_disk_conf, new_disk_conf))
Andreas Gruenbacherf6ba8632014-08-13 18:33:55 +02001613 drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
Philipp Reisner27eb13e2012-03-30 14:12:15 +02001614
Lars Ellenbergdd4f6992016-06-14 00:26:20 +02001615 if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
Lars Ellenberg9104d312016-06-14 00:26:31 +02001616 drbd_reconsider_queue_parameters(device, device->ldev, NULL);
Lars Ellenbergdd4f6992016-06-14 00:26:20 +02001617
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001618 drbd_md_sync(device);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001619
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001620 if (device->state.conn >= C_CONNECTED) {
1621 struct drbd_peer_device *peer_device;
1622
1623 for_each_peer_device(peer_device, device)
1624 drbd_send_sync_param(peer_device);
1625 }
Lars Ellenbergf3990022011-03-23 14:31:09 +01001626
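	/* wait for any rcu_read_lock() readers still looking at the old
	 * disk_conf or rs plan before freeing them below */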
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001627 synchronize_rcu();
1628 kfree(old_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02001629 kfree(old_plan);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001630 mod_timer(&device->request_timer, jiffies + HZ);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001631 goto success;
1632
1633fail_unlock:
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001634 mutex_unlock(&device->resource->conf_update);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001635 fail:
Lars Ellenberg5ecc72c2011-04-27 21:14:57 +02001636 kfree(new_disk_conf);
Philipp Reisner813472c2011-05-03 16:47:02 +02001637 kfree(new_plan);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001638success:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001639 put_ldev(device);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001640 out:
Lars Ellenberg9e276872014-04-28 18:43:22 +02001641 mutex_unlock(&adm_ctx.resource->adm_mutex);
1642 finish:
Lars Ellenberga910b122014-04-28 18:43:21 +02001643 drbd_adm_finish(&adm_ctx, info, retcode);
Lars Ellenbergf3990022011-03-23 14:31:09 +01001644 return 0;
1645}
1646
Lars Ellenberg63a7c8a2015-03-26 20:53:55 +01001647static struct block_device *open_backing_dev(struct drbd_device *device,
1648 const char *bdev_path, void *claim_ptr, bool do_bd_link)
1649{
1650 struct block_device *bdev;
1651 int err = 0;
1652
1653 bdev = blkdev_get_by_path(bdev_path,
1654 FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
1655 if (IS_ERR(bdev)) {
1656 drbd_err(device, "open(\"%s\") failed with %ld\n",
1657 bdev_path, PTR_ERR(bdev));
1658 return bdev;
1659 }
1660
1661 if (!do_bd_link)
1662 return bdev;
1663
1664 err = bd_link_disk_holder(bdev, device->vdisk);
1665 if (err) {
1666 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1667 drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
1668 bdev_path, err);
1669 bdev = ERR_PTR(err);
1670 }
1671 return bdev;
1672}
1673
1674static int open_backing_devices(struct drbd_device *device,
1675 struct disk_conf *new_disk_conf,
1676 struct drbd_backing_dev *nbc)
1677{
1678 struct block_device *bdev;
1679
1680 bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
1681 if (IS_ERR(bdev))
1682 return ERR_OPEN_DISK;
1683 nbc->backing_bdev = bdev;
1684
1685 /*
1686 * meta_dev_idx >= 0: external fixed size, possibly multiple
1687 * drbd sharing one meta device. TODO in that case, paranoia
1688 * check that [md_bdev, meta_dev_idx] is not yet used by some
1689 * other drbd minor! (if you use drbd.conf + drbdadm, that
1690 * should check it for you already; but if you don't, or
1691 * someone fooled it, we need to double check here)
1692 */
1693 bdev = open_backing_dev(device, new_disk_conf->meta_dev,
1694 /* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
1695 * if potentially shared with other drbd minors */
1696 (new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
1697 /* avoid double bd_claim_by_disk() for the same (source,target) tuple,
1698 * as would happen with internal metadata. */
1699 (new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
1700 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
1701 if (IS_ERR(bdev))
1702 return ERR_OPEN_MD_DISK;
1703 nbc->md_bdev = bdev;
1704 return NO_ERROR;
1705}
1706
1707static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
1708 bool do_bd_unlink)
1709{
1710 if (!bdev)
1711 return;
1712 if (do_bd_unlink)
1713 bd_unlink_disk_holder(bdev, device->vdisk);
1714 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1715}
1716
1717void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
1718{
1719 if (ldev == NULL)
1720 return;
1721
1722 close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
1723 close_backing_dev(device, ldev->backing_bdev, true);
1724
1725 kfree(ldev->disk_conf);
1726 kfree(ldev);
1727}
1728
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001729int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001730{
Lars Ellenberga910b122014-04-28 18:43:21 +02001731 struct drbd_config_context adm_ctx;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001732 struct drbd_device *device;
Lars Ellenberg44a4d552013-11-22 12:40:58 +01001733 struct drbd_peer_device *peer_device;
1734 struct drbd_connection *connection;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001735 int err;
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01001736 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001737 enum determine_dev_size dd;
1738 sector_t max_possible_sectors;
1739 sector_t min_md_device_sectors;
1740 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001741 struct disk_conf *new_disk_conf = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001742 struct lru_cache *resync_lru = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001743 struct fifo_buffer *new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001744 union drbd_state ns, os;
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001745 enum drbd_state_rv rv;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001746 struct net_conf *nc;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001747
Lars Ellenberga910b122014-04-28 18:43:21 +02001748 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001749 if (!adm_ctx.reply_skb)
1750 return retcode;
1751 if (retcode != NO_ERROR)
Lars Ellenberg40cbf082011-03-16 16:52:10 +01001752 goto finish;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001753
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001754 device = adm_ctx.device;
Lars Ellenberg9e276872014-04-28 18:43:22 +02001755 mutex_lock(&adm_ctx.resource->adm_mutex);
Lars Ellenberg44a4d552013-11-22 12:40:58 +01001756 peer_device = first_peer_device(device);
Philipp Reisner3b8a44f2014-10-29 17:19:44 +01001757 connection = peer_device->connection;
Lars Ellenberg44a4d552013-11-22 12:40:58 +01001758 conn_reconfig_start(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001759
1760 /* if you want to reconfigure, please tear down first */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001761 if (device->state.disk > D_DISKLESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001762 retcode = ERR_DISK_CONFIGURED;
1763 goto fail;
1764 }
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001765 /* It may just now have detached because of IO error. Make sure
1766 * drbd_ldev_destroy is done already, we may end up here very fast,
1767 * e.g. if someone calls attach from the on-io-error handler,
1768 * to realize a "hot spare" feature (not that I'd recommend that) */
Lars Ellenberge334f552014-02-11 09:30:49 +01001769 wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001770
Lars Ellenberg383606e2012-06-14 14:21:32 +02001771 /* make sure there is no leftover from previous force-detach attempts */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001772 clear_bit(FORCE_DETACH, &device->flags);
1773 clear_bit(WAS_IO_ERROR, &device->flags);
1774 clear_bit(WAS_READ_ERROR, &device->flags);
Lars Ellenberg383606e2012-06-14 14:21:32 +02001775
Lars Ellenberg0029d622012-06-14 18:02:52 +02001776 /* and no leftover from previously aborted resync or verify, either */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001777 device->rs_total = 0;
1778 device->rs_failed = 0;
1779 atomic_set(&device->rs_pending_cnt, 0);
Lars Ellenberg0029d622012-06-14 18:02:52 +02001780
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001781 /* allocation not in the IO path, drbdsetup context */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001782 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1783 if (!nbc) {
1784 retcode = ERR_NOMEM;
1785 goto fail;
1786 }
Philipp Reisner9f2247b2012-08-16 14:25:58 +02001787 spin_lock_init(&nbc->md.uuid_lock);
1788
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001789 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1790 if (!new_disk_conf) {
1791 retcode = ERR_NOMEM;
1792 goto fail;
1793 }
1794 nbc->disk_conf = new_disk_conf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001795
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001796 set_disk_conf_defaults(new_disk_conf);
1797 err = disk_conf_from_attrs(new_disk_conf, info);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01001798 if (err) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001799 retcode = ERR_MANDATORY_TAG;
Lars Ellenberga910b122014-04-28 18:43:21 +02001800 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001801 goto fail;
1802 }
1803
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001804 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1805 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
Philipp Reisnerd589a212011-05-04 10:06:52 +02001806
Philipp Reisner9958c852011-05-03 16:19:31 +02001807 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1808 if (!new_plan) {
1809 retcode = ERR_NOMEM;
1810 goto fail;
1811 }
1812
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001813 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001814 retcode = ERR_MD_IDX_INVALID;
1815 goto fail;
1816 }
1817
Philipp Reisner44ed1672011-04-19 17:10:19 +02001818 rcu_read_lock();
Lars Ellenberg44a4d552013-11-22 12:40:58 +01001819 nc = rcu_dereference(connection->net_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02001820 if (nc) {
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001821 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001822 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001823 retcode = ERR_STONITH_AND_PROT_A;
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001824 goto fail;
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001825 }
1826 }
Philipp Reisner44ed1672011-04-19 17:10:19 +02001827 rcu_read_unlock();
Philipp Reisner47ff2d02010-06-18 13:56:57 +02001828
Lars Ellenberg63a7c8a2015-03-26 20:53:55 +01001829 retcode = open_backing_devices(device, new_disk_conf, nbc);
1830 if (retcode != NO_ERROR)
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001831 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001832
Tejun Heoe525fd82010-11-13 11:55:17 +01001833 if ((nbc->backing_bdev == nbc->md_bdev) !=
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001834 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1835 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
Tejun Heoe525fd82010-11-13 11:55:17 +01001836 retcode = ERR_MD_IDX_INVALID;
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001837 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001838 }
1839
1840 resync_lru = lc_create("resync", drbd_bm_ext_cache,
Lars Ellenberg46a15bc2011-02-21 13:21:01 +01001841 1, 61, sizeof(struct bm_extent),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001842 offsetof(struct bm_extent, lce));
1843 if (!resync_lru) {
1844 retcode = ERR_NOMEM;
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001845 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001846 }
1847
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01001848 /* Read our meta data super block early.
1849 * This also sets other on-disk offsets. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001850 retcode = drbd_md_read(device, nbc);
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01001851 if (retcode != NO_ERROR)
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001852 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001853
Philipp Reisnera5ca66c2016-06-14 00:26:14 +02001854 sanitize_disk_conf(device, new_disk_conf, nbc);
Lars Ellenberg5bbcf5e2013-03-19 18:16:59 +01001855
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001856 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001857 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001858 (unsigned long long) drbd_get_max_capacity(nbc),
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001859 (unsigned long long) new_disk_conf->disk_size);
Lars Ellenberg7948bcd2011-06-06 15:36:04 +02001860 retcode = ERR_DISK_TOO_SMALL;
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001861 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001862 }
1863
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001864 if (new_disk_conf->meta_dev_idx < 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001865 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1866 /* at least one MB, otherwise it does not make sense */
1867 min_md_device_sectors = (2<<10);
1868 } else {
1869 max_possible_sectors = DRBD_MAX_SECTORS;
Lars Ellenbergae8bf312013-03-19 18:16:43 +01001870 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001871 }
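	/* e.g. an external meta device at meta_dev_idx 0 must provide at
	 * least MD_128MB_SECT (128 MiB); each additional index requires
	 * another 128 MiB slot, per the formula above. */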
1872
Philipp Reisnerb411b362009-09-25 16:07:19 -07001873 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
Lars Ellenberg7948bcd2011-06-06 15:36:04 +02001874 retcode = ERR_MD_DISK_TOO_SMALL;
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001875 drbd_warn(device, "refusing attach: md-device too small, "
Philipp Reisnerb411b362009-09-25 16:07:19 -07001876 "at least %llu sectors needed for this meta-disk type\n",
1877 (unsigned long long) min_md_device_sectors);
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001878 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001879 }
1880
1881 /* Make sure the new disk is big enough
1882 * (we may currently be R_PRIMARY with no local disk...) */
1883 if (drbd_get_max_capacity(nbc) <
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001884 drbd_get_capacity(device->this_bdev)) {
Lars Ellenberg7948bcd2011-06-06 15:36:04 +02001885 retcode = ERR_DISK_TOO_SMALL;
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001886 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001887 }
1888
1889 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1890
Lars Ellenberg13529942009-10-12 19:07:49 +02001891 if (nbc->known_size > max_possible_sectors) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001892 drbd_warn(device, "==> truncating very big lower level device "
Lars Ellenberg13529942009-10-12 19:07:49 +02001893 "to currently maximum possible %llu sectors <==\n",
1894 (unsigned long long) max_possible_sectors);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001895 if (new_disk_conf->meta_dev_idx >= 0)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001896 drbd_warn(device, "==>> using internal or flexible "
Lars Ellenberg13529942009-10-12 19:07:49 +02001897 "meta data may help <<==\n");
1898 }
1899
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001900 drbd_suspend_io(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001901 /* also wait for the last barrier ack. */
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01001902 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1903 * We need a way to either ignore barrier acks for barriers sent before a device
1904 * was attached, or a way to wait for all pending barrier acks to come in.
1905 * As barriers are counted per resource,
1906 * we'd need to suspend io on all devices of a resource.
1907 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001908 wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001909 /* and for any other previously queued work */
Lars Ellenberg44a4d552013-11-22 12:40:58 +01001910 drbd_flush_workqueue(&connection->sender_work);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001911
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001912 rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001913 retcode = rv; /* FIXME: Type mismatch. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001914 drbd_resume_io(device);
Andreas Gruenbacherf2024e72010-12-10 13:44:05 +01001915 if (rv < SS_SUCCESS)
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001916 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001917
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001918 if (!get_ldev_if_state(device, D_ATTACHING))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001919 goto force_diskless;
1920
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001921 if (!device->bitmap) {
1922 if (drbd_bm_init(device)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001923 retcode = ERR_NOMEM;
1924 goto force_diskless_dec;
1925 }
1926 }
1927
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001928 if (device->state.conn < C_CONNECTED &&
Philipp Reisnerbabea492014-04-28 18:43:34 +02001929 device->state.role == R_PRIMARY && device->ed_uuid &&
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001930 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001931 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001932 (unsigned long long)device->ed_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001933 retcode = ERR_DATA_NOT_CURRENT;
1934 goto force_diskless_dec;
1935 }
1936
1937 /* Since we are diskless, fix the activity log first... */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001938 if (drbd_check_al_size(device, new_disk_conf)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001939 retcode = ERR_NOMEM;
1940 goto force_diskless_dec;
1941 }
1942
1943 /* Prevent shrinking of consistent devices ! */
1944 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001945 drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001946 drbd_warn(device, "refusing to truncate a consistent device\n");
Lars Ellenberg7948bcd2011-06-06 15:36:04 +02001947 retcode = ERR_DISK_TOO_SMALL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001948 goto force_diskless_dec;
1949 }
1950
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001951 lock_all_resources();
1952 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1953 if (retcode != NO_ERROR) {
1954 unlock_all_resources();
1955 goto force_diskless_dec;
1956 }
1957
Philipp Reisnerb411b362009-09-25 16:07:19 -07001958 /* Reset the "barriers don't work" bits here, then force meta data to
1959 * be written, to ensure we determine if barriers are supported. */
Andreas Gruenbachere5440462011-05-04 15:25:35 +02001960 if (new_disk_conf->md_flushes)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001961 clear_bit(MD_NO_FUA, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001962 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001963 set_bit(MD_NO_FUA, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001964
1965 /* Point of no return reached.
1966 * Devices and memory are no longer released by error cleanup below.
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001967	 * Now device takes over responsibility, and the state engine should
Philipp Reisnerb411b362009-09-25 16:07:19 -07001968 * clean it up somewhere. */
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02001969 D_ASSERT(device, device->ldev == NULL);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001970 device->ldev = nbc;
1971 device->resync = resync_lru;
1972 device->rs_plan_s = new_plan;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001973 nbc = NULL;
1974 resync_lru = NULL;
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001975 new_disk_conf = NULL;
Philipp Reisner9958c852011-05-03 16:19:31 +02001976 new_plan = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001977
Andreas Gruenbacher1ec317d2014-08-14 16:17:58 +02001978 drbd_resync_after_changed(device);
Andreas Gruenbacherf6ba8632014-08-13 18:33:55 +02001979 drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
Andreas Gruenbacher28bc3b82014-08-14 18:33:30 +02001980 unlock_all_resources();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001981
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001982 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1983 set_bit(CRASHED_PRIMARY, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001984 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001985 clear_bit(CRASHED_PRIMARY, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001986
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001987 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
Andreas Gruenbacher6bbf53c2011-07-08 01:19:44 +02001988 !(device->state.role == R_PRIMARY && device->resource->susp_nod))
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001989 set_bit(CRASHED_PRIMARY, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001990
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001991 device->send_cnt = 0;
1992 device->recv_cnt = 0;
1993 device->read_cnt = 0;
1994 device->writ_cnt = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001995
Lars Ellenberg9104d312016-06-14 00:26:31 +02001996 drbd_reconsider_queue_parameters(device, device->ldev, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001997
1998 /* If I am currently not R_PRIMARY,
1999 * but meta data primary indicator is set,
2000 * I just now recover from a hard crash,
2001 * and have been R_PRIMARY before that crash.
2002 *
2003 * Now, if I had no connection before that crash
2004 * (have been degraded R_PRIMARY), chances are that
2005 * I won't find my peer now either.
2006 *
2007 * In that case, and _only_ in that case,
2008 * we use the degr-wfc-timeout instead of the default,
2009 * so we can automatically recover from a crash of a
2010 * degraded but active "cluster" after a certain timeout.
2011 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002012 clear_bit(USE_DEGR_WFC_T, &device->flags);
2013 if (device->state.role != R_PRIMARY &&
2014 drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2015 !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2016 set_bit(USE_DEGR_WFC_T, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002017
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002018 dd = drbd_determine_dev_size(device, 0, NULL);
Philipp Reisnerd752b262013-06-25 16:50:08 +02002019 if (dd <= DS_ERROR) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002020 retcode = ERR_NOMEM_BITMAP;
2021 goto force_diskless_dec;
Philipp Reisnere96c9632013-06-25 16:50:07 +02002022 } else if (dd == DS_GREW)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002023 set_bit(RESYNC_AFTER_NEG, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002024
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002025 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2026 (test_bit(CRASHED_PRIMARY, &device->flags) &&
2027 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02002028 drbd_info(device, "Assuming that all blocks are out of sync "
Philipp Reisnerb411b362009-09-25 16:07:19 -07002029 "(aka FullSync)\n");
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002030 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002031 "set_n_write from attaching", BM_LOCKED_MASK)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002032 retcode = ERR_IO_MD_DISK;
2033 goto force_diskless_dec;
2034 }
2035 } else {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002036 if (drbd_bitmap_io(device, &drbd_bm_read,
Andreas Gruenbacher22ab6a32010-12-13 01:44:11 +01002037 "read from attaching", BM_LOCKED_MASK)) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01002038 retcode = ERR_IO_MD_DISK;
2039 goto force_diskless_dec;
2040 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002041 }
2042
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002043 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2044 drbd_suspend_al(device); /* IO is still suspended here... */
Philipp Reisner07782862010-08-31 12:00:50 +02002045
Andreas Gruenbacher05008132011-07-07 14:19:42 +02002046 spin_lock_irq(&device->resource->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002047 os = drbd_read_state(device);
Philipp Reisner78bae592011-03-28 15:40:12 +02002048 ns = os;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002049 /* If MDF_CONSISTENT is not set go into inconsistent state,
2050 otherwise investigate MDF_WasUpToDate...
2051 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2052 otherwise into D_CONSISTENT state.
2053 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002054 if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2055 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002056 ns.disk = D_CONSISTENT;
2057 else
2058 ns.disk = D_OUTDATED;
2059 } else {
2060 ns.disk = D_INCONSISTENT;
2061 }
2062
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002063 if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002064 ns.pdsk = D_OUTDATED;
2065
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002066 rcu_read_lock();
2067 if (ns.disk == D_CONSISTENT &&
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002068 (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002069 ns.disk = D_UP_TO_DATE;
2070
2071 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2072 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2073 this point, because drbd_request_state() modifies these
2074 flags. */
2075
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002076 if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2077 device->ldev->md.flags &= ~MDF_AL_DISABLED;
Philipp Reisner9a51ab12012-02-20 21:53:28 +01002078 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002079 device->ldev->md.flags |= MDF_AL_DISABLED;
Philipp Reisner9a51ab12012-02-20 21:53:28 +01002080
2081 rcu_read_unlock();
2082
Philipp Reisnerb411b362009-09-25 16:07:19 -07002083 /* In case we are C_CONNECTED postpone any decision on the new disk
2084 state after the negotiation phase. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002085 if (device->state.conn == C_CONNECTED) {
2086 device->new_state_tmp.i = ns.i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002087 ns.i = os.i;
2088 ns.disk = D_NEGOTIATING;
Philipp Reisnerdc66c742010-06-02 14:31:29 +02002089
2090 /* We expect to receive up-to-date UUIDs soon.
2091 To avoid a race in receive_state, free p_uuid while
2092 holding req_lock. I.e. atomic with the state change */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002093 kfree(device->p_uuid);
2094 device->p_uuid = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002095 }
2096
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002097 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02002098 spin_unlock_irq(&device->resource->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002099
2100 if (rv < SS_SUCCESS)
2101 goto force_diskless_dec;
2102
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002103 mod_timer(&device->request_timer, jiffies + HZ);
Philipp Reisnercdfda632011-07-05 15:38:59 +02002104
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002105 if (device->state.role == R_PRIMARY)
2106 device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002107 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002108 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002109
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002110 drbd_md_mark_dirty(device);
2111 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002112
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002113 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2114 put_ldev(device);
Lars Ellenberg44a4d552013-11-22 12:40:58 +01002115 conn_reconfig_done(connection);
Lars Ellenberg9e276872014-04-28 18:43:22 +02002116 mutex_unlock(&adm_ctx.resource->adm_mutex);
Lars Ellenberga910b122014-04-28 18:43:21 +02002117 drbd_adm_finish(&adm_ctx, info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002118 return 0;
2119
2120 force_diskless_dec:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002121 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002122 force_diskless:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002123 drbd_force_state(device, NS(disk, D_DISKLESS));
2124 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002125 fail:
Lars Ellenberg44a4d552013-11-22 12:40:58 +01002126 conn_reconfig_done(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002127 if (nbc) {
Lars Ellenberg63a7c8a2015-03-26 20:53:55 +01002128 close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
2129 close_backing_dev(device, nbc->backing_bdev, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002130 kfree(nbc);
2131 }
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02002132 kfree(new_disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002133 lc_destroy(resync_lru);
Philipp Reisner9958c852011-05-03 16:19:31 +02002134 kfree(new_plan);
Lars Ellenberg9e276872014-04-28 18:43:22 +02002135 mutex_unlock(&adm_ctx.resource->adm_mutex);
Lars Ellenberg40cbf082011-03-16 16:52:10 +01002136 finish:
Lars Ellenberga910b122014-04-28 18:43:21 +02002137 drbd_adm_finish(&adm_ctx, info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002138 return 0;
2139}
2140
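/* Worker shared by the detach paths below: either force the disk into
 * D_FAILED immediately, or suspend IO, wait until no meta-data IO is in
 * flight, and request the D_FAILED transition gracefully.  D_FAILED then
 * transitions to D_DISKLESS on its own. */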
static int adm_detach(struct drbd_device *device, int force)
{
	enum drbd_state_rv retcode;
	void *buffer;
	int ret;

	if (force) {
		set_bit(FORCE_DETACH, &device->flags);
		drbd_force_state(device, NS(disk, D_FAILED));
		retcode = SS_SUCCESS;
		goto out;
	}

	drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
	buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
	if (buffer) {
		retcode = drbd_request_state(device, NS(disk, D_FAILED));
		drbd_md_put_buffer(device);
	} else /* already <= D_FAILED */
		retcode = SS_NOTHING_TO_DO;
	/* D_FAILED will transition to DISKLESS. */
	drbd_resume_io(device);
	ret = wait_event_interruptible(device->misc_wait,
			device->state.disk != D_FAILED);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
out:
	return retcode;
}

/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then have we finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct detach_parms parms = { };
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
		err = detach_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_detach(adm_ctx.device, parms.force_detach);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

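/* Helpers that report whether any volume of this connection is currently
 * resyncing or running an online verify.  drbd_adm_net_opts() below uses
 * them to refuse changing csums-alg or verify-alg while the corresponding
 * operation is in progress. */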
static bool conn_resync_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_SYNC_SOURCE ||
		    device->state.conn == C_SYNC_TARGET ||
		    device->state.conn == C_PAUSED_SYNC_S ||
		    device->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

static bool conn_ov_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_VERIFY_S ||
		    device->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

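/* Validate a new net_conf against the current connection state: protocol
 * version requirements (APV 100 for online changes of wire protocol,
 * two-primaries and integrity-alg), two-primaries constraints, fencing
 * versus protocol A, and the congestion policy. */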
static enum drbd_ret_code
_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
{
	struct drbd_peer_device *peer_device;
	int i;

	if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
		if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_net_conf->two_primaries != old_net_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_net_conf->two_primaries &&
	    conn_highest_role(connection) == R_PRIMARY &&
	    conn_highest_peer(connection) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_net_conf->two_primaries &&
	    (new_net_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev(device)) {
			enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
			put_ldev(device);
			if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
			return ERR_DISCARD_IMPOSSIBLE;
	}

	if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}

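/* Wrapper around _check_net_options() that takes the RCU read lock to look
 * at the currently active net_conf, and additionally makes sure every
 * volume has its bitmap allocated before the new options take effect. */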
static enum drbd_ret_code
check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
{
	enum drbd_ret_code rv;
	struct drbd_peer_device *peer_device;
	int i;

	rcu_read_lock();
	rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
	rcu_read_unlock();

	/* connection->peer_devices protected by genl_lock() here */
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		if (!device->bitmap) {
			if (drbd_bm_init(device))
				return ERR_NOMEM;
		}
	}

	return rv;
}

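/* Bundle of crypto transforms referenced by a net_conf.  The life cycle, as
 * used by drbd_adm_net_opts() and drbd_adm_connect() below, is roughly:
 *
 *	struct crypto crypto = { };
 *	retcode = alloc_crypto(&crypto, new_net_conf);
 *	if (retcode != NO_ERROR)
 *		goto fail;
 *	...move the tfms that are kept into the connection...
 *	free_crypto(&crypto);	-- frees whatever was not moved
 *
 * The error paths call free_crypto() on partially allocated bundles; that
 * relies on the crypto_free_*() helpers accepting a NULL tfm. */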
struct crypto {
	struct crypto_ahash *verify_tfm;
	struct crypto_ahash *csums_tfm;
	struct crypto_shash *cram_hmac_tfm;
	struct crypto_ahash *integrity_tfm;
};

static int
alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_shash(tfm_name, 0, 0);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

static int
alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

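/* Allocate all transforms named in new_net_conf.  The alloc_*hash() helpers
 * above treat an empty algorithm name as "not configured" and succeed
 * without allocating, so only algorithms the user actually set are
 * instantiated. */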
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;

	rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
			 ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
			 ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
			 ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_net_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_net_conf->cram_hmac_alg);

		rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
				 ERR_AUTH_ALG);
	}

	return rv;
}

static void free_crypto(struct crypto *crypto)
{
	crypto_free_shash(crypto->cram_hmac_tfm);
	crypto_free_ahash(crypto->integrity_tfm);
	crypto_free_ahash(crypto->csums_tfm);
	crypto_free_ahash(crypto->verify_tfm);
}

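/* Change the net options of an existing connection.  The update follows the
 * RCU copy-update pattern used throughout this file: allocate a new
 * net_conf, copy the old one, apply the netlink attributes, publish it with
 * rcu_assign_pointer(), and only kfree() the old struct after
 * synchronize_rcu().  Changing csums-alg/verify-alg is refused while a
 * resync/online verify is running. */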
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_connection *connection;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	connection = adm_ctx.connection;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_net_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(connection);

	mutex_lock(&connection->data.mutex);
	mutex_lock(&connection->resource->conf_update);
	old_net_conf = connection->net_conf;

	if (!old_net_conf) {
		drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_net_conf = *old_net_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_net_conf);

	err = net_conf_from_attrs_for_change(new_net_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(connection, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(connection);
	if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(connection);
	if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(connection->net_conf, new_net_conf);

	if (!rsr) {
		crypto_free_ahash(connection->csums_tfm);
		connection->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_ahash(connection->verify_tfm);
		connection->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	crypto_free_ahash(connection->integrity_tfm);
	connection->integrity_tfm = crypto.integrity_tfm;
	if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
		/* Do this without trying to take connection->data.mutex again. */
		__drbd_send_protocol(connection, P_PROTOCOL_UPDATE);

	crypto_free_shash(connection->cram_hmac_tfm);
	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&connection->resource->conf_update);
	mutex_unlock(&connection->data.mutex);
	synchronize_rcu();
	kfree(old_net_conf);

	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		struct drbd_peer_device *peer_device;
		int vnr;

		idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
			drbd_send_sync_param(peer_device);
	}

	goto done;

 fail:
	mutex_unlock(&connection->resource->conf_update);
	mutex_unlock(&connection->data.mutex);
	free_crypto(&crypto);
	kfree(new_net_conf);
 done:
	conn_reconfig_done(connection);
 out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

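/* Fill the state-change notification structs from the current connection
 * and peer device state; consumed by notify_connection_state() and
 * notify_peer_device_state() in drbd_adm_connect() below. */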
static void connection_to_info(struct connection_info *info,
			       struct drbd_connection *connection)
{
	info->conn_connection_state = connection->cstate;
	info->conn_role = conn_highest_peer(connection);
}

static void peer_device_to_info(struct peer_device_info *info,
				struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;

	info->peer_repl_state =
		max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
	info->peer_disk_state = device->state.pdsk;
	info->peer_resync_susp_user = device->state.user_isp;
	info->peer_resync_susp_peer = device->state.peer_isp;
	info->peer_resync_susp_dependency = device->state.aftr_isp;
}

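/* Take a connection from C_STANDALONE to C_UNCONNECTED: check that the
 * requested endpoint addresses are not in use by any existing connection,
 * install the new net_conf and crypto transforms, send NOTIFY_CREATE events
 * for the connection and each peer device, and finally request
 * C_UNCONNECTED so that the receiver starts to establish the connection. */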
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct connection_info connection_info;
	enum drbd_notification_type flags;
	unsigned int peer_devices = 0;
	struct drbd_config_context adm_ctx;
	struct drbd_peer_device *peer_device;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	struct crypto crypto = { };
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);

	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
		drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	for_each_resource(resource, &drbd_resources) {
		for_each_connection(connection, resource) {
			if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
			    !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
				    connection->my_addr_len)) {
				retcode = ERR_LOCAL_ADDR;
				goto out;
			}

			if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
			    !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
				    connection->peer_addr_len)) {
				retcode = ERR_PEER_ADDR;
				goto out;
			}
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	connection = first_connection(adm_ctx.resource);
	conn_reconfig_start(connection);

	if (connection->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, drbdsetup / netlink process context */
	new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
	if (!new_net_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_net_conf);

	err = net_conf_from_attrs(new_net_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(connection, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = alloc_crypto(&crypto, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	drbd_flush_workqueue(&connection->sender_work);

	mutex_lock(&adm_ctx.resource->conf_update);
	old_net_conf = connection->net_conf;
	if (old_net_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&adm_ctx.resource->conf_update);
		goto fail;
	}
	rcu_assign_pointer(connection->net_conf, new_net_conf);

	conn_free_crypto(connection);
	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
	connection->integrity_tfm = crypto.integrity_tfm;
	connection->csums_tfm = crypto.csums_tfm;
	connection->verify_tfm = crypto.verify_tfm;

	connection->my_addr_len = nla_len(adm_ctx.my_addr);
	memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
	connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
	memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);

	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		peer_devices++;
	}

	connection_to_info(&connection_info, connection);
	flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
	mutex_lock(&notification_mutex);
	notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct peer_device_info peer_device_info;

		peer_device_to_info(&peer_device_info, peer_device);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
	}
	mutex_unlock(&notification_mutex);
	mutex_unlock(&adm_ctx.resource->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		device->send_cnt = 0;
		device->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_net_conf);

	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

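/* Try to take the connection down to C_STANDALONE.  Depending on how the
 * cluster-wide state change fails, this retries with the peer (or our own
 * disk) marked outdated; on success it waits for the receiver thread to
 * really terminate. */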
static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);

		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);

		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:;
		/* no special handling necessary */
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&connection->receiver);

		/* Race breaker. This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart. We may have "killed" the receiver thread just
		 * after drbd_receiver() returned. Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			drbd_err(connection,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
		/* Unlike in DRBD 9, the state engine has generated
		 * NOTIFY_DESTROY events before clearing connection->net_conf. */
	}
	return rv;
}

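/* Netlink entry point for the disconnect command (typically issued via
 * drbdsetup); parses the optional force flag and delegates to
 * conn_try_disconnect() above. */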
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct disconnect_parms parms;
	struct drbd_connection *connection;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	connection = adm_ctx.connection;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	rv = conn_try_disconnect(connection, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 fail:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

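/* After an online grow, one side has to resync the new storage area.  If
 * the roles differ, the current Primary becomes the sync source; between
 * two Secondaries, the RESOLVE_CONFLICTS bit acts as the tie-breaker. */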
void resync_after_online_grow(struct drbd_device *device)
{
	int iass; /* I am sync source */

	drbd_info(device, "Resync of new storage after online grow\n");
	if (device->state.role != device->state.peer)
		iass = (device->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);

	if (iass)
		drbd_start_resync(device, C_SYNC_SOURCE);
	else
		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

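/* Resize the device, optionally changing the on-disk activity log layout.
 * A changed user-requested size is published as a new disk_conf via the
 * usual RCU copy-update; drbd_determine_dev_size() then does the actual
 * work and reports whether the device grew, shrank, or failed. */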
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	bool change_al_layout = false;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	device = adm_ctx.device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	memset(&rs, 0, sizeof(struct resize_parms));
	rs.al_stripes = device->ldev->md.al_stripes;
	rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto fail_ldev;
		}
	}

	if (device->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail_ldev;
	}

	if (device->state.role == R_SECONDARY &&
	    device->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail_ldev;
	}

	if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (device->ldev->md.al_stripes != rs.al_stripes ||
	    device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
		u32 al_size_k = rs.al_stripes * rs.al_stripe_size;

		if (al_size_k > (16 * 1024 * 1024)) {
			retcode = ERR_MD_LAYOUT_TOO_BIG;
			goto fail_ldev;
		}

		if (al_size_k < MD_32kB_SECT/2) {
			retcode = ERR_MD_LAYOUT_TOO_SMALL;
			goto fail_ldev;
		}

		if (device->state.conn != C_CONNECTED && !rs.resize_force) {
			retcode = ERR_MD_LAYOUT_CONNECTED;
			goto fail_ldev;
		}

		change_al_layout = true;
	}

	if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
		device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&device->resource->conf_update);
		old_disk_conf = device->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&device->resource->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
		new_disk_conf = NULL;
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
	drbd_md_sync(device);
	put_ldev(device);
	if (dd == DS_ERROR) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	} else if (dd == DS_ERROR_SPACE_MD) {
		retcode = ERR_MD_LAYOUT_NO_FIT;
		goto fail;
	} else if (dd == DS_ERROR_SHRINK) {
		retcode = ERR_IMPLICIT_SHRINK;
		goto fail;
	}

	if (device->state.conn == C_CONNECTED) {
		if (dd == DS_GREW)
			set_bit(RESIZE_PENDING, &device->flags);

		drbd_send_uuids(first_peer_device(device));
		drbd_send_sizes(first_peer_device(device), 1, ddsf);
	}

 fail:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;

 fail_ldev:
	put_ldev(device);
	kfree(new_disk_conf);
	goto fail;
}

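/* Update the per-resource options from the netlink attributes, optionally
 * resetting them to their defaults first. */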
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	res_opts = adm_ctx.resource->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	err = set_resource_options(adm_ctx.resource, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);

fail:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002970int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002971{
Lars Ellenberga910b122014-04-28 18:43:21 +02002972 struct drbd_config_context adm_ctx;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002973 struct drbd_device *device;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002974 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2975
Lars Ellenberga910b122014-04-28 18:43:21 +02002976 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002977 if (!adm_ctx.reply_skb)
2978 return retcode;
2979 if (retcode != NO_ERROR)
2980 goto out;
2981
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002982 device = adm_ctx.device;
Philipp Reisner8fe39aa2013-11-22 13:22:13 +01002983 if (!get_ldev(device)) {
2984 retcode = ERR_NO_DISK;
2985 goto out;
2986 }
2987
2988 mutex_lock(&adm_ctx.resource->adm_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002989
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002990 /* If there is still bitmap IO pending, probably because of a previous
Lars Ellenberg7ee1fb92012-06-19 10:27:58 +02002991 * resync just being finished, wait for it before requesting a new resync.
2992 * Also wait for it's after_state_ch(). */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002993 drbd_suspend_io(device);
2994 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
Andreas Gruenbacherb5043c52011-07-28 15:56:02 +02002995 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
Lars Ellenberg194bfb32011-01-18 10:38:01 +01002996
Philipp Reisner0b2dafc2013-03-27 14:08:38 +01002997 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2998 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
2999 * try to start a resync handshake as sync target for full sync.
Philipp Reisner9376d9f2013-03-27 14:08:36 +01003000 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003001 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
3002 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
Philipp Reisner0b2dafc2013-03-27 14:08:38 +01003003 if (retcode >= SS_SUCCESS) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003004 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
Philipp Reisner0b2dafc2013-03-27 14:08:38 +01003005 "set_n_write from invalidate", BM_LOCKED_MASK))
3006 retcode = ERR_IO_MD_DISK;
3007 }
3008 } else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003009 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
3010 drbd_resume_io(device);
Lars Ellenberg9e276872014-04-28 18:43:22 +02003011 mutex_unlock(&adm_ctx.resource->adm_mutex);
Philipp Reisner8fe39aa2013-11-22 13:22:13 +01003012 put_ldev(device);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003013out:
Lars Ellenberga910b122014-04-28 18:43:21 +02003014 drbd_adm_finish(&adm_ctx, info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003015 return 0;
3016}
3017
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003018static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
3019 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003020{
Lars Ellenberga910b122014-04-28 18:43:21 +02003021 struct drbd_config_context adm_ctx;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003022 enum drbd_ret_code retcode;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003023
Lars Ellenberga910b122014-04-28 18:43:21 +02003024 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003025 if (!adm_ctx.reply_skb)
3026 return retcode;
3027 if (retcode != NO_ERROR)
3028 goto out;
Lars Ellenberg194bfb32011-01-18 10:38:01 +01003029
Lars Ellenberg9e276872014-04-28 18:43:22 +02003030 mutex_lock(&adm_ctx.resource->adm_mutex);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003031 retcode = drbd_request_state(adm_ctx.device, mask, val);
Lars Ellenberg9e276872014-04-28 18:43:22 +02003032 mutex_unlock(&adm_ctx.resource->adm_mutex);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01003033out:
Lars Ellenberga910b122014-04-28 18:43:21 +02003034 drbd_adm_finish(&adm_ctx, info, retcode);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003035 return 0;
3036}
3037
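/* Bitmap-IO worker used by invalidate_peer below: set all bits in the
 * bitmap, and additionally suspend updates to the activity log while
 * the full sync towards the peer is pending. */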
static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
{
	int rv;

	rv = drbd_bmio_set_n_write(device);
	drbd_suspend_al(device);
	return rv;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	int retcode; /* drbd_ret_code, drbd_state_rv */
	struct drbd_device *device;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);

	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
	 * in the bitmap.  Otherwise, try to start a resync handshake
	 * as sync source for full sync.
	 */
	if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
		/* The peer will get a resync upon connect anyway.  Just make that
		   into a full resync. */
		retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
				"set_n_write from invalidate_peer",
				BM_LOCKED_SET_ALLOWED))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	put_ldev(device);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.device->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	device = adm_ctx.device;
	if (test_bit(NEW_CUR_UUID, &device->flags)) {
		if (get_ldev_if_state(device, D_ATTACHING)) {
			drbd_uuid_new_current(device);
			put_ldev(device);
		} else {
			/* This is effectively a multi-stage "forced down".
			 * The NEW_CUR_UUID bit is supposedly only set, if we
			 * lost the replication connection, and are configured
			 * to freeze IO and wait for some fence-peer handler.
			 * So we still don't have a replication connection.
			 * And now we don't have a local disk either.  After
			 * resume, we will fail all pending and new IO, because
			 * we don't have any data anymore.  Which means we will
			 * eventually be able to terminate all users of this
			 * device, and then take it down.  By bumping the
			 * "effective" data uuid, we make sure that you really
			 * need to tear down before you reconfigure: we will
			 * then refuse to re-connect or re-attach (because no
			 * matching real data uuid exists).
			 */
			u64 val;
			get_random_bytes(&val, sizeof(u64));
			drbd_set_ed_uuid(device, val);
			drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
		}
		clear_bit(NEW_CUR_UUID, &device->flags);
	}
	drbd_suspend_io(device);
	retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (device->state.conn < C_CONNECTED)
			tl_clear(first_peer_device(device)->connection);
		if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
			tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

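/*
 * Emit the nested configuration context.  Sketch of the attribute
 * layout produced here (optional fields only when the corresponding
 * argument is non-NULL):
 *
 *	DRBD_NLA_CFG_CONTEXT
 *		T_ctx_volume		(u32, only with @device)
 *		T_ctx_resource_name	(string)
 *		T_ctx_my_addr		(binary, only with @connection)
 *		T_ctx_peer_addr		(binary, only with @connection)
 */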
static int nla_put_drbd_cfg_context(struct sk_buff *skb,
				    struct drbd_resource *resource,
				    struct drbd_connection *connection,
				    struct drbd_device *device)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (device &&
	    nla_put_u32(skb, T_ctx_volume, device->vnr))
		goto nla_put_failure;
	if (nla_put_string(skb, T_ctx_resource_name, resource->name))
		goto nla_put_failure;
	if (connection) {
		if (connection->my_addr_len &&
		    nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
			goto nla_put_failure;
		if (connection->peer_addr_len &&
		    nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
			goto nla_put_failure;
	}
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

/*
 * The generic netlink dump callbacks are called outside the genl_lock(), so
 * they cannot use the simple attribute parsing code which uses global
 * attribute tables.
 */
static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	struct nlattr *nla;

	nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		return NULL;
	return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
}

static void resource_to_info(struct resource_info *, struct drbd_resource *);

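/*
 * Netlink dump callback for DRBD_ADM_GET_RESOURCES.  Each invocation
 * emits at most one resource; cb->args[0] remembers the resource dumped
 * last time, so the next invocation can continue after it.  An
 * invocation that adds no new entry leaves the skb empty, which
 * terminates the dump.
 */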
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *resource;
	struct resource_info resource_info;
	struct resource_statistics resource_statistics;
	int err;

	rcu_read_lock();
	if (cb->args[0]) {
		for_each_resource_rcu(resource, &drbd_resources)
			if (resource == (struct drbd_resource *)cb->args[0])
				goto found_resource;
		err = 0; /* resource was probably deleted */
		goto out;
	}
	resource = list_entry(&drbd_resources,
			      struct drbd_resource, resources);

found_resource:
	list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
		goto put_result;
	}
	err = 0;
	goto out;

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
	if (err)
		goto out;
	err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	resource_to_info(&resource_info, resource);
	err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	cb->args[0] = (long)resource;
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}

static void device_to_statistics(struct device_statistics *s,
				 struct drbd_device *device)
{
	memset(s, 0, sizeof(*s));
	s->dev_upper_blocked = !may_inc_ap_bio(device);
	if (get_ldev(device)) {
		struct drbd_md *md = &device->ldev->md;
		u64 *history_uuids = (u64 *)s->history_uuids;
		struct request_queue *q;
		int n;

		spin_lock_irq(&md->uuid_lock);
		s->dev_current_uuid = md->uuid[UI_CURRENT];
		BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
		for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
			history_uuids[n] = md->uuid[UI_HISTORY_START + n];
		for (; n < HISTORY_UUIDS; n++)
			history_uuids[n] = 0;
		s->history_uuids_len = HISTORY_UUIDS;
		spin_unlock_irq(&md->uuid_lock);

		s->dev_disk_flags = md->flags;
		q = bdev_get_queue(device->ldev->backing_bdev);
		s->dev_lower_blocked =
			bdi_congested(q->backing_dev_info,
				      (1 << WB_async_congested) |
				      (1 << WB_sync_congested));
		put_ldev(device);
	}
	s->dev_size = drbd_get_capacity(device->this_bdev);
	s->dev_read = device->read_cnt;
	s->dev_write = device->writ_cnt;
	s->dev_al_writes = device->al_writ_cnt;
	s->dev_bm_writes = device->bm_writ_cnt;
	s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
	s->dev_lower_pending = atomic_read(&device->local_cnt);
	s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
	s->dev_exposed_data_uuid = device->ed_uuid;
}

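/* Common "dump done" helper: drop the resource reference that the
 * corresponding dump callback parked in cb->args[0]. */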
static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
{
	if (cb->args[0]) {
		struct drbd_resource *resource =
			(struct drbd_resource *)cb->args[0];
		kref_put(&resource->kref, drbd_destroy_resource);
	}

	return 0;
}

int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 7);
}

static void device_to_info(struct device_info *, struct drbd_device *);

int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource;
	struct drbd_device *uninitialized_var(device);
	int minor, err, retcode;
	struct drbd_genlmsghdr *dh;
	struct device_info device_info;
	struct device_statistics device_statistics;
	struct idr *idr_to_search;

	/* Take the RCU read lock up front: the early "goto put_result"
	 * below also leaves through out:, which does rcu_read_unlock(). */
	rcu_read_lock();
	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0] && !cb->args[1]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource)
				goto put_result;
			cb->args[0] = (long)resource;
		}
	}

	minor = cb->args[1];
	idr_to_search = resource ? &resource->devices : &drbd_devices;
	device = idr_get_next(idr_to_search, &minor);
	if (!device) {
		err = 0;
		goto out;
	}
	idr_for_each_entry_continue(idr_to_search, device, minor) {
		retcode = NO_ERROR;
		goto put_result; /* only one iteration */
	}
	err = 0;
	goto out; /* no more devices */

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		dh->minor = device->minor;
		err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
		if (err)
			goto out;
		if (get_ldev(device)) {
			struct disk_conf *disk_conf =
				rcu_dereference(device->ldev->disk_conf);

			err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
			put_ldev(device);
			if (err)
				goto out;
		}
		device_to_info(&device_info, device);
		err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;

		device_to_statistics(&device_statistics, device);
		err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[1] = minor + 1;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}

int drbd_adm_dump_connections_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 6);
}

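/* values for cb->args[1] of the connections dump: restrict the dump to
 * the one filtered resource, or iterate over all of them */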
enum { SINGLE_RESOURCE, ITERATE_RESOURCES };

int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource = NULL, *next_resource;
	struct drbd_connection *uninitialized_var(connection);
	int err = 0, retcode;
	struct drbd_genlmsghdr *dh;
	struct connection_info connection_info;
	struct connection_statistics connection_statistics;

	rcu_read_lock();
	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource)
				goto put_result;
			cb->args[0] = (long)resource;
			cb->args[1] = SINGLE_RESOURCE;
		}
	}
	if (!resource) {
		if (list_empty(&drbd_resources))
			goto out;
		resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
		kref_get(&resource->kref);
		cb->args[0] = (long)resource;
		cb->args[1] = ITERATE_RESOURCES;
	}

    next_resource:
	rcu_read_unlock();
	mutex_lock(&resource->conf_update);
	rcu_read_lock();
	if (cb->args[2]) {
		for_each_connection_rcu(connection, resource)
			if (connection == (struct drbd_connection *)cb->args[2])
				goto found_connection;
		/* connection was probably deleted */
		goto no_more_connections;
	}
	connection = list_entry(&resource->connections, struct drbd_connection, connections);

found_connection:
	list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
		if (!has_net_conf(connection))
			continue;
		retcode = NO_ERROR;
		goto put_result; /* only one iteration */
	}

no_more_connections:
	if (cb->args[1] == ITERATE_RESOURCES) {
		for_each_resource_rcu(next_resource, &drbd_resources) {
			if (next_resource == resource)
				goto found_resource;
		}
		/* resource was probably deleted */
	}
	goto out;

found_resource:
	list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
		mutex_unlock(&resource->conf_update);
		kref_put(&resource->kref, drbd_destroy_resource);
		resource = next_resource;
		kref_get(&resource->kref);
		cb->args[0] = (long)resource;
		cb->args[2] = 0;
		goto next_resource;
	}
	goto out; /* no more resources */

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		struct net_conf *net_conf;

		err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
		if (err)
			goto out;
		net_conf = rcu_dereference(connection->net_conf);
		if (net_conf) {
			err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
			if (err)
				goto out;
		}
		connection_to_info(&connection_info, connection);
		err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
		err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[2] = (long)connection;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (resource)
		mutex_unlock(&resource->conf_update);
	if (err)
		return err;
	return skb->len;
}

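/* Peer flags reported in peer_device_statistics: a userspace-visible
 * summary derived from the on-disk MDF_* flags, see
 * peer_device_to_statistics() below. */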
enum mdf_peer_flag {
	MDF_PEER_CONNECTED = 1 << 0,
	MDF_PEER_OUTDATED = 1 << 1,
	MDF_PEER_FENCING = 1 << 2,
	MDF_PEER_FULL_SYNC = 1 << 3,
};

static void peer_device_to_statistics(struct peer_device_statistics *s,
				      struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;

	memset(s, 0, sizeof(*s));
	s->peer_dev_received = device->recv_cnt;
	s->peer_dev_sent = device->send_cnt;
	s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
			      atomic_read(&device->rs_pending_cnt);
	s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
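	/* out-of-sync and resync-failed counts are kept at bitmap
	 * granularity; shifting by (BM_BLOCK_SHIFT - 9) converts
	 * bitmap bits to 512-byte sectors */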
	s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
	s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
	if (get_ldev(device)) {
		struct drbd_md *md = &device->ldev->md;

		spin_lock_irq(&md->uuid_lock);
		s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
		spin_unlock_irq(&md->uuid_lock);
		s->peer_dev_flags =
			(drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
				MDF_PEER_CONNECTED : 0) +
			(drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
			 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
				MDF_PEER_OUTDATED : 0) +
			/* FIXME: MDF_PEER_FENCING? */
			(drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
				MDF_PEER_FULL_SYNC : 0);
		put_ldev(device);
	}
}

int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 9);
}

int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource;
	struct drbd_device *uninitialized_var(device);
	struct drbd_peer_device *peer_device = NULL;
	int minor, err, retcode;
	struct drbd_genlmsghdr *dh;
	struct idr *idr_to_search;

	/* as in drbd_adm_dump_devices(): lock up front, the early
	 * "goto put_result" also exits through out: */
	rcu_read_lock();
	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0] && !cb->args[1]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource)
				goto put_result;
		}
		cb->args[0] = (long)resource;
	}

	minor = cb->args[1];
	idr_to_search = resource ? &resource->devices : &drbd_devices;
	device = idr_find(idr_to_search, minor);
	if (!device) {
next_device:
		minor++;
		cb->args[2] = 0;
		device = idr_get_next(idr_to_search, &minor);
		if (!device) {
			err = 0;
			goto out;
		}
	}
	if (cb->args[2]) {
		for_each_peer_device(peer_device, device)
			if (peer_device == (struct drbd_peer_device *)cb->args[2])
				goto found_peer_device;
		/* peer device was probably deleted */
		goto next_device;
	}
	/* Make peer_device point to the list head (not the first entry). */
	peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);

found_peer_device:
	list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
		if (!has_net_conf(peer_device->connection))
			continue;
		retcode = NO_ERROR;
		goto put_result; /* only one iteration */
	}
	goto next_device;

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		struct peer_device_info peer_device_info;
		struct peer_device_statistics peer_device_statistics;

		dh->minor = minor;
		err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
		if (err)
			goto out;
		peer_device_to_info(&peer_device_info, peer_device);
		err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		peer_device_to_statistics(&peer_device_statistics, peer_device);
		err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[1] = minor;
		cb->args[2] = (long)peer_device;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}
/*
 * Return the connection of @resource if @resource has exactly one connection.
 */
static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
{
	struct list_head *connections = &resource->connections;

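	/* a non-empty list whose first entry links straight back to the
	 * head has exactly one entry */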
	if (list_empty(connections) || connections->next->next != connections)
		return NULL;
	return list_first_entry(&resource->connections, struct drbd_connection, connections);
}

static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
		const struct sib_info *sib)
{
	struct drbd_resource *resource = device->resource;
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(device);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev) {
		struct disk_conf *disk_conf;

		disk_conf = rcu_dereference(device->ldev->disk_conf);
		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
	}
	if (!err) {
		struct net_conf *nc;

		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
		if (nc)
			err = net_conf_to_skb(skb, nc, exclude_sensitive);
	}
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, device->state.i) ||
	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
	    nla_put_u64_0pad(skb, T_capacity,
			     drbd_get_capacity(device->this_bdev)) ||
	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
	    nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
	    nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
	    nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
		goto nla_put_failure;

	if (got_ldev) {
		int err;

		spin_lock_irq(&device->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
		spin_unlock_irq(&device->ldev->md.uuid_lock);

		if (err)
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
		    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
		    nla_put_u64_0pad(skb, T_bits_oos,
				     drbd_bm_total_weight(device)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= device->state.conn &&
		    C_PAUSED_SYNC_T >= device->state.conn) {
			if (nla_put_u64_0pad(skb, T_bits_rs_total,
					     device->rs_total) ||
			    nla_put_u64_0pad(skb, T_bits_rs_failed,
					     device->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch(sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(device);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_device *device;
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
	struct drbd_resource *resource = NULL;
	struct drbd_resource *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * for_each_resource_safe(resource, tmp, &drbd_resources) {
	 *	connection = "first connection of resource or undefined";
	 *	idr_for_each_entry(&resource->devices, device, i) {
	 *	  ...
	 *	}
	 * }
	 * where resource is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the device won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/drbd_destroy_connection() */
	rcu_read_lock();
	/* revalidate iterator position */
	for_each_resource_rcu(tmp, &drbd_resources) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			resource = pos;
			break;
		}
		if (tmp == pos) {
			resource = pos;
			break;
		}
	}
	if (resource) {
next_resource:
		device = idr_get_next(&resource->devices, &volume);
		if (!device) {
			/* No more volumes to dump on this resource.
			 * Advance resource iterator. */
			pos = list_entry_rcu(resource->resources.next,
					     struct drbd_resource, resources);
			/* Did we dump any volume of this resource yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->resources == &drbd_resources || cb->args[2])
					goto out;
				volume = 0;
				resource = pos;
				goto next_resource;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!device) {
			/* This is a connection without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct drbd_connection *connection;

			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			connection = the_only_connection(resource);
			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
				goto cancel;
			if (connection) {
				struct net_conf *nc;

				nc = rcu_dereference(connection->net_conf);
				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
					goto cancel;
			}
			goto done;
		}

		D_ASSERT(device, device->vnr == volume);
		D_ASSERT(device, device->resource == resource);

		dh->minor = device_to_minor(device);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, device, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == resource) ? volume + 1 : 0;

	/* No more resources/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_resource *resource;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	if (!*resource_name)
		return -ENODEV;
	resource = drbd_find_resource(resource_name);
	if (!resource)
		return -ENODEV;

	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this connection. */
	cb->args[0] = (long)resource;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)resource;

dump:
	return get_one_status(skb, cb);
}

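/*
 * Classify which wait-for-connection timeout applies to this device:
 * UT_PEER_OUTDATED if the peer's disk is known to be outdated,
 * UT_DEGRADED if this node came up degraded, UT_DEFAULT otherwise
 * (presumably matching the wfc-timeout family of configuration
 * settings evaluated by userspace).
 */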
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01004064int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004065{
Lars Ellenberga910b122014-04-28 18:43:21 +02004066 struct drbd_config_context adm_ctx;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01004067 enum drbd_ret_code retcode;
4068 struct timeout_parms tp;
4069 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004070
Lars Ellenberga910b122014-04-28 18:43:21 +02004071 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01004072 if (!adm_ctx.reply_skb)
4073 return retcode;
4074 if (retcode != NO_ERROR)
4075 goto out;
4076
4077 tp.timeout_type =
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02004078 adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
4079 test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01004080 UT_DEFAULT;
4081
4082 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
4083 if (err) {
4084 nlmsg_free(adm_ctx.reply_skb);
4085 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004086 }
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01004087out:
Lars Ellenberga910b122014-04-28 18:43:21 +02004088 drbd_adm_finish(&adm_ctx, info, retcode);
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01004089 return 0;
4090}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;

	/* resume from last known position, if possible */
	parms.ov_start_sector = device->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* w_make_ov_request expects position to be aligned */
	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	device->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
	drbd_resume_io(device);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
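
/*
 * Worked example (illustrative numbers only): assuming the usual 4KiB
 * bitmap granularity, BM_SECT_PER_BIT is 8, so a requested start sector
 * of 1003 is rounded down to the bitmap-bit boundary:
 *
 *	1003 & ~(8 - 1) = 1003 & ~7 = 1000
 *
 * i.e. online verify always begins at the first sector covered by a
 * bitmap bit, as w_make_ov_request expects.
 */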

int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	device = adm_ctx.device;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (device->state.conn == C_CONNECTED &&
	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		drbd_info(device, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (device->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			drbd_err(device, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
			_drbd_uuid_set(device, UI_BITMAP, 0);
			drbd_print_uuids(device, "cleared bitmap UUID");
			spin_lock_irq(&device->resource->req_lock);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&device->resource->req_lock);
		}
	}

	drbd_md_sync(device);
out_dec:
	put_ldev(device);
out:
	mutex_unlock(device->state_mutex);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
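
/*
 * Sketch of the UUID rotation above, with made-up values A, B and N
 * (an informal illustration of the two inline comments, not a normative
 * description of the UUID machinery):
 *
 *	before:                      current = A, bitmap = B
 *	drbd_uuid_set(UI_BITMAP, 0): current = A, bitmap = 0, history[0] = B
 *	drbd_uuid_new_current():     current = N, bitmap = A
 *
 * With clear_bm set on a just-created device pair, both sides then agree
 * on the UUIDs and see an empty bitmap, so no initial resync is needed.
 */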

static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
	const char *name = adm_ctx->resource_name;
	if (!name || !name[0]) {
		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
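
/*
 * Examples (illustrative): "r0" and "web-data" pass the check above;
 * an empty or missing name yields ERR_MANDATORY_TAG, and "a/b" is
 * rejected with ERR_INVALID_REQUEST, since the name may some day be
 * reused as a sysfs/configfs/debugfs directory entry.
 */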

static void resource_to_info(struct resource_info *info,
			     struct drbd_resource *resource)
{
	info->res_role = conn_highest_role(first_connection(resource));
	info->res_susp = resource->susp;
	info->res_susp_nod = resource->susp_nod;
	info->res_susp_fen = resource->susp_fen;
}

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_connection *connection;
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(&adm_ctx);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.resource) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	/* not yet safe for genl_family.parallel_ops */
	mutex_lock(&resources_mutex);
	connection = conn_create(adm_ctx.resource_name, &res_opts);
	mutex_unlock(&resources_mutex);

	if (connection) {
		struct resource_info resource_info;

		mutex_lock(&notification_mutex);
		resource_to_info(&resource_info, connection->resource);
		notify_resource_state(NULL, 0, connection->resource,
				      &resource_info, NOTIFY_CREATE);
		mutex_unlock(&notification_mutex);
	} else
		retcode = ERR_NOMEM;

out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static void device_to_info(struct device_info *info,
			   struct drbd_device *device)
{
	info->dev_disk_state = device->state.disk;
}

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that first_peer_device(device)->connection and device->vnr match the request. */
	if (adm_ctx.device) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_create_device(&adm_ctx, dh->minor);
	if (retcode == NO_ERROR) {
		struct drbd_device *device;
		struct drbd_peer_device *peer_device;
		struct device_info info;
		unsigned int peer_devices = 0;
		enum drbd_notification_type flags;

		device = minor_to_device(dh->minor);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			peer_devices++;
		}

		device_to_info(&info, device);
		mutex_lock(&notification_mutex);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
		for_each_peer_device(peer_device, device) {
			struct peer_device_info peer_device_info;

			if (!has_net_conf(peer_device->connection))
				continue;
			peer_device_to_info(&peer_device_info, peer_device);
			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
						 NOTIFY_CREATE | flags);
		}
		mutex_unlock(&notification_mutex);
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
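
/*
 * Illustration of the NOTIFY_CONTINUES chaining above (hypothetical
 * count): with peer_devices == 2 the emitted event group is
 *
 *	device event           flags = NOTIFY_CONTINUES   (peer_devices -> 1)
 *	1st peer device event  flags = NOTIFY_CONTINUES   (peer_devices -> 0)
 *	2nd peer device event  flags = 0
 *
 * Only the last event of the group lacks NOTIFY_CONTINUES; that is how
 * listeners recognize the end of a multi-part notification.
 */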

static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
	struct drbd_peer_device *peer_device;

	if (device->state.disk == D_DISKLESS &&
	    /* no need to be device->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    device->state.role == R_SECONDARY) {
		struct drbd_connection *connection =
			first_connection(device->resource);

		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);

		/* If the state engine hasn't stopped the sender thread yet, we
		 * need to flush the sender work queue before generating the
		 * DESTROY events here. */
		if (get_t_state(&connection->worker) == RUNNING)
			drbd_flush_workqueue(&connection->sender_work);

		mutex_lock(&notification_mutex);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		}
		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		drbd_delete_device(device);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_del_minor(adm_ctx.device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static int adm_del_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;

	for_each_connection(connection, resource) {
		if (connection->cstate > C_STANDALONE)
			return ERR_NET_CONFIGURED;
	}
	if (!idr_is_empty(&resource->devices))
		return ERR_RES_IN_USE;

	/* The state engine has stopped the sender thread, so we don't
	 * need to flush the sender work queue before generating the
	 * DESTROY event here. */
	mutex_lock(&notification_mutex);
	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
	mutex_unlock(&notification_mutex);

	mutex_lock(&resources_mutex);
	list_del_rcu(&resource->resources);
	mutex_unlock(&resources_mutex);
	/* Make sure all threads have actually stopped: state handling only
	 * does drbd_thread_stop_nowait(). */
	list_for_each_entry(connection, &resource->connections, connections)
		drbd_thread_stop(&connection->worker);
	synchronize_rcu();
	drbd_free_resource(resource);
	return NO_ERROR;
}

int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
	unsigned i;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	resource = adm_ctx.resource;
	mutex_lock(&resource->adm_mutex);
	/* demote */
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;

		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
			if (retcode < SS_SUCCESS) {
				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
				goto out;
			}
		}

		retcode = conn_try_disconnect(connection, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
			goto out;
		}
	}

	/* detach */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_detach(device, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
			goto out;
		}
	}

	/* delete volumes */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_del_minor(device);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
			goto out;
		}
	}

	retcode = adm_del_resource(resource);
out:
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
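
/*
 * The function above performs the whole teardown in dependency order:
 * demote to R_SECONDARY, disconnect, detach, delete the minors, then
 * delete the resource itself.  Roughly what the userspace tooling
 * triggers with (sketch, assuming the drbdsetup utility):
 *
 *	drbdsetup down <resource>
 *
 * The first failing step aborts the sequence, leaving the resource
 * partially downed with an explanatory message in the netlink reply.
 */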

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;
	resource = adm_ctx.resource;

	mutex_lock(&resource->adm_mutex);
	retcode = adm_del_resource(resource);
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = device_to_minor(device);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, device, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	drbd_err(device, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}

static int nla_put_notification_header(struct sk_buff *msg,
				       enum drbd_notification_type type)
{
	struct drbd_notification_header nh = {
		.nh_type = type,
	};

	return drbd_notification_header_to_skb(msg, &nh, true);
}

void notify_resource_state(struct sk_buff *skb,
			   unsigned int seq,
			   struct drbd_resource *resource,
			   struct resource_info *resource_info,
			   enum drbd_notification_type type)
{
	struct resource_statistics resource_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     resource_info_to_skb(skb, resource_info, true)))
		goto nla_put_failure;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
			err, seq);
}
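
/*
 * notify_resource_state() above and its device/connection/peer-device
 * siblings below share one calling convention: called with skb == NULL
 * they allocate a fresh message and multicast it as a spontaneous event;
 * called with a caller-provided skb (as done indirectly by the
 * drbd_adm_get_initial_state() dump at the end of this file) they only
 * append to that reply and leave sequence numbering to the caller.
 * Sketch of the broadcast case:
 *
 *	notify_resource_state(NULL, 0, resource, &info, NOTIFY_CREATE);
 */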

void notify_device_state(struct sk_buff *skb,
			 unsigned int seq,
			 struct drbd_device *device,
			 struct device_info *device_info,
			 enum drbd_notification_type type)
{
	struct device_statistics device_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = device->minor;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     device_info_to_skb(skb, device_info, true)))
		goto nla_put_failure;
	device_to_statistics(&device_statistics, device);
	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

void notify_connection_state(struct sk_buff *skb,
			     unsigned int seq,
			     struct drbd_connection *connection,
			     struct connection_info *connection_info,
			     enum drbd_notification_type type)
{
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

void notify_peer_device_state(struct sk_buff *skb,
			      unsigned int seq,
			      struct drbd_peer_device *peer_device,
			      struct peer_device_info *peer_device_info,
			      enum drbd_notification_type type)
{
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
{
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;
	int err;

	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	err = -ENOMEM;
	if (!skb)
		goto fail;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	if (!dh)
		goto fail;
	dh->minor = device ? device->minor : -1;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
		goto unlock_fail;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	skb = NULL;
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto unlock_fail;
	mutex_unlock(&notification_mutex);
	return;

unlock_fail:
	mutex_unlock(&notification_mutex);
fail:
	nlmsg_free(skb);
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
	struct drbd_genlmsghdr *dh;
	int err;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	return;

nla_put_failure:
	nlmsg_free(skb);
	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
}

static void free_state_changes(struct list_head *list)
{
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
	}
}

static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
	return 1 +
	       state_change->n_connections +
	       state_change->n_devices +
	       state_change->n_devices * state_change->n_connections;
}
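
/*
 * Worked example (hypothetical sizes): for a state change on a resource
 * with 2 connections and 3 devices, the count above is
 *
 *	1 + 2 + 3 + 3 * 2 = 12 notifications,
 *
 * one per resource, connection, device and (device, connection) pair.
 */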

static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	unsigned int n;
	enum drbd_notification_type flags = 0;

	/* There is no need for taking notification_mutex here: it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	cb->args[5]--;
	if (cb->args[5] == 1) {
		notify_initial_state_done(skb, seq);
		goto out;
	}
	n = cb->args[4]++;
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	if (n < 1) {
		notify_resource_state_change(skb, seq, state_change->resource,
					     NOTIFY_EXISTS | flags);
		goto next;
	}
	n--;
	if (n < state_change->n_connections) {
		notify_connection_state_change(skb, seq, &state_change->connections[n],
					       NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		notify_device_state_change(skb, seq, &state_change->devices[n],
					   NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						NOTIFY_EXISTS | flags);
		goto next;
	}

next:
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
		cb->args[4] = 0;
	}
out:
	return skb->len;
}
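
/*
 * Bookkeeping used above, reconstructed from the code (not a documented
 * API contract): the netlink_callback scratch slots hold
 *
 *	cb->args[0]  pointer to the current struct drbd_state_change
 *	cb->args[2]  netlink sequence number of the dump request
 *	cb->args[3]  number of notifications for the current state change
 *	cb->args[4]  notifications already emitted for it
 *	cb->args[5]  calls still expected: total notifications plus two,
 *	             one for the final DRBD_INITIAL_STATE_DONE message and
 *	             one terminating call that returns 0 to end the dump
 */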

int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_resource *resource;
	LIST_HEAD(head);

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
		if (cb->args[0]) {
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);
		}
		return 0;
	}

	cb->args[5] = 2;  /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
			return -ENOMEM;
		}
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	}
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head);	/* detach list from head */
	}

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);
}