/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
25
26#include <linux/autoconf.h>
27#include <linux/module.h>
28#include <linux/drbd.h>
29#include <linux/in.h>
30#include <linux/fs.h>
31#include <linux/file.h>
32#include <linux/slab.h>
33#include <linux/connector.h>
34#include <linux/blkpg.h>
35#include <linux/cpumask.h>
36#include "drbd_int.h"
37#include "drbd_tracing.h"
38#include "drbd_wrappers.h"
39#include <asm/unaligned.h>
40#include <linux/drbd_tag_magic.h>
41#include <linux/drbd_limits.h>
42
static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"
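
/*
 * For orientation, a sketch of what the generators above expand to, for a
 * hypothetical packet "struct example { int foo; }" (not an actual packet
 * from drbd_nl.h):
 *
 *	static int example_from_tags(struct drbd_conf *mdev,
 *			unsigned short *tags, struct example *arg)
 *	{
 *		...
 *		case T_foo:
 *			arg->foo = get_unaligned((int *)(tags));
 *			break;
 *		...
 *	}
 *
 * On the wire each tag is a 16 bit header (tag number, tag type and the
 * T_MANDATORY flag packed together), a 16 bit length, then "length" bytes
 * of payload; TT_END terminates the list.
 */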

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
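
/*
 * Illustrative example (addresses made up): for minor 0 with an IPv4 peer
 * at 192.168.1.2, drbd_khelper(mdev, "fence-peer") runs
 *
 *	<usermode_helper> fence-peer minor-0
 *
 * with DRBD_PEER_AF=ipv4 and DRBD_PEER_ADDRESS=192.168.1.2 in the
 * environment. call_usermodehelper() returns a wait status, which is why
 * callers extract the helper's exit code as (ret >> 8) & 0xff.
 */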

enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		return mdev->state.pdsk;
	}

	if (fp == FP_STONITH)
		_drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE);

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);
	return nps;
}


int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i = 0; val.role = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk == D_INCONSISTENT ||
		     mdev->state.disk == D_OUTDATED)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}


static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) about this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
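
/*
 * Rough worked example for internal meta data (illustrative numbers; the
 * authoritative constants live in drbd_int.h): the bitmap tracks the
 * backing device at (currently) 4 KiB per bit, so a 1 TiB backing device
 * needs about 2^40 / 2^12 / 8 = 32 MiB of bitmap, i.e. 64 Ki sectors of
 * md_size_sect, plus the fixed activity log and meta data super block
 * accounted for by the MD_BM_OFFSET added above. The negative al_offset
 * and bm_offset express that internal meta data sits at the *end* of the
 * device, addressed relative to md_offset.
 */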

char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (long)size, units[base]);

	return buf;
}
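
/*
 * Example (the argument is in KB here, as the callers pass sectors >> 1):
 *
 *	char buf[10];
 *	ppsize(buf, 8192);	// -> "8192 KB"
 *	ppsize(buf, 1048576);	// -> "1024 MB" (rounded via the bit-9 carry)
 *
 * The 10000 threshold keeps up to four digits before scaling to the next
 * unit.
 */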

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns the resulting enum determine_dev_size; dev_size_error (negative)
 * indicates an error. You should call drbd_md_sync() after calling this
 * function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size);
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	sector_t p_size = mdev->p_size;  /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
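
/*
 * Example of the negotiation above (illustrative sizes): if the peer
 * reports p_size = 100 GiB and the local backing device offers
 * m_size = 80 GiB, the agreed size is min(100, 80) = 80 GiB. With the peer
 * gone (p_size == 0), the last agreed size (la_size) wins, so the device
 * does not shrink behind the peer's back. A user-configured disk_size
 * (u_size) may only select a smaller value, never grow beyond what both
 * nodes can back.
 */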

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if the current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
					e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;

	if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
		max_seg_s = PAGE_SIZE;

	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

	blk_queue_max_sectors(q, max_seg_s >> 9);
	blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
	blk_queue_max_segment_size(q, max_seg_s);
	blk_queue_logical_block_size(q, 512);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	blk_stack_limits(&q->limits, &b->limits, 0);

	if (b->merge_bvec_fn)
		dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
			 b->merge_bvec_fn);
	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
			 q->backing_dev_info.ra_pages,
			 b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}
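
/*
 * Example of the capping above (illustrative): a stacked backing device
 * such as device-mapper typically provides a merge_bvec_fn; unless
 * use_bmbv is explicitly enabled, DRBD then limits itself to PAGE_SIZE
 * (4 KiB on x86) BIOs rather than second-guess the backing driver's merge
 * decisions. Otherwise the segment size is the smaller of the caller's
 * max_seg_s and what the backing queue can take
 * (max_sectors * logical_block_size).
 */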

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * wait for a potentially exiting worker, then restart it,
 * or start a new one.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}

/* always returns 0;
 * the interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct inode *inode, *inode2;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
	if (IS_ERR(nbc->lo_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(nbc->lo_file));
		nbc->lo_file = NULL;
		retcode = ERR_OPEN_DISK;
		goto fail;
	}

	inode = nbc->lo_file->f_dentry->d_inode;

	if (!S_ISBLK(inode->i_mode)) {
		retcode = ERR_DISK_NOT_BDEV;
		goto fail;
	}

	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
	if (IS_ERR(nbc->md_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(nbc->md_file));
		nbc->md_file = NULL;
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}

	inode2 = nbc->md_file->f_dentry->d_inode;

	if (!S_ISBLK(inode2->i_mode)) {
		retcode = ERR_MD_NOT_BDEV;
		goto fail;
	}

	nbc->backing_bdev = inode->i_bdev;
	if (bd_claim(nbc->backing_bdev, mdev)) {
		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
		       nbc->backing_bdev, mdev,
		       nbc->backing_bdev->bd_holder,
		       nbc->backing_bdev->bd_contains->bd_holder,
		       nbc->backing_bdev->bd_holders);
		retcode = ERR_BDCLAIM_DISK;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto release_bdev_fail;
	}

	/* meta_dev_idx >= 0: external fixed size,
	 * possibly multiple drbd sharing one meta device.
	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
	 * not yet used by some other drbd minor!
	 * (if you use drbd.conf + drbdadm,
	 * that should check it for you already; but if you don't, or someone
	 * fooled it, we need to double check here) */
	nbc->md_bdev = inode2->i_bdev;
	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
				: (void *) drbd_m_holder)) {
		retcode = ERR_BDCLAIM_MD_DISK;
		goto release_bdev_fail;
	}

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto release_bdev2_fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) > max_possible_sectors)
		dev_warn(DEV, "truncating very big lower level device "
			 "to currently maximum possible %llu sectors\n",
			 (unsigned long long) max_possible_sectors);

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto release_bdev2_fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto release_bdev2_fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
			(unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
				 logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_BARRIER, &mdev->flags);
	else
		clear_bit(MD_NO_BARRIER, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bio_barrier;
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	    drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
			 "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WAS_UP_TO_DATE...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 release_bdev2_fail:
	if (nbc)
		bd_release(nbc->md_bdev);
 release_bdev_fail:
	if (nbc)
		bd_release(nbc->backing_bdev);
 fail:
	if (nbc) {
		if (nbc->lo_file)
			fput(nbc->lo_file);
		if (nbc->md_file)
			fput(nbc->md_file);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_codes retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memset(new_conf, 0, sizeof(struct net_conf));
	new_conf->timeout = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose = 0;
	new_conf->two_primaries = 0;
	new_conf->wire_protocol = DRBD_PROT_C;
	new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
		    != CRYPTO_ALG_TYPE_HASH) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	spin_unlock_irq(&mdev->req_lock);

	retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the mean time! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	int ldsc = 0; /* local disk size changed */
	enum determine_dev_size dd;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
		ldsc = 1;
	}

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	dd = drbd_determin_dev_size(mdev);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1);
	}

 fail:
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate = DRBD_RATE_DEF;
		sc.after = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (mdev->state.conn == C_SYNC_SOURCE ||
	       mdev->state.conn == C_SYNC_TARGET ||
	       mdev->state.conn == C_PAUSED_SYNC_S ||
	       mdev->state.conn == C_PAUSED_SYNC_T);

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* most sanity checks done, try to assign the new sync-after
	 * dependency. need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}
	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{

	reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));

	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_CLEAR;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));

	return 0;
}

static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
	     test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}
	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}
1870
1871
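/*
 * Generate a new current UUID.  With clear_bm set on a freshly created,
 * connected device pair (current UUID still UUID_JUST_CREATED), this
 * skips the initial full sync: the bitmap is cleared and both disks are
 * marked D_UP_TO_DATE immediately.
 */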
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;

	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}

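/*
 * Look up the drbd_conf for the requested minor; if it does not exist
 * yet and DRBD_NL_CREATE_DEVICE is set, allocate and register it.
 * Concurrent creators are resolved via minor_table under drbd_pp_lock:
 * only the winner's gendisk is added, the loser frees its allocation.
 */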
static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
{
	struct drbd_conf *mdev;

	if (nlp->drbd_minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(nlp->drbd_minor);

	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(nlp->drbd_minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[nlp->drbd_minor] == NULL) {
			minor_table[nlp->drbd_minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(nlp->drbd_minor);
	}

	return mdev;
}

struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

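/*
 * reply_body_size is the extra room reserved in the reply buffer for
 * the handler's tag list.  Handlers with a size of 0 only report a
 * return code; their reply carries no tag list payload.
 */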
static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};

static void drbd_connector_callback(struct cn_msg *req)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	mdev = ensure_mdev(nlp);
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	trace_drbd_netlink(req, 1);

	if (nlp->packet_type >= P_nl_after_last_packet) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kmalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	trace_drbd_netlink(cn_reply, 0);
	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
 fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

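/*
 * Helpers to append entries to a reply tag list.  Each entry is
 * encoded as
 *
 *	[tag:u16][dlen:u16][payload: dlen bytes]
 *
 * where the tag value encodes both tag number and tag type (see
 * tag_type()/tag_number()); the caller closes the list with a single
 * TT_END tag.
 */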
static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	      unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;

	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}

void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	trace_drbd_netlink(cn_reply, 0);
	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	trace_drbd_netlink(cn_reply, 0);
	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ee(struct drbd_conf *mdev,
		   const char *reason, const int dgs,
		   const char *seen_hash, const char *calc_hash,
		   const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct bio_vec *bvec;
	unsigned short *tl;
	int i;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kmalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	put_unaligned(T_ee_data, tl++);
	put_unaligned(e->size, tl++);

	__bio_for_each_segment(bvec, e->private_bio, i, 0) {
		void *d = kmap(bvec->bv_page);
		memcpy(tl, d + bvec->bv_offset, bvec->bv_len);
		kunmap(bvec->bv_page);
		tl = (unsigned short *)((char *)tl + bvec->bv_len);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	trace_drbd_netlink(cn_reply, 0);
	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}

void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	trace_drbd_netlink(cn_reply, 0);
	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

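/*
 * Register the connector callback.  If the preferred connector index
 * is already claimed (presumably by another connector user), retry
 * with the next index a limited number of times.
 */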
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}

void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	trace_drbd_netlink(cn_reply, 0);
	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}