/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"

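/*
 * Illustrative sketch (not from the original sources) of the tag stream
 * the generated *_from_tags() / *_to_tags() functions walk: a flat array
 * of 16bit words, each attribute encoded as
 *
 *	<tag = number | type | flags> <dlen = payload bytes> <payload ...>
 *
 * terminated by TT_END.  A single integer attribute with tag number pn
 * and value 42 would thus occupy, on a little endian box,
 *
 *	{ pn | TT_INTEGER, sizeof(int), 42, 0 }
 *
 * which is why the parser above steps "tags" forward by dlen bytes after
 * each switch round.
 */
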
/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

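/*
 * Example (illustrative; the exact helper path depends on the
 * usermode_helper module parameter): fencing minor 0 of an IPv4 peer
 * ends up executing roughly
 *
 *	DRBD_PEER_AF=ipv4 DRBD_PEER_ADDRESS=192.168.1.2 \
 *		/sbin/drbdadm fence-peer minor-0
 *
 * and call_usermodehelper() packs the helper's exit code into bits 8..15
 * of the return value, which is why callers inspect (ret >> 8) & 0xff.
 */
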
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		return mdev->state.pdsk;
	}

	if (fp == FP_STONITH)
		_drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE);

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);
	return nps;
}


int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk  = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}


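/*
 * Note on the mask/val pairs above (illustrative): union drbd_state is a
 * bit field, so a request such as NS(disk, D_UP_TO_DATE) sets
 * mask.disk = D_MASK and val.disk = D_UP_TO_DATE while leaving all other
 * fields alone; drbd_set_role() widens the mask step by step as it
 * learns from the state engine why the previous attempt was refused.
 */
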
static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}

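/*
 * Illustrative summary of the cases above: external meta data lives at
 * the start of the meta device (super block, then activity log, then
 * bitmap, all at positive offsets), while internal meta data is carved
 * out of the end of the backing device, so al_offset and bm_offset are
 * negative, relative to the super block at md_offset.
 */
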
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (long)size, units[base]);

	return buf;
}

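/*
 * Example (illustrative): callers pass a value in KB, so for an 8 TiB
 * device ppsize(buf, 8589934592) steps K -> M -> G and yields "8192 GB";
 * the (size & (1<<9)) term rounds to nearest instead of truncating.
 */
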
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns the outcome as enum determine_dev_size (dev_size_error on failure).
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

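/*
 * Worked example (illustrative, units are 512 byte sectors): with a
 * local backing device worth 200 GiB and a connected peer reporting
 * p_size of 160 GiB, the agreed size is min(p_size, m_size) = 160 GiB.
 * A user-requested disk_size below that shrinks the result further; one
 * above it is refused with "Requested disk size is too big".
 */
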
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
					e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;

	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_max_segment_size(q, max_seg_s);
	blk_queue_logical_block_size(q, 512);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	blk_stack_limits(&q->limits, &b->limits, 0);

	if (b->merge_bvec_fn)
		dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
			 b->merge_bvec_fn);
	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
			 q->backing_dev_info.ra_pages,
			 b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}

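/*
 * Note (illustrative): max_seg_s is a byte count here, hence the >> 9
 * conversion to 512 byte sectors for blk_queue_max_hw_sectors(); with
 * the usual DRBD_MAX_SEGMENT_SIZE of 32 KiB (an assumption about the
 * header's definition) requests are capped at 64 sectors.
 */
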
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * wait for a potentially exiting worker, then restart it,
 * or start a new one.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}

/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct inode *inode, *inode2;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
	if (IS_ERR(nbc->lo_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(nbc->lo_file));
		nbc->lo_file = NULL;
		retcode = ERR_OPEN_DISK;
		goto fail;
	}

	inode = nbc->lo_file->f_dentry->d_inode;

	if (!S_ISBLK(inode->i_mode)) {
		retcode = ERR_DISK_NOT_BDEV;
		goto fail;
	}

	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
	if (IS_ERR(nbc->md_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(nbc->md_file));
		nbc->md_file = NULL;
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}

	inode2 = nbc->md_file->f_dentry->d_inode;

	if (!S_ISBLK(inode2->i_mode)) {
		retcode = ERR_MD_NOT_BDEV;
		goto fail;
	}

	nbc->backing_bdev = inode->i_bdev;
	if (bd_claim(nbc->backing_bdev, mdev)) {
		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
		       nbc->backing_bdev, mdev,
		       nbc->backing_bdev->bd_holder,
		       nbc->backing_bdev->bd_contains->bd_holder,
		       nbc->backing_bdev->bd_holders);
		retcode = ERR_BDCLAIM_DISK;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto release_bdev_fail;
	}

	/* meta_dev_idx >= 0: external fixed size,
	 * possibly multiple drbd sharing one meta device.
	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
	 * not yet used by some other drbd minor!
	 * (if you use drbd.conf + drbdadm,
	 * that should check it for you already; but if you don't, or someone
	 * fooled it, we need to double check here) */
	nbc->md_bdev = inode2->i_bdev;
	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
				: (void *) drbd_m_holder)) {
		retcode = ERR_BDCLAIM_MD_DISK;
		goto release_bdev_fail;
	}

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto release_bdev2_fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto release_bdev2_fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto release_bdev2_fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
				 logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
			(unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_BARRIER, &mdev->flags);
	else
		clear_bit(MD_NO_BARRIER, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bio_barrier;
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
			 "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 release_bdev2_fail:
	if (nbc)
		bd_release(nbc->md_bdev);
 release_bdev_fail:
	if (nbc)
		bd_release(nbc->backing_bdev);
 fail:
	if (nbc) {
		if (nbc->lo_file)
			fput(nbc->lo_file);
		if (nbc->md_file)
			fput(nbc->md_file);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
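
/*
 * Error path naming above (illustrative): each label undoes one more
 * step of the attach sequence - force_diskless_dec drops the ldev
 * reference, force_diskless resets the disk state, release_bdev2_fail /
 * release_bdev_fail give back the claimed block devices, and plain fail
 * frees what was allocated.  New failure points should jump to the
 * deepest label whose cleanup is already required.
 */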

static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_codes retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memset(new_conf, 0, sizeof(struct net_conf));
	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	spin_unlock_irq(&mdev->req_lock);

	retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

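/*
 * Design note (illustrative): everything that can fail is allocated
 * before the req_lock critical section, which then only swaps pointers;
 * the shared fail path frees whatever was not installed, relying on
 * kfree() and crypto_free_hash() tolerating NULL.
 */
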
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the mean time! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determin_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	reply->ret_code = retcode;
	return 0;
}

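/*
 * Illustrative flag mapping: a forced resize request (e.g. drbdsetup's
 * assume-peer-has-space style override; the userland flag name is an
 * assumption here) becomes DDSF_FORCED, and a request to skip the
 * post-resize resync becomes DDSF_NO_RESYNC, which the check above only
 * allows against peers speaking protocol version 93 or newer.
 */
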
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate       = DRBD_RATE_DEF;
		sc.after      = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.dp_volume  = DRBD_DP_VOLUME_DEF;
		sc.dp_interval = DRBD_DP_INTERVAL_DEF;
		sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
		sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
		mdev->state.conn == C_SYNC_TARGET ||
		mdev->state.conn == C_PAUSED_SYNC_S ||
		mdev->state.conn == C_PAUSED_SYNC_T );

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}
	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{

	reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));

	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_CLEAR;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));

	return 0;
}

static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

1778static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1779 struct drbd_nl_cfg_reply *reply)
1780{
1781 unsigned short *tl;
1782
1783 tl = reply->tag_list;
1784
1785 if (get_ldev(mdev)) {
1786 tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
1787 put_ldev(mdev);
1788 }
1789
1790 if (get_net_conf(mdev)) {
1791 tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
1792 put_net_conf(mdev);
1793 }
1794 tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
1795
1796 put_unaligned(TT_END, tl++); /* Close the tag list */
1797
1798 return (int)((char *)tl - (char *)reply->tag_list);
1799}

static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
	     test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

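/*
 * Start an online verify run.  (Descriptive comment added for clarity;
 * not part of the original source.)  Unless drbdsetup supplies an
 * explicit start sector in the tag list, resume from the last known
 * position in ov_start_sector, rounded down to bitmap-bit granularity,
 * then request the C_VERIFY_S connection state.
 */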
static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}
	/* w_make_ov_request expects the position to be aligned:
	 * round down to a multiple of BM_SECT_PER_BIT.
	 * (Masking with plain ~BM_SECT_PER_BIT would clear only one bit.) */
	mdev->ov_start_sector = args.start_sector & ~(sector_t)(BM_SECT_PER_BIT - 1);
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}


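/*
 * Generate a new current UUID, optionally clearing the bitmap.
 * (Descriptive comment added for clarity; not part of the original
 * source.)  With clear_bm set on a freshly created device
 * (UI_CURRENT == UUID_JUST_CREATED) that is connected with protocol
 * version >= 90, this is the "skip initial sync" shortcut: the bitmap is
 * cleared on disk, the peer is told to do the same, and both disks jump
 * straight to D_UP_TO_DATE without a full initial sync.
 */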
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;

	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}

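/*
 * Map a netlink request to its drbd_conf, creating the minor on demand.
 * (Descriptive comment added for clarity; not part of the original
 * source.)  Two concurrent requests may both allocate a new device;
 * minor_table, updated under drbd_pp_lock, arbitrates: the winner
 * registers its gendisk, the loser frees its allocation, and both end up
 * returning whatever the table now holds for that minor.
 */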
static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
{
	struct drbd_conf *mdev;

	if (nlp->drbd_minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(nlp->drbd_minor);

	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(nlp->drbd_minor);
		if (!mdev) /* allocation may fail; don't dereference NULL below */
			return NULL;

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[nlp->drbd_minor] == NULL) {
			minor_table[nlp->drbd_minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(nlp->drbd_minor);
	}

	return mdev;
}

struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov, 0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid, 0 },
};

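/*
 * Connector entry point.  (Descriptive comment added for clarity; not
 * part of the original source.)  Each request carries a struct
 * drbd_nl_cfg_req whose packet_type indexes cnd_table above;
 * reply_body_size pre-sizes the kmalloc'ed reply for handlers that
 * return tag lists.  The reply is sent back over connector with
 * ack = req->ack + 1, which is how the sender matches it to its request.
 */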
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp);
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kmalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
 fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

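/*
 * Tag list helpers.  (Descriptive comment added for clarity; not part of
 * the original source.)  A tag list is a packed stream of triples -- a
 * 16 bit tag, a 16 bit length, then "length" bytes of payload --
 * terminated by TT_END.  The helpers below append one item each and
 * return the advanced write position; put_unaligned is used throughout
 * because arbitrary payload lengths leave later items unaligned.
 */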
static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	      unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}

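/*
 * Event broadcasts.  (Descriptive comment added for clarity; not part of
 * the original source.)  The functions below push unsolicited events --
 * state changes, helper invocations, epoch entry (ee) dumps and resync
 * progress -- to any listener on CN_IDX_DRBD.  They share the private
 * sequence counter drbd_nl_seq so userspace can detect lost events, and
 * they allocate and send with GFP_NOIO because they may run on behalf of
 * the writeout path.
 */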
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

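/*
 * Broadcast an epoch entry to userspace.  (Descriptive comment added for
 * clarity; not part of the original source.)  Besides the reason string,
 * digests and sector, the full e->size bytes of payload are copied out of
 * the entry's page chain into a single T_ee_data tag, page by page via
 * kmap_atomic; the resulting reply is too large for the stack, hence the
 * kmalloc below.
 */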
void drbd_bcast_ee(struct drbd_conf *mdev,
		   const char *reason, const int dgs,
		   const char *seen_hash, const char *calc_hash,
		   const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kmalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	put_unaligned(T_ee_data, tl++);
	put_unaligned(e->size, tl++);

	len = e->size;
	page = e->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page, KM_USER0);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d, KM_USER0);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}

void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

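/*
 * Module init/cleanup.  (Descriptive comment added for clarity; not part
 * of the original source.)  Register the connector callback under
 * CN_VAL_DRBD; if the preferred connector index cn_idx is already taken,
 * retry a bounded number of times with indices stepped by CN_IDX_STEP
 * before giving up.
 */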
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}

void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}
