/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct flush_work {
	struct drbd_work w;
	struct drbd_epoch *epoch;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	struct drbd_epoch *prev;
	spin_lock(&mdev->epoch_lock);
	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
	if (prev == epoch || prev == mdev->current_epoch)
		prev = NULL;
	spin_unlock(&mdev->epoch_lock);
	return prev;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
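
/* Illustration of the chain layout these helpers maintain, e.g. for a
 * three page chain A -> B -> C taken off a pool head:
 *
 *	head -> A;	A->private = (unsigned long)B;
 *			B->private = (unsigned long)C;
 *			C->private = 0;			end of list marker
 *
 * page_chain_del(&head, 2) returns A (with B's private reset to 0) and
 * leaves head pointing at C; page_chain_tail() plus page_chain_add()
 * link such a private chain back onto a global head under the pool lock. */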

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

/* kick lower level device, if we have more than (arbitrary number)
 * reference counts on it, which typically are locally submitted io
 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
static void maybe_kick_lo(struct drbd_conf *mdev)
{
	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
		drbd_kick_lo(mdev);
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	maybe_kick_lo(mdev);
	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}
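
/* Pool usage in a nutshell, as used further down in this file:
 *
 *	page = drbd_pp_alloc(mdev, nr_pages, true);	-- may block and retry
 *	...
 *	drbd_pp_free(mdev, page, 0);			-- wakes up drbd_pp_wait
 *
 * drbd_pp_alloc() accounts the pages in mdev->pp_in_use; drbd_pp_free()
 * subtracts them again (from pp_in_use_by_net if is_net is set) and wakes
 * potential waiters, so a blocked allocation can make progress. */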

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}
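
/* Sketch of how a received data block makes it to the local disk, as the
 * receive functions below put it together (see read_in_block() and
 * recv_resync_read(); receive_Data() does the same for application writes,
 * with e_end_block() as callback):
 *
 *	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
 *	e->w.cb = e_end_resync_block;
 *	list_add(&e->w.list, &mdev->sync_ee);	-- under req_lock
 *	drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR);
 *
 * Once all bios of the entry have completed, the asender finishes it via
 * drbd_process_done_ee(): the callback sends the ACK, and drbd_free_ee()
 * returns the pages to the pool. */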

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		drbd_kick_lo(mdev);
		schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}
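
/* Return value contract of drbd_recv(), as relied upon all over this file:
 *	rv == size	the full buffer was received (MSG_WAITALL)
 *	rv == 0		the peer shut the connection down
 *	rv <  0		error or signal (-ECONNRESET, -ERESTARTSYS, ...)
 *	0 < rv < size	partial read, e.g. a signal interrupted us
 * Anything but rv == size forces the connection into C_BROKEN_PIPE. */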

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return FALSE;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return TRUE;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return FALSE;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where apropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}
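
/* Connection setup in short: both nodes alternate between actively
 * connecting (drbd_try_connect) and accepting (drbd_wait_for_connect)
 * until two TCP connections exist.  The connection announced with
 * P_HAND_SHAKE_S becomes the data socket, the one announced with
 * P_HAND_SHAKE_M the meta-data (ack) socket; crossed initial packets are
 * resolved by releasing the duplicate and, for the meta socket, setting
 * DISCARD_CONCURRENT for later conflict resolution. */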

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return FALSE;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return FALSE;
	}
	mdev->last_received = jiffies;

	return TRUE;
}
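
/* Two on-the-wire header variants are accepted above: the classic h80
 * header (BE_DRBD_MAGIC) with 16 bit command and 16 bit length fields,
 * and the h95 header (BE_DRBD_MAGIC_BIG), which carries a 32 bit length.
 * Both are big endian on the wire. */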

static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}

	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
}

static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct flush_work *fw = (struct flush_work *)w;
	struct drbd_epoch *epoch = fw->epoch;

	kfree(w);

	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
		drbd_flush_after_epoch(mdev, epoch);

	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));

	return 1;
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int finish, epoch_size;
	struct drbd_epoch *next_epoch;
	int schedule_flush = 0;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;
		finish = 0;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);

			/* Special case: If we just switched from WO_bio_barrier to
			   WO_bdev_flush we should not finish the current epoch */
			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
			    mdev->write_ordering != WO_bio_barrier &&
			    epoch == mdev->current_epoch)
				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
			break;
		case EV_BARRIER_DONE:
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
		    epoch->list.prev == &mdev->current_epoch->list &&
		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
			/* Nearly all conditions are met to finish that epoch... */
			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
			    mdev->write_ordering == WO_none ||
			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
			    ev & EV_CLEANUP) {
				finish = 1;
				set_bit(DE_IS_FINISHING, &epoch->flags);
			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
				 mdev->write_ordering == WO_bio_barrier) {
				atomic_inc(&epoch->active);
				schedule_flush = 1;
			}
		}
		if (finish) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	if (schedule_flush) {
		struct flush_work *fw;
		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
		if (fw) {
			fw->w.cb = w_flush;
			fw->epoch = epoch;
			drbd_queue_work(&mdev->data.work, &fw->w);
		} else {
			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
			/* That is not a recursion, only one level */
			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
		}
	}

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to an other write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
		[WO_bio_barrier] = "barrier",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
		wo = WO_bdev_flush;
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
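
/* Example of the fallback chain implemented above: with all no_disk_*
 * options unset, requesting WO_bio_barrier keeps "barrier"; with
 * no_disk_barrier set it degrades to "flush", with no_disk_flush
 * additionally set to "drain", and with no_disk_drain as well to "none".
 * The ordering method can only ever get weaker (wo = min(pwo, wo)). */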

/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	/* we special case some flags in the multi-bio case, see below
	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		/* strip off REQ_UNPLUG unless it is the last bio */
		if (bios)
			bio->bi_rw &= ~REQ_UNPLUG;

		drbd_generic_make_request(mdev, fault_type, bio);

		/* strip off REQ_HARDBARRIER,
		 * unless it is the first or last bio */
		if (bios && bios->bi_next)
			bios->bi_rw &= ~REQ_HARDBARRIER;
	} while (bios);
	maybe_kick_lo(mdev);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}

/**
 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways (unused in this callback)
 */
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
	   so that we can finish that epoch in drbd_may_finish_epoch().
	   That is necessary if we already have a long chain of Epochs, before
	   we realize that REQ_HARDBARRIER is actually not supported */

	/* As long as the -ENOTSUPP on the barrier is reported immediately
	   that will never trigger. If it is reported late, we will just
	   print that warning and continue correctly for all future requests
	   with WO_bdev_flush */
	if (previous_epoch(mdev, e->epoch))
		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");

	/* we still have a local reference,
	 * get_ldev was done in receive_Data. */

	e->w.cb = e_end_block;
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
		/* drbd_submit_ee fails for one reason only:
		 * if was not able to allocate sufficient bios.
		 * requeue, try again later. */
		e->w.cb = w_e_reissue;
		drbd_queue_work(&mdev->data.work, &e->w);
	}
	return 1;
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv, issue_flush;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
		drbd_kick_lo(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_bio_barrier:
	case WO_none:
		if (rv == FE_RECYCLED)
			return TRUE;
		break;

	case WO_bdev_flush:
	case WO_drain_io:
		if (rv == FE_STILL_LIVE) {
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
		}
		if (rv == FE_RECYCLED)
			return TRUE;

		/* The asender will send all the ACKs and barrier ACKs out, since
		   all EEs moved from the active_ee to the done_ee. We need to
		   provide a new epoch object for the EEs that come in soon */
		break;
	}

	/* receiver context, in the writeout path of the other node.
	 * avoid potential distributed deadlock */
	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
	if (!epoch) {
		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		if (issue_flush) {
			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
			if (rv == FE_RECYCLED)
				return TRUE;
		}

		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);

		return TRUE;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return TRUE;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;

	/* even though we trust out peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
				rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED.\n");
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return TRUE;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
				rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
				"read %d expected %d\n",
				rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}
1527
1528/* e_end_resync_block() is called via
1529 * drbd_process_done_ee() by asender only */
1530static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1531{
1532 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1533 sector_t sector = e->sector;
1534 int ok;
1535
1536 D_ASSERT(hlist_unhashed(&e->colision));
1537
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001538 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001539 drbd_set_in_sync(mdev, sector, e->size);
1540 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1541 } else {
1542 /* Record failure to sync */
1543 drbd_rs_failed_io(mdev, sector, e->size);
1544
1545 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1546 }
1547 dec_unacked(mdev);
1548
1549 return ok;
1550}
1551
1552static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1553{
1554 struct drbd_epoch_entry *e;
1555
1556 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001557 if (!e)
1558 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001559
1560 dec_rs_pending(mdev);
1561
Philipp Reisnerb411b362009-09-25 16:07:19 -07001562 inc_unacked(mdev);
1563 /* corresponding dec_unacked() in e_end_resync_block()
1564 * respective _drbd_clear_done_ee */
1565
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001566 e->w.cb = e_end_resync_block;
1567
Philipp Reisnerb411b362009-09-25 16:07:19 -07001568 spin_lock_irq(&mdev->req_lock);
1569 list_add(&e->w.list, &mdev->sync_ee);
1570 spin_unlock_irq(&mdev->req_lock);
1571
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001572 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001573 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1574 return TRUE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001575
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001576 /* drbd_submit_ee currently fails for one reason only:
1577 * not being able to allocate enough bios.
1578 * Is dropping the connection going to help? */
1579 spin_lock_irq(&mdev->req_lock);
1580 list_del(&e->w.list);
1581 spin_unlock_irq(&mdev->req_lock);
1582
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001583 drbd_free_ee(mdev, e);
1584fail:
1585 put_ldev(mdev);
1586 return FALSE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001587}
1588
Philipp Reisner02918be2010-08-20 14:35:10 +02001589static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001590{
1591 struct drbd_request *req;
1592 sector_t sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593 int ok;
Philipp Reisner02918be2010-08-20 14:35:10 +02001594 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001595
1596 sector = be64_to_cpu(p->sector);
1597
1598 spin_lock_irq(&mdev->req_lock);
1599 req = _ar_id_to_req(mdev, p->block_id, sector);
1600 spin_unlock_irq(&mdev->req_lock);
1601 if (unlikely(!req)) {
1602 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1603 return FALSE;
1604 }
1605
1606 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1607 * special casing it there for the various failure cases.
1608 * still no race with drbd_fail_pending_reads */
1609 ok = recv_dless_read(mdev, req, sector, data_size);
1610
1611 if (ok)
1612 req_mod(req, data_received);
1613 /* else: nothing. handled from drbd_disconnect...
1614 * I don't think we may complete this just yet
1615 * in case we are "on-disconnect: freeze" */
1616
1617 return ok;
1618}
1619
Philipp Reisner02918be2010-08-20 14:35:10 +02001620static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001621{
1622 sector_t sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001623 int ok;
Philipp Reisner02918be2010-08-20 14:35:10 +02001624 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001625
1626 sector = be64_to_cpu(p->sector);
1627 D_ASSERT(p->block_id == ID_SYNCER);
1628
1629 if (get_ldev(mdev)) {
1630 /* data is submitted to disk within recv_resync_read.
1631 * corresponding put_ldev done below on error,
1632 * or in drbd_endio_write_sec. */
1633 ok = recv_resync_read(mdev, sector, data_size);
1634 } else {
1635 if (__ratelimit(&drbd_ratelimit_state))
1636 dev_err(DEV, "Can not write resync data to local disk.\n");
1637
1638 ok = drbd_drain_block(mdev, data_size);
1639
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001640 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001641 }
1642
Philipp Reisner778f2712010-07-06 11:14:00 +02001643 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1644
Philipp Reisnerb411b362009-09-25 16:07:19 -07001645 return ok;
1646}
1647
1648/* e_end_block() is called via drbd_process_done_ee().
1649 * this means this function only runs in the asender thread
1650 */
1651static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1652{
1653 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1654 sector_t sector = e->sector;
1655 struct drbd_epoch *epoch;
1656 int ok = 1, pcmd;
1657
1658 if (e->flags & EE_IS_BARRIER) {
1659 epoch = previous_epoch(mdev, e->epoch);
1660 if (epoch)
1661 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1662 }
1663
1664 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001665 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001666 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1667 mdev->state.conn <= C_PAUSED_SYNC_T &&
1668 e->flags & EE_MAY_SET_IN_SYNC) ?
1669 P_RS_WRITE_ACK : P_WRITE_ACK;
1670 ok &= drbd_send_ack(mdev, pcmd, e);
1671 if (pcmd == P_RS_WRITE_ACK)
1672 drbd_set_in_sync(mdev, sector, e->size);
1673 } else {
1674 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1675 /* we expect it to be marked out of sync anyways...
1676 * maybe assert this? */
1677 }
1678 dec_unacked(mdev);
1679 }
1680 /* we delete from the conflict detection hash _after_ we sent out the
1681 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1682 if (mdev->net_conf->two_primaries) {
1683 spin_lock_irq(&mdev->req_lock);
1684 D_ASSERT(!hlist_unhashed(&e->colision));
1685 hlist_del_init(&e->colision);
1686 spin_unlock_irq(&mdev->req_lock);
1687 } else {
1688 D_ASSERT(hlist_unhashed(&e->colision));
1689 }
1690
1691 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1692
1693 return ok;
1694}
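/* Summing up the ack logic above: with protocol C the peer's write is
 * acknowledged only here, i.e. after the local disk write has completed.
 * A successful write that may also count towards the resync is acked with
 * P_RS_WRITE_ACK (and the corresponding bits are cleared via
 * drbd_set_in_sync), any other successful write with P_WRITE_ACK, and a
 * failed write with P_NEG_ACK, so the peer keeps the block marked as out
 * of sync.  Protocols A and B never enter the dec_unacked() path here. */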
1695
1696static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1697{
1698 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1699 int ok = 1;
1700
1701 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1702 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1703
1704 spin_lock_irq(&mdev->req_lock);
1705 D_ASSERT(!hlist_unhashed(&e->colision));
1706 hlist_del_init(&e->colision);
1707 spin_unlock_irq(&mdev->req_lock);
1708
1709 dec_unacked(mdev);
1710
1711 return ok;
1712}
1713
1714/* Called from receive_Data.
1715 * Synchronize packets on sock with packets on msock.
1716 *
1717 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1718 * packet traveling on msock, they are still processed in the order they have
1719 * been sent.
1720 *
1721 * Note: we don't care for Ack packets overtaking P_DATA packets.
1722 *
1723 * In case packet_seq is larger than mdev->peer_seq number, there are
1724 * outstanding packets on the msock. We wait for them to arrive.
1725 * In case we are the logically next packet, we update mdev->peer_seq
1726 * ourselves. Correctly handles 32bit wrap around.
1727 *
1728 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1729 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1730 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1731	 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1732 *
1733 * returns 0 if we may process the packet,
1734 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1735static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1736{
1737 DEFINE_WAIT(wait);
1738 unsigned int p_seq;
1739 long timeout;
1740 int ret = 0;
1741 spin_lock(&mdev->peer_seq_lock);
1742 for (;;) {
1743 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1744 if (seq_le(packet_seq, mdev->peer_seq+1))
1745 break;
1746 if (signal_pending(current)) {
1747 ret = -ERESTARTSYS;
1748 break;
1749 }
1750 p_seq = mdev->peer_seq;
1751 spin_unlock(&mdev->peer_seq_lock);
1752 timeout = schedule_timeout(30*HZ);
1753 spin_lock(&mdev->peer_seq_lock);
1754 if (timeout == 0 && p_seq == mdev->peer_seq) {
1755 ret = -ETIMEDOUT;
1756 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1757 break;
1758 }
1759 }
1760 finish_wait(&mdev->seq_wait, &wait);
1761 if (mdev->peer_seq+1 == packet_seq)
1762 mdev->peer_seq++;
1763 spin_unlock(&mdev->peer_seq_lock);
1764 return ret;
1765}
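/* A rough example of the wrap around handling: if mdev->peer_seq is
 * 0xffffffff and a packet with seq_num 0 arrives, peer_seq+1 wraps to 0,
 * seq_le(0, 0) holds, and we proceed immediately, advancing peer_seq to 0.
 * Had the packet carried seq_num 5 instead, we would sleep here until the
 * packets in between had advanced peer_seq far enough, or give up after
 * 30 seconds without progress. */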
1766
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001767static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1768{
1769 if (mdev->agreed_pro_version >= 95)
1770 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1771 (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
1772 (dpf & DP_FUA ? REQ_FUA : 0) |
1773 (dpf & DP_FLUSH ? REQ_FUA : 0) |
1774 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
1775 else
1776 return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
1777}
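/* In other words: from peers speaking protocol 95 or newer, each DP_* bit
 * is translated into the corresponding bio REQ_* flag; from older peers
 * only DP_RW_SYNC is honored and is expanded into REQ_SYNC | REQ_UNPLUG. */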
1778
Philipp Reisnerb411b362009-09-25 16:07:19 -07001779/* mirrored write */
Philipp Reisner02918be2010-08-20 14:35:10 +02001780static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001781{
1782 sector_t sector;
1783 struct drbd_epoch_entry *e;
Philipp Reisner02918be2010-08-20 14:35:10 +02001784 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001785 int rw = WRITE;
1786 u32 dp_flags;
1787
Philipp Reisnerb411b362009-09-25 16:07:19 -07001788 if (!get_ldev(mdev)) {
1789 if (__ratelimit(&drbd_ratelimit_state))
1790 dev_err(DEV, "Can not write mirrored data block "
1791 "to local disk.\n");
1792 spin_lock(&mdev->peer_seq_lock);
1793 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1794 mdev->peer_seq++;
1795 spin_unlock(&mdev->peer_seq_lock);
1796
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001797 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001798 atomic_inc(&mdev->current_epoch->epoch_size);
1799 return drbd_drain_block(mdev, data_size);
1800 }
1801
1802 /* get_ldev(mdev) successful.
1803 * Corresponding put_ldev done either below (on various errors),
1804 * or in drbd_endio_write_sec, if we successfully submit the data at
1805 * the end of this function. */
1806
1807 sector = be64_to_cpu(p->sector);
1808 e = read_in_block(mdev, p->block_id, sector, data_size);
1809 if (!e) {
1810 put_ldev(mdev);
1811 return FALSE;
1812 }
1813
Philipp Reisnerb411b362009-09-25 16:07:19 -07001814 e->w.cb = e_end_block;
1815
1816 spin_lock(&mdev->epoch_lock);
1817 e->epoch = mdev->current_epoch;
1818 atomic_inc(&e->epoch->epoch_size);
1819 atomic_inc(&e->epoch->active);
1820
1821 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1822 struct drbd_epoch *epoch;
1823 /* Issue a barrier if we start a new epoch, and the previous epoch
1824		   was not an epoch containing a single request which already was
1825 a Barrier. */
1826 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1827 if (epoch == e->epoch) {
1828 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001829 rw |= REQ_HARDBARRIER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001830 e->flags |= EE_IS_BARRIER;
1831 } else {
1832 if (atomic_read(&epoch->epoch_size) > 1 ||
1833 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1834 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001835 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001836 rw |= REQ_HARDBARRIER;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001837 e->flags |= EE_IS_BARRIER;
1838 }
1839 }
1840 }
1841 spin_unlock(&mdev->epoch_lock);
1842
1843 dp_flags = be32_to_cpu(p->dp_flags);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001844 rw |= write_flags_to_bio(mdev, dp_flags);
1845
Philipp Reisnerb411b362009-09-25 16:07:19 -07001846 if (dp_flags & DP_MAY_SET_IN_SYNC)
1847 e->flags |= EE_MAY_SET_IN_SYNC;
1848
1849 /* I'm the receiver, I do hold a net_cnt reference. */
1850 if (!mdev->net_conf->two_primaries) {
1851 spin_lock_irq(&mdev->req_lock);
1852 } else {
1853 /* don't get the req_lock yet,
1854 * we may sleep in drbd_wait_peer_seq */
1855 const int size = e->size;
1856 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1857 DEFINE_WAIT(wait);
1858 struct drbd_request *i;
1859 struct hlist_node *n;
1860 struct hlist_head *slot;
1861 int first;
1862
1863 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1864 BUG_ON(mdev->ee_hash == NULL);
1865 BUG_ON(mdev->tl_hash == NULL);
1866
1867 /* conflict detection and handling:
1868 * 1. wait on the sequence number,
1869 * in case this data packet overtook ACK packets.
1870 * 2. check our hash tables for conflicting requests.
1871 * we only need to walk the tl_hash, since an ee can not
1872		 * have a conflict with another ee: on the submitting
1873 * node, the corresponding req had already been conflicting,
1874 * and a conflicting req is never sent.
1875 *
1876 * Note: for two_primaries, we are protocol C,
1877 * so there cannot be any request that is DONE
1878 * but still on the transfer log.
1879 *
1880 * unconditionally add to the ee_hash.
1881 *
1882 * if no conflicting request is found:
1883 * submit.
1884 *
1885 * if any conflicting request is found
1886 * that has not yet been acked,
1887 * AND I have the "discard concurrent writes" flag:
1888 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1889 *
1890 * if any conflicting request is found:
1891 * block the receiver, waiting on misc_wait
1892 * until no more conflicting requests are there,
1893 * or we get interrupted (disconnect).
1894 *
1895 * we do not just write after local io completion of those
1896 * requests, but only after req is done completely, i.e.
1897 * we wait for the P_DISCARD_ACK to arrive!
1898 *
1899 * then proceed normally, i.e. submit.
1900 */
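		/* A concrete scenario: both primaries write to the same sector at
		 * the same time.  Each node then finds the peer's data packet
		 * overlapping one of its own pending requests in tl_hash.  The node
		 * that has DISCARD_CONCURRENT set (decided once per connection)
		 * answers the not yet acked conflict with P_DISCARD_ACK and drops
		 * the peer's data, so its own version survives; the node without
		 * the flag waits until its conflicting request is completely done
		 * (which includes that discard ack) and only then submits the
		 * peer's data.  Both sides should end up with the same version. */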
1901 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1902 goto out_interrupted;
1903
1904 spin_lock_irq(&mdev->req_lock);
1905
1906 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1907
1908#define OVERLAPS overlaps(i->sector, i->size, sector, size)
1909 slot = tl_hash_slot(mdev, sector);
1910 first = 1;
1911 for (;;) {
1912 int have_unacked = 0;
1913 int have_conflict = 0;
1914 prepare_to_wait(&mdev->misc_wait, &wait,
1915 TASK_INTERRUPTIBLE);
1916 hlist_for_each_entry(i, n, slot, colision) {
1917 if (OVERLAPS) {
1918 /* only ALERT on first iteration,
1919 * we may be woken up early... */
1920 if (first)
1921 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1922 " new: %llus +%u; pending: %llus +%u\n",
1923 current->comm, current->pid,
1924 (unsigned long long)sector, size,
1925 (unsigned long long)i->sector, i->size);
1926 if (i->rq_state & RQ_NET_PENDING)
1927 ++have_unacked;
1928 ++have_conflict;
1929 }
1930 }
1931#undef OVERLAPS
1932 if (!have_conflict)
1933 break;
1934
1935 /* Discard Ack only for the _first_ iteration */
1936 if (first && discard && have_unacked) {
1937 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1938 (unsigned long long)sector);
1939 inc_unacked(mdev);
1940 e->w.cb = e_send_discard_ack;
1941 list_add_tail(&e->w.list, &mdev->done_ee);
1942
1943 spin_unlock_irq(&mdev->req_lock);
1944
1945 /* we could probably send that P_DISCARD_ACK ourselves,
1946 * but I don't like the receiver using the msock */
1947
1948 put_ldev(mdev);
1949 wake_asender(mdev);
1950 finish_wait(&mdev->misc_wait, &wait);
1951 return TRUE;
1952 }
1953
1954 if (signal_pending(current)) {
1955 hlist_del_init(&e->colision);
1956
1957 spin_unlock_irq(&mdev->req_lock);
1958
1959 finish_wait(&mdev->misc_wait, &wait);
1960 goto out_interrupted;
1961 }
1962
1963 spin_unlock_irq(&mdev->req_lock);
1964 if (first) {
1965 first = 0;
1966 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1967 "sec=%llus\n", (unsigned long long)sector);
1968 } else if (discard) {
1969 /* we had none on the first iteration.
1970 * there must be none now. */
1971 D_ASSERT(have_unacked == 0);
1972 }
1973 schedule();
1974 spin_lock_irq(&mdev->req_lock);
1975 }
1976 finish_wait(&mdev->misc_wait, &wait);
1977 }
1978
1979 list_add(&e->w.list, &mdev->active_ee);
1980 spin_unlock_irq(&mdev->req_lock);
1981
1982 switch (mdev->net_conf->wire_protocol) {
1983 case DRBD_PROT_C:
1984 inc_unacked(mdev);
1985 /* corresponding dec_unacked() in e_end_block()
1986		 * or in _drbd_clear_done_ee, respectively */
1987 break;
1988 case DRBD_PROT_B:
1989 /* I really don't like it that the receiver thread
1990 * sends on the msock, but anyways */
1991 drbd_send_ack(mdev, P_RECV_ACK, e);
1992 break;
1993 case DRBD_PROT_A:
1994 /* nothing to do */
1995 break;
1996 }
1997
1998 if (mdev->state.pdsk == D_DISKLESS) {
1999		/* In case we have the only disk of the cluster: mark it out of sync and cover it by the activity log. */
2000 drbd_set_out_of_sync(mdev, e->sector, e->size);
2001 e->flags |= EE_CALL_AL_COMPLETE_IO;
2002 drbd_al_begin_io(mdev, e->sector);
2003 }
2004
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002005 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
2006 return TRUE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002007
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002008 /* drbd_submit_ee currently fails for one reason only:
2009 * not being able to allocate enough bios.
2010 * Is dropping the connection going to help? */
2011 spin_lock_irq(&mdev->req_lock);
2012 list_del(&e->w.list);
2013 hlist_del_init(&e->colision);
2014 spin_unlock_irq(&mdev->req_lock);
2015 if (e->flags & EE_CALL_AL_COMPLETE_IO)
2016 drbd_al_complete_io(mdev, e->sector);
2017
Philipp Reisnerb411b362009-09-25 16:07:19 -07002018out_interrupted:
2019 /* yes, the epoch_size now is imbalanced.
2020 * but we drop the connection anyways, so we don't have a chance to
2021 * receive a barrier... atomic_inc(&mdev->epoch_size); */
2022 put_ldev(mdev);
2023 drbd_free_ee(mdev, e);
2024 return FALSE;
2025}
2026
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002027/* We may throttle resync, if the lower device seems to be busy,
2028 * and current sync rate is above c_min_rate.
2029 *
2030 * To decide whether or not the lower device is busy, we use a scheme similar
2031 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2032 * (more than 64 sectors) worth of activity we cannot account for with our own resync
2033 * activity, it obviously is "busy".
2034 *
2035 * The current sync rate used here uses only the most recent two step marks,
2036 * to have a short time average so we can react faster.
2037 */
2038int drbd_rs_should_slow_down(struct drbd_conf *mdev)
2039{
2040 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2041 unsigned long db, dt, dbdt;
2042 int curr_events;
2043 int throttle = 0;
2044
2045 /* feature disabled? */
2046 if (mdev->sync_conf.c_min_rate == 0)
2047 return 0;
2048
2049 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2050 (int)part_stat_read(&disk->part0, sectors[1]) -
2051 atomic_read(&mdev->rs_sect_ev);
2052 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2053 unsigned long rs_left;
2054 int i;
2055
2056 mdev->rs_last_events = curr_events;
2057
2058 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2059 * approx. */
2060 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
2061 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2062
2063 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2064 if (!dt)
2065 dt++;
2066 db = mdev->rs_mark_left[i] - rs_left;
2067 dbdt = Bit2KB(db/dt);
2068
2069 if (dbdt > mdev->sync_conf.c_min_rate)
2070 throttle = 1;
2071 }
2072 return throttle;
2073}
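/* Rough numbers for the check above: assuming the usual 4 KiB per bitmap
 * bit, a drop of 3000 bits between the chosen mark and now over dt = 2
 * seconds gives dbdt = Bit2KB(3000 / 2) = 6000 KiB/s; resync is throttled
 * as soon as that short term average exceeds the configured c_min_rate. */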
2074
2075
Philipp Reisner02918be2010-08-20 14:35:10 +02002076static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002077{
2078 sector_t sector;
2079 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2080 struct drbd_epoch_entry *e;
2081 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002082 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002083 unsigned int fault_type;
Philipp Reisner02918be2010-08-20 14:35:10 +02002084 struct p_block_req *p = &mdev->data.rbuf.block_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002085
2086 sector = be64_to_cpu(p->sector);
2087 size = be32_to_cpu(p->blksize);
2088
2089 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
2090 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2091 (unsigned long long)sector, size);
2092 return FALSE;
2093 }
2094 if (sector + (size>>9) > capacity) {
2095 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2096 (unsigned long long)sector, size);
2097 return FALSE;
2098 }
2099
2100 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002101 verb = 1;
2102 switch (cmd) {
2103 case P_DATA_REQUEST:
2104 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2105 break;
2106 case P_RS_DATA_REQUEST:
2107 case P_CSUM_RS_REQUEST:
2108 case P_OV_REQUEST:
2109			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
2110 break;
2111 case P_OV_REPLY:
2112 verb = 0;
2113 dec_rs_pending(mdev);
2114 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2115 break;
2116 default:
2117 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2118 cmdname(cmd));
2119 }
2120 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002121 dev_err(DEV, "Can not satisfy peer's read request, "
2122 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002123
Lars Ellenberga821cc42010-09-06 12:31:37 +02002124	/* drain possibly present payload */
2125 return drbd_drain_block(mdev, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002126 }
2127
2128 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2129 * "criss-cross" setup, that might cause write-out on some other DRBD,
2130 * which in turn might block on the other node at this very place. */
2131 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2132 if (!e) {
2133 put_ldev(mdev);
2134 return FALSE;
2135 }
2136
Philipp Reisner02918be2010-08-20 14:35:10 +02002137 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002138 case P_DATA_REQUEST:
2139 e->w.cb = w_e_end_data_req;
2140 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002141 /* application IO, don't drbd_rs_begin_io */
2142 goto submit;
2143
Philipp Reisnerb411b362009-09-25 16:07:19 -07002144 case P_RS_DATA_REQUEST:
2145 e->w.cb = w_e_end_rsdata_req;
2146 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002147 break;
2148
2149 case P_OV_REPLY:
2150 case P_CSUM_RS_REQUEST:
2151 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002152 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2153 if (!di)
2154 goto out_free_e;
2155
2156 di->digest_size = digest_size;
2157 di->digest = (((char *)di)+sizeof(struct digest_info));
2158
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002159 e->digest = di;
2160 e->flags |= EE_HAS_DIGEST;
2161
Philipp Reisnerb411b362009-09-25 16:07:19 -07002162 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2163 goto out_free_e;
2164
Philipp Reisner02918be2010-08-20 14:35:10 +02002165 if (cmd == P_CSUM_RS_REQUEST) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002166 D_ASSERT(mdev->agreed_pro_version >= 89);
2167 e->w.cb = w_e_end_csum_rs_req;
Philipp Reisner02918be2010-08-20 14:35:10 +02002168 } else if (cmd == P_OV_REPLY) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002169 e->w.cb = w_e_end_ov_reply;
2170 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002171 /* drbd_rs_begin_io done when we sent this request,
2172 * but accounting still needs to be done. */
2173 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002174 }
2175 break;
2176
2177 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002178 if (mdev->ov_start_sector == ~(sector_t)0 &&
2179 mdev->agreed_pro_version >= 90) {
2180 mdev->ov_start_sector = sector;
2181 mdev->ov_position = sector;
2182 mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2183 dev_info(DEV, "Online Verify start sector: %llu\n",
2184 (unsigned long long)sector);
2185 }
2186 e->w.cb = w_e_end_ov_req;
2187 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002188 break;
2189
Philipp Reisnerb411b362009-09-25 16:07:19 -07002190 default:
2191 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002192 cmdname(cmd));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002193 fault_type = DRBD_FAULT_MAX;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002194 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002195 }
2196
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002197 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2198 * wrt the receiver, but it is not as straightforward as it may seem.
2199 * Various places in the resync start and stop logic assume resync
2200 * requests are processed in order, requeuing this on the worker thread
2201 * introduces a bunch of new code for synchronization between threads.
2202 *
2203 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2204 * "forever", throttling after drbd_rs_begin_io will lock that extent
2205 * for application writes for the same time. For now, just throttle
2206 * here, where the rest of the code expects the receiver to sleep for
2207 * a while, anyways.
2208 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002209
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002210 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2211 * this defers syncer requests for some time, before letting at least
2212	 * one request through. The resync controller on the receiving side
2213 * will adapt to the incoming rate accordingly.
2214 *
2215 * We cannot throttle here if remote is Primary/SyncTarget:
2216 * we would also throttle its application reads.
2217 * In that case, throttling is done on the SyncTarget only.
2218 */
2219 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
2220 msleep(100);
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002221 if (drbd_rs_begin_io(mdev, e->sector))
2222 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002223
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002224submit_for_resync:
2225 atomic_add(size >> 9, &mdev->rs_sect_ev);
2226
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002227submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002228 inc_unacked(mdev);
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002229 spin_lock_irq(&mdev->req_lock);
2230 list_add_tail(&e->w.list, &mdev->read_ee);
2231 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002232
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002233 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2234 return TRUE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002235
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002236 /* drbd_submit_ee currently fails for one reason only:
2237 * not being able to allocate enough bios.
2238 * Is dropping the connection going to help? */
2239 spin_lock_irq(&mdev->req_lock);
2240 list_del(&e->w.list);
2241 spin_unlock_irq(&mdev->req_lock);
2242 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2243
Philipp Reisnerb411b362009-09-25 16:07:19 -07002244out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002245 put_ldev(mdev);
2246 drbd_free_ee(mdev, e);
2247 return FALSE;
2248}
2249
2250static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2251{
2252 int self, peer, rv = -100;
2253 unsigned long ch_self, ch_peer;
2254
2255 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2256 peer = mdev->p_uuid[UI_BITMAP] & 1;
2257
2258 ch_peer = mdev->p_uuid[UI_SIZE];
2259 ch_self = mdev->comm_bm_set;
2260
2261 switch (mdev->net_conf->after_sb_0p) {
2262 case ASB_CONSENSUS:
2263 case ASB_DISCARD_SECONDARY:
2264 case ASB_CALL_HELPER:
2265 dev_err(DEV, "Configuration error.\n");
2266 break;
2267 case ASB_DISCONNECT:
2268 break;
2269 case ASB_DISCARD_YOUNGER_PRI:
2270 if (self == 0 && peer == 1) {
2271 rv = -1;
2272 break;
2273 }
2274 if (self == 1 && peer == 0) {
2275 rv = 1;
2276 break;
2277 }
2278 /* Else fall through to one of the other strategies... */
2279 case ASB_DISCARD_OLDER_PRI:
2280 if (self == 0 && peer == 1) {
2281 rv = 1;
2282 break;
2283 }
2284 if (self == 1 && peer == 0) {
2285 rv = -1;
2286 break;
2287 }
2288 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002289 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002290 "Using discard-least-changes instead\n");
2291 case ASB_DISCARD_ZERO_CHG:
2292 if (ch_peer == 0 && ch_self == 0) {
2293 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2294 ? -1 : 1;
2295 break;
2296 } else {
2297 if (ch_peer == 0) { rv = 1; break; }
2298 if (ch_self == 0) { rv = -1; break; }
2299 }
2300 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2301 break;
2302 case ASB_DISCARD_LEAST_CHG:
2303 if (ch_self < ch_peer)
2304 rv = -1;
2305 else if (ch_self > ch_peer)
2306 rv = 1;
2307 else /* ( ch_self == ch_peer ) */
2308 /* Well, then use something else. */
2309 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2310 ? -1 : 1;
2311 break;
2312 case ASB_DISCARD_LOCAL:
2313 rv = -1;
2314 break;
2315 case ASB_DISCARD_REMOTE:
2316 rv = 1;
2317 }
2318
2319 return rv;
2320}
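/* Sign convention used here and in the _1p()/_2p() variants below: a
 * negative result means this node gives up its modifications and becomes
 * sync target, a positive result means the peer does, and -100 means the
 * split brain could not be resolved automatically. */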
2321
2322static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2323{
2324 int self, peer, hg, rv = -100;
2325
2326 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2327 peer = mdev->p_uuid[UI_BITMAP] & 1;
2328
2329 switch (mdev->net_conf->after_sb_1p) {
2330 case ASB_DISCARD_YOUNGER_PRI:
2331 case ASB_DISCARD_OLDER_PRI:
2332 case ASB_DISCARD_LEAST_CHG:
2333 case ASB_DISCARD_LOCAL:
2334 case ASB_DISCARD_REMOTE:
2335 dev_err(DEV, "Configuration error.\n");
2336 break;
2337 case ASB_DISCONNECT:
2338 break;
2339 case ASB_CONSENSUS:
2340 hg = drbd_asb_recover_0p(mdev);
2341 if (hg == -1 && mdev->state.role == R_SECONDARY)
2342 rv = hg;
2343 if (hg == 1 && mdev->state.role == R_PRIMARY)
2344 rv = hg;
2345 break;
2346 case ASB_VIOLENTLY:
2347 rv = drbd_asb_recover_0p(mdev);
2348 break;
2349 case ASB_DISCARD_SECONDARY:
2350 return mdev->state.role == R_PRIMARY ? 1 : -1;
2351 case ASB_CALL_HELPER:
2352 hg = drbd_asb_recover_0p(mdev);
2353 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2354 self = drbd_set_role(mdev, R_SECONDARY, 0);
2355 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2356 * we might be here in C_WF_REPORT_PARAMS which is transient.
2357 * we do not need to wait for the after state change work either. */
2358 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2359 if (self != SS_SUCCESS) {
2360 drbd_khelper(mdev, "pri-lost-after-sb");
2361 } else {
2362 dev_warn(DEV, "Successfully gave up primary role.\n");
2363 rv = hg;
2364 }
2365 } else
2366 rv = hg;
2367 }
2368
2369 return rv;
2370}
2371
2372static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2373{
2374 int self, peer, hg, rv = -100;
2375
2376 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2377 peer = mdev->p_uuid[UI_BITMAP] & 1;
2378
2379 switch (mdev->net_conf->after_sb_2p) {
2380 case ASB_DISCARD_YOUNGER_PRI:
2381 case ASB_DISCARD_OLDER_PRI:
2382 case ASB_DISCARD_LEAST_CHG:
2383 case ASB_DISCARD_LOCAL:
2384 case ASB_DISCARD_REMOTE:
2385 case ASB_CONSENSUS:
2386 case ASB_DISCARD_SECONDARY:
2387 dev_err(DEV, "Configuration error.\n");
2388 break;
2389 case ASB_VIOLENTLY:
2390 rv = drbd_asb_recover_0p(mdev);
2391 break;
2392 case ASB_DISCONNECT:
2393 break;
2394 case ASB_CALL_HELPER:
2395 hg = drbd_asb_recover_0p(mdev);
2396 if (hg == -1) {
2397 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2398 * we might be here in C_WF_REPORT_PARAMS which is transient.
2399 * we do not need to wait for the after state change work either. */
2400 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2401 if (self != SS_SUCCESS) {
2402 drbd_khelper(mdev, "pri-lost-after-sb");
2403 } else {
2404 dev_warn(DEV, "Successfully gave up primary role.\n");
2405 rv = hg;
2406 }
2407 } else
2408 rv = hg;
2409 }
2410
2411 return rv;
2412}
2413
2414static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2415 u64 bits, u64 flags)
2416{
2417 if (!uuid) {
2418 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2419 return;
2420 }
2421 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2422 text,
2423 (unsigned long long)uuid[UI_CURRENT],
2424 (unsigned long long)uuid[UI_BITMAP],
2425 (unsigned long long)uuid[UI_HISTORY_START],
2426 (unsigned long long)uuid[UI_HISTORY_END],
2427 (unsigned long long)bits,
2428 (unsigned long long)flags);
2429}
2430
2431/*
2432 100 after split brain try auto recover
2433 2 C_SYNC_SOURCE set BitMap
2434 1 C_SYNC_SOURCE use BitMap
2435 0 no Sync
2436 -1 C_SYNC_TARGET use BitMap
2437 -2 C_SYNC_TARGET set BitMap
2438 -100 after split brain, disconnect
2439-1000 unrelated data
2440 */
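/* For example: if our bitmap UUID equals the peer's current UUID
 * (rule 70 below), we apparently were sync source of an interrupted
 * resync, the existing bitmap is still valid and 1 is returned; the
 * mirrored situation (rule 50) yields -1.  Only when none of the UUIDs
 * match at all does the function fall through to -1000, unrelated data. */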
2441static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2442{
2443 u64 self, peer;
2444 int i, j;
2445
2446 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2447 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2448
2449 *rule_nr = 10;
2450 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2451 return 0;
2452
2453 *rule_nr = 20;
2454 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2455 peer != UUID_JUST_CREATED)
2456 return -2;
2457
2458 *rule_nr = 30;
2459 if (self != UUID_JUST_CREATED &&
2460 (peer == UUID_JUST_CREATED || peer == (u64)0))
2461 return 2;
2462
2463 if (self == peer) {
2464 int rct, dc; /* roles at crash time */
2465
2466 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2467
2468 if (mdev->agreed_pro_version < 91)
2469 return -1001;
2470
2471 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2472 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2473 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2474 drbd_uuid_set_bm(mdev, 0UL);
2475
2476 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2477 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2478 *rule_nr = 34;
2479 } else {
2480 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2481 *rule_nr = 36;
2482 }
2483
2484 return 1;
2485 }
2486
2487 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2488
2489 if (mdev->agreed_pro_version < 91)
2490 return -1001;
2491
2492 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2493 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2494 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2495
2496 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2497 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2498 mdev->p_uuid[UI_BITMAP] = 0UL;
2499
2500 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2501 *rule_nr = 35;
2502 } else {
2503 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2504 *rule_nr = 37;
2505 }
2506
2507 return -1;
2508 }
2509
2510 /* Common power [off|failure] */
2511 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2512 (mdev->p_uuid[UI_FLAGS] & 2);
2513 /* lowest bit is set when we were primary,
2514 * next bit (weight 2) is set when peer was primary */
2515 *rule_nr = 40;
2516
2517 switch (rct) {
2518 case 0: /* !self_pri && !peer_pri */ return 0;
2519 case 1: /* self_pri && !peer_pri */ return 1;
2520 case 2: /* !self_pri && peer_pri */ return -1;
2521 case 3: /* self_pri && peer_pri */
2522 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2523 return dc ? -1 : 1;
2524 }
2525 }
2526
2527 *rule_nr = 50;
2528 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2529 if (self == peer)
2530 return -1;
2531
2532 *rule_nr = 51;
2533 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2534 if (self == peer) {
2535 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2536 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2537 if (self == peer) {
2538			/* The last P_SYNC_UUID did not get through. Undo the last start of
2539 resync as sync source modifications of the peer's UUIDs. */
2540
2541 if (mdev->agreed_pro_version < 91)
2542 return -1001;
2543
2544 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2545 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2546 return -1;
2547 }
2548 }
2549
2550 *rule_nr = 60;
2551 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2552 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2553 peer = mdev->p_uuid[i] & ~((u64)1);
2554 if (self == peer)
2555 return -2;
2556 }
2557
2558 *rule_nr = 70;
2559 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2560 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2561 if (self == peer)
2562 return 1;
2563
2564 *rule_nr = 71;
2565 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2566 if (self == peer) {
2567 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2568 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2569 if (self == peer) {
2570			/* The last P_SYNC_UUID did not get through. Undo the last start of
2571 resync as sync source modifications of our UUIDs. */
2572
2573 if (mdev->agreed_pro_version < 91)
2574 return -1001;
2575
2576 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2577 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2578
2579 dev_info(DEV, "Undid last start of resync:\n");
2580
2581 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2582 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2583
2584 return 1;
2585 }
2586 }
2587
2588
2589 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002590 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002591 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2592 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2593 if (self == peer)
2594 return 2;
2595 }
2596
2597 *rule_nr = 90;
2598 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2599 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2600 if (self == peer && self != ((u64)0))
2601 return 100;
2602
2603 *rule_nr = 100;
2604 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2605 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2606 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2607 peer = mdev->p_uuid[j] & ~((u64)1);
2608 if (self == peer)
2609 return -100;
2610 }
2611 }
2612
2613 return -1000;
2614}
2615
2616/* drbd_sync_handshake() returns the new conn state on success, or
2617 CONN_MASK (-1) on failure.
2618 */
2619static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2620 enum drbd_disk_state peer_disk) __must_hold(local)
2621{
2622 int hg, rule_nr;
2623 enum drbd_conns rv = C_MASK;
2624 enum drbd_disk_state mydisk;
2625
2626 mydisk = mdev->state.disk;
2627 if (mydisk == D_NEGOTIATING)
2628 mydisk = mdev->new_state_tmp.disk;
2629
2630 dev_info(DEV, "drbd_sync_handshake:\n");
2631 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2632 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2633 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2634
2635 hg = drbd_uuid_compare(mdev, &rule_nr);
2636
2637 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2638
2639 if (hg == -1000) {
2640 dev_alert(DEV, "Unrelated data, aborting!\n");
2641 return C_MASK;
2642 }
2643 if (hg == -1001) {
2644 dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2645 return C_MASK;
2646 }
2647
2648 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2649 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2650 int f = (hg == -100) || abs(hg) == 2;
2651 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2652 if (f)
2653 hg = hg*2;
2654 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2655 hg > 0 ? "source" : "target");
2656 }
2657
Adam Gandelman3a11a482010-04-08 16:48:23 -07002658 if (abs(hg) == 100)
2659 drbd_khelper(mdev, "initial-split-brain");
2660
Philipp Reisnerb411b362009-09-25 16:07:19 -07002661 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2662 int pcount = (mdev->state.role == R_PRIMARY)
2663 + (peer_role == R_PRIMARY);
2664 int forced = (hg == -100);
2665
2666 switch (pcount) {
2667 case 0:
2668 hg = drbd_asb_recover_0p(mdev);
2669 break;
2670 case 1:
2671 hg = drbd_asb_recover_1p(mdev);
2672 break;
2673 case 2:
2674 hg = drbd_asb_recover_2p(mdev);
2675 break;
2676 }
2677 if (abs(hg) < 100) {
2678 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2679 "automatically solved. Sync from %s node\n",
2680 pcount, (hg < 0) ? "peer" : "this");
2681 if (forced) {
2682 dev_warn(DEV, "Doing a full sync, since"
2683 " UUIDs where ambiguous.\n");
2684 hg = hg*2;
2685 }
2686 }
2687 }
2688
2689 if (hg == -100) {
2690 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2691 hg = -1;
2692 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2693 hg = 1;
2694
2695 if (abs(hg) < 100)
2696 dev_warn(DEV, "Split-Brain detected, manually solved. "
2697 "Sync from %s node\n",
2698 (hg < 0) ? "peer" : "this");
2699 }
2700
2701 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01002702 /* FIXME this log message is not correct if we end up here
2703 * after an attempted attach on a diskless node.
2704 * We just refuse to attach -- well, we drop the "connection"
2705 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07002706 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002707 drbd_khelper(mdev, "split-brain");
2708 return C_MASK;
2709 }
2710
2711 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2712 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2713 return C_MASK;
2714 }
2715
2716 if (hg < 0 && /* by intention we do not use mydisk here. */
2717 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2718 switch (mdev->net_conf->rr_conflict) {
2719 case ASB_CALL_HELPER:
2720 drbd_khelper(mdev, "pri-lost");
2721 /* fall through */
2722 case ASB_DISCONNECT:
2723 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2724 return C_MASK;
2725 case ASB_VIOLENTLY:
2726 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2727 "assumption\n");
2728 }
2729 }
2730
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002731 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2732 if (hg == 0)
2733 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2734 else
2735 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2736 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2737 abs(hg) >= 2 ? "full" : "bit-map based");
2738 return C_MASK;
2739 }
2740
Philipp Reisnerb411b362009-09-25 16:07:19 -07002741 if (abs(hg) >= 2) {
2742 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2743 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2744 return C_MASK;
2745 }
2746
2747 if (hg > 0) { /* become sync source. */
2748 rv = C_WF_BITMAP_S;
2749 } else if (hg < 0) { /* become sync target */
2750 rv = C_WF_BITMAP_T;
2751 } else {
2752 rv = C_CONNECTED;
2753 if (drbd_bm_total_weight(mdev)) {
2754 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2755 drbd_bm_total_weight(mdev));
2756 }
2757 }
2758
2759 return rv;
2760}
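/* So, at the end of the handshake: |hg| >= 2 forces a full sync by first
 * setting the whole bitmap, hg > 0 makes this node sync source
 * (C_WF_BITMAP_S), hg < 0 makes it sync target (C_WF_BITMAP_T), and
 * hg == 0 connects without resync, only warning if the bitmap is
 * unexpectedly non-empty. */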
2761
2762/* returns 1 if invalid */
2763static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2764{
2765 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2766 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2767 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2768 return 0;
2769
2770 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2771 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2772 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2773 return 1;
2774
2775 /* everything else is valid if they are equal on both sides. */
2776 if (peer == self)
2777 return 0;
2778
2779	/* everything else is invalid. */
2780 return 1;
2781}
2782
Philipp Reisner02918be2010-08-20 14:35:10 +02002783static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002784{
Philipp Reisner02918be2010-08-20 14:35:10 +02002785 struct p_protocol *p = &mdev->data.rbuf.protocol;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002786 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002787 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002788 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2789
Philipp Reisnerb411b362009-09-25 16:07:19 -07002790 p_proto = be32_to_cpu(p->protocol);
2791 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2792 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2793 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002794 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002795 cf = be32_to_cpu(p->conn_flags);
2796 p_want_lose = cf & CF_WANT_LOSE;
2797
2798 clear_bit(CONN_DRY_RUN, &mdev->flags);
2799
2800 if (cf & CF_DRY_RUN)
2801 set_bit(CONN_DRY_RUN, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002802
2803 if (p_proto != mdev->net_conf->wire_protocol) {
2804 dev_err(DEV, "incompatible communication protocols\n");
2805 goto disconnect;
2806 }
2807
2808 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2809 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2810 goto disconnect;
2811 }
2812
2813 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2814 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2815 goto disconnect;
2816 }
2817
2818 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2819 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2820 goto disconnect;
2821 }
2822
2823 if (p_want_lose && mdev->net_conf->want_lose) {
2824 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2825 goto disconnect;
2826 }
2827
2828 if (p_two_primaries != mdev->net_conf->two_primaries) {
2829 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2830 goto disconnect;
2831 }
2832
2833 if (mdev->agreed_pro_version >= 87) {
2834 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2835
2836 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2837 return FALSE;
2838
2839 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2840 if (strcmp(p_integrity_alg, my_alg)) {
2841 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2842 goto disconnect;
2843 }
2844 dev_info(DEV, "data-integrity-alg: %s\n",
2845 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2846 }
2847
2848 return TRUE;
2849
2850disconnect:
2851 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2852 return FALSE;
2853}
2854
2855/* helper function
2856 * input: alg name, feature name
2857 * return: NULL (alg name was "")
2858 * ERR_PTR(error) if something goes wrong
2859 * or the crypto hash ptr, if it worked out ok. */
2860struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2861 const char *alg, const char *name)
2862{
2863 struct crypto_hash *tfm;
2864
2865 if (!alg[0])
2866 return NULL;
2867
2868 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2869 if (IS_ERR(tfm)) {
2870 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2871 alg, name, PTR_ERR(tfm));
2872 return tfm;
2873 }
2874 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2875 crypto_free_hash(tfm);
2876 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2877 return ERR_PTR(-EINVAL);
2878 }
2879 return tfm;
2880}
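/* Typical use, as in receive_SyncParam() below: a NULL return means the
 * feature is simply not used, IS_ERR() indicates an allocation or lookup
 * problem (the caller disconnects), and otherwise the new tfm is installed
 * under the peer_seq_lock, replacing the previous one. */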
2881
Philipp Reisner02918be2010-08-20 14:35:10 +02002882static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002883{
2884 int ok = TRUE;
Philipp Reisner02918be2010-08-20 14:35:10 +02002885 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002886 unsigned int header_size, data_size, exp_max_sz;
2887 struct crypto_hash *verify_tfm = NULL;
2888 struct crypto_hash *csums_tfm = NULL;
2889 const int apv = mdev->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02002890 int *rs_plan_s = NULL;
2891 int fifo_size = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002892
2893 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2894 : apv == 88 ? sizeof(struct p_rs_param)
2895 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002896 : apv <= 94 ? sizeof(struct p_rs_param_89)
2897 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002898
Philipp Reisner02918be2010-08-20 14:35:10 +02002899 if (packet_size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002900 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002901 packet_size, exp_max_sz);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002902 return FALSE;
2903 }
2904
2905 if (apv <= 88) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002906 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2907 data_size = packet_size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002908 } else if (apv <= 94) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002909 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2910 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002911 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002912 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02002913 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2914 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002915 D_ASSERT(data_size == 0);
2916 }
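	/* Roughly, the three layouts are nested extensions of one another:
	 * p_rs_param carries just the rate (plus a trailing algorithm name
	 * for apv 88), p_rs_param_89 adds fixed size verify_alg/csums_alg
	 * fields, and p_rs_param_95 additionally carries the resync
	 * controller settings read further down.  header_size is the fixed
	 * part of the payload still to be received, data_size the variable
	 * tail (in practice non-zero only for apv 88). */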
2917
2918 /* initialize verify_alg and csums_alg */
2919 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2920
Philipp Reisner02918be2010-08-20 14:35:10 +02002921 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002922 return FALSE;
2923
2924 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2925
2926 if (apv >= 88) {
2927 if (apv == 88) {
2928 if (data_size > SHARED_SECRET_MAX) {
2929 dev_err(DEV, "verify-alg too long, "
2930 "peer wants %u, accepting only %u byte\n",
2931 data_size, SHARED_SECRET_MAX);
2932 return FALSE;
2933 }
2934
2935 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2936 return FALSE;
2937
2938 /* we expect NUL terminated string */
2939 /* but just in case someone tries to be evil */
2940 D_ASSERT(p->verify_alg[data_size-1] == 0);
2941 p->verify_alg[data_size-1] = 0;
2942
2943 } else /* apv >= 89 */ {
2944 /* we still expect NUL terminated strings */
2945 /* but just in case someone tries to be evil */
2946 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2947 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2948 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2949 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2950 }
2951
2952 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2953 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2954 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2955 mdev->sync_conf.verify_alg, p->verify_alg);
2956 goto disconnect;
2957 }
2958 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2959 p->verify_alg, "verify-alg");
2960 if (IS_ERR(verify_tfm)) {
2961 verify_tfm = NULL;
2962 goto disconnect;
2963 }
2964 }
2965
2966 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2967 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2968 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2969 mdev->sync_conf.csums_alg, p->csums_alg);
2970 goto disconnect;
2971 }
2972 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2973 p->csums_alg, "csums-alg");
2974 if (IS_ERR(csums_tfm)) {
2975 csums_tfm = NULL;
2976 goto disconnect;
2977 }
2978 }
2979
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002980 if (apv > 94) {
2981 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2982 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2983 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2984 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2985 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02002986
2987 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2988 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2989 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2990 if (!rs_plan_s) {
2991 dev_err(DEV, "kmalloc of fifo_buffer failed");
2992 goto disconnect;
2993 }
2994 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002995 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002996
2997 spin_lock(&mdev->peer_seq_lock);
2998 /* lock against drbd_nl_syncer_conf() */
2999 if (verify_tfm) {
3000 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
3001 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
3002 crypto_free_hash(mdev->verify_tfm);
3003 mdev->verify_tfm = verify_tfm;
3004 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3005 }
3006 if (csums_tfm) {
3007 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
3008 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
3009 crypto_free_hash(mdev->csums_tfm);
3010 mdev->csums_tfm = csums_tfm;
3011 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3012 }
Philipp Reisner778f2712010-07-06 11:14:00 +02003013 if (fifo_size != mdev->rs_plan_s.size) {
3014 kfree(mdev->rs_plan_s.values);
3015 mdev->rs_plan_s.values = rs_plan_s;
3016 mdev->rs_plan_s.size = fifo_size;
3017 mdev->rs_planed = 0;
3018 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003019 spin_unlock(&mdev->peer_seq_lock);
3020 }
3021
3022 return ok;
3023disconnect:
3024 /* just for completeness: actually not needed,
3025 * as this is not reached if csums_tfm was ok. */
3026 crypto_free_hash(csums_tfm);
3027 /* but free the verify_tfm again, if csums_tfm did not work out */
3028 crypto_free_hash(verify_tfm);
3029 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3030 return FALSE;
3031}
3032
3033static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
3034{
3035 /* sorry, we currently have no working implementation
3036 * of distributed TCQ */
3037}
3038
3039/* warn if the arguments differ by more than 12.5% */
3040static void warn_if_differ_considerably(struct drbd_conf *mdev,
3041 const char *s, sector_t a, sector_t b)
3042{
3043 sector_t d;
3044 if (a == 0 || b == 0)
3045 return;
3046 d = (a > b) ? (a - b) : (b - a);
3047 if (d > (a>>3) || d > (b>>3))
3048 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3049 (unsigned long long)a, (unsigned long long)b);
3050}
3051
Philipp Reisner02918be2010-08-20 14:35:10 +02003052static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003053{
Philipp Reisner02918be2010-08-20 14:35:10 +02003054 struct p_sizes *p = &mdev->data.rbuf.sizes;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003055 enum determine_dev_size dd = unchanged;
3056 unsigned int max_seg_s;
3057 sector_t p_size, p_usize, my_usize;
3058 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003059 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003060
Philipp Reisnerb411b362009-09-25 16:07:19 -07003061 p_size = be64_to_cpu(p->d_size);
3062 p_usize = be64_to_cpu(p->u_size);
3063
3064 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
3065 dev_err(DEV, "some backing storage is needed\n");
3066 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3067 return FALSE;
3068 }
3069
3070 /* just store the peer's disk size for now.
3071 * we still need to figure out whether we accept that. */
3072 mdev->p_size = p_size;
3073
Philipp Reisnerb411b362009-09-25 16:07:19 -07003074 if (get_ldev(mdev)) {
3075 warn_if_differ_considerably(mdev, "lower level device sizes",
3076 p_size, drbd_get_max_capacity(mdev->ldev));
3077 warn_if_differ_considerably(mdev, "user requested size",
3078 p_usize, mdev->ldev->dc.disk_size);
3079
3080 /* if this is the first connect, or an otherwise expected
3081 * param exchange, choose the minimum */
3082 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3083 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3084 p_usize);
3085
3086 my_usize = mdev->ldev->dc.disk_size;
3087
3088 if (mdev->ldev->dc.disk_size != p_usize) {
3089 mdev->ldev->dc.disk_size = p_usize;
3090 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3091 (unsigned long)mdev->ldev->dc.disk_size);
3092 }
3093
3094 /* Never shrink a device with usable data during connect.
3095 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01003096 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07003097 drbd_get_capacity(mdev->this_bdev) &&
3098 mdev->state.disk >= D_OUTDATED &&
3099 mdev->state.conn < C_CONNECTED) {
3100 dev_err(DEV, "The peer's disk size is too small!\n");
3101 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3102 mdev->ldev->dc.disk_size = my_usize;
3103 put_ldev(mdev);
3104 return FALSE;
3105 }
3106 put_ldev(mdev);
3107 }
3108#undef min_not_zero
3109
Philipp Reisnere89b5912010-03-24 17:11:33 +01003110 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003111 if (get_ldev(mdev)) {
Philipp Reisnere89b5912010-03-24 17:11:33 +01003112 dd = drbd_determin_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003113 put_ldev(mdev);
3114 if (dd == dev_size_error)
3115 return FALSE;
3116 drbd_md_sync(mdev);
3117 } else {
3118 /* I am diskless, need to accept the peer's size. */
3119 drbd_set_my_capacity(mdev, p_size);
3120 }
3121
Philipp Reisnerb411b362009-09-25 16:07:19 -07003122 if (get_ldev(mdev)) {
3123 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3124 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3125 ldsc = 1;
3126 }
3127
Lars Ellenberga1c88d02010-05-14 19:16:41 +02003128 if (mdev->agreed_pro_version < 94)
3129 max_seg_s = be32_to_cpu(p->max_segment_size);
Lars Ellenberg8979d9c2010-09-14 15:56:29 +02003130 else if (mdev->agreed_pro_version == 94)
3131 max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
Lars Ellenberga1c88d02010-05-14 19:16:41 +02003132 else /* drbd 8.3.8 onwards */
3133 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
3134
Philipp Reisnerb411b362009-09-25 16:07:19 -07003135 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
3136 drbd_setup_queue_param(mdev, max_seg_s);
3137
Philipp Reisnere89b5912010-03-24 17:11:33 +01003138 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003139 put_ldev(mdev);
3140 }
3141
3142 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3143 if (be64_to_cpu(p->c_size) !=
3144 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3145 /* we have different sizes, probably peer
3146 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003147 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003148 }
3149 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3150 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3151 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003152 mdev->state.disk >= D_INCONSISTENT) {
3153 if (ddsf & DDSF_NO_RESYNC)
3154 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3155 else
3156 resync_after_online_grow(mdev);
3157 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003158 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3159 }
3160 }
3161
3162 return TRUE;
3163}
3164
Philipp Reisner02918be2010-08-20 14:35:10 +02003165static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003166{
Philipp Reisner02918be2010-08-20 14:35:10 +02003167 struct p_uuids *p = &mdev->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003168 u64 *p_uuid;
3169 int i;
3170
Philipp Reisnerb411b362009-09-25 16:07:19 -07003171	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) {
		dev_err(DEV, "kmalloc of p_uuid failed\n");
		return FALSE;
	}
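PLACEHOLDER_REMOVED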
3172
3173 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3174 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3175
3176 kfree(mdev->p_uuid);
3177 mdev->p_uuid = p_uuid;
3178
3179 if (mdev->state.conn < C_CONNECTED &&
3180 mdev->state.disk < D_INCONSISTENT &&
3181 mdev->state.role == R_PRIMARY &&
3182 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3183 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3184 (unsigned long long)mdev->ed_uuid);
3185 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3186 return FALSE;
3187 }
3188
3189 if (get_ldev(mdev)) {
3190 int skip_initial_sync =
3191 mdev->state.conn == C_CONNECTED &&
3192 mdev->agreed_pro_version >= 90 &&
3193 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3194 (p_uuid[UI_FLAGS] & 8);
3195 if (skip_initial_sync) {
3196 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3197 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3198 "clear_n_write from receive_uuids");
3199 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3200 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3201 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3202 CS_VERBOSE, NULL);
3203 drbd_md_sync(mdev);
3204 }
3205 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003206 } else if (mdev->state.disk < D_INCONSISTENT &&
3207 mdev->state.role == R_PRIMARY) {
3208 /* I am a diskless primary, the peer just created a new current UUID
3209 for me. */
3210 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003211 }
3212
3213	/* Before we test for the disk state, we should wait until a possibly
3214	   ongoing cluster-wide state change has finished. That is important if
3215 we are primary and are detaching from our disk. We need to see the
3216 new disk state... */
3217 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3218 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3219 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3220
3221 return TRUE;
3222}
3223
3224/**
3225 * convert_state() - Converts the peer's view of the cluster state to our point of view
3226 * @ps: The state as seen by the peer.
3227 */
3228static union drbd_state convert_state(union drbd_state ps)
3229{
3230 union drbd_state ms;
3231
3232 static enum drbd_conns c_tab[] = {
3233 [C_CONNECTED] = C_CONNECTED,
3234
3235 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3236 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3237 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3238 [C_VERIFY_S] = C_VERIFY_T,
3239 [C_MASK] = C_MASK,
3240 };
3241
3242 ms.i = ps.i;
3243
3244 ms.conn = c_tab[ps.conn];
3245 ms.peer = ps.role;
3246 ms.role = ps.peer;
3247 ms.pdsk = ps.disk;
3248 ms.disk = ps.pdsk;
3249 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3250
3251 return ms;
3252}
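/* An example of the conversion above (roles and disk states are swapped,
 * asymmetric connection states are mirrored): if the peer reports itself
 * Primary with an UpToDate disk, sees us as Secondary with an Inconsistent
 * disk, and is in C_STARTING_SYNC_S, then from our point of view that is:
 * peer Primary, we Secondary, pdsk UpToDate, disk Inconsistent, and
 * conn C_STARTING_SYNC_T. */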
3253
Philipp Reisner02918be2010-08-20 14:35:10 +02003254static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003255{
Philipp Reisner02918be2010-08-20 14:35:10 +02003256 struct p_req_state *p = &mdev->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003257 union drbd_state mask, val;
3258 int rv;
3259
Philipp Reisnerb411b362009-09-25 16:07:19 -07003260 mask.i = be32_to_cpu(p->mask);
3261 val.i = be32_to_cpu(p->val);
3262
3263 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3264 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3265 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3266 return TRUE;
3267 }
3268
3269 mask = convert_state(mask);
3270 val = convert_state(val);
3271
3272 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3273
3274 drbd_send_sr_reply(mdev, rv);
3275 drbd_md_sync(mdev);
3276
3277 return TRUE;
3278}
3279
Philipp Reisner02918be2010-08-20 14:35:10 +02003280static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003281{
Philipp Reisner02918be2010-08-20 14:35:10 +02003282 struct p_state *p = &mdev->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003283 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003284 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003285 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003286 int rv;
3287
Philipp Reisnerb411b362009-09-25 16:07:19 -07003288 peer_state.i = be32_to_cpu(p->state);
3289
3290 real_peer_disk = peer_state.disk;
3291 if (peer_state.disk == D_NEGOTIATING) {
3292 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3293 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3294 }
3295
3296 spin_lock_irq(&mdev->req_lock);
3297 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003298 os = ns = mdev->state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003299 spin_unlock_irq(&mdev->req_lock);
3300
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003301 /* peer says his disk is uptodate, while we think it is inconsistent,
3302 * and this happens while we think we have a sync going on. */
3303 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3304 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3305 /* If we are (becoming) SyncSource, but peer is still in sync
3306 * preparation, ignore its uptodate-ness to avoid flapping, it
3307 * will change to inconsistent once the peer reaches active
3308 * syncing states.
3309 * It may have changed syncer-paused flags, however, so we
3310 * cannot ignore this completely. */
3311 if (peer_state.conn > C_CONNECTED &&
3312 peer_state.conn < C_SYNC_SOURCE)
3313 real_peer_disk = D_INCONSISTENT;
3314
3315 /* if peer_state changes to connected at the same time,
3316 * it explicitly notifies us that it finished resync.
3317 * Maybe we should finish it up, too? */
3318 else if (os.conn >= C_SYNC_SOURCE &&
3319 peer_state.conn == C_CONNECTED) {
3320 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3321 drbd_resync_finished(mdev);
3322 return TRUE;
3323 }
3324 }
3325
3326 /* peer says his disk is inconsistent, while we think it is uptodate,
3327 * and this happens while the peer still thinks we have a sync going on,
3328 * but we think we are already done with the sync.
3329 * We ignore this to avoid flapping pdsk.
3330 * This should not happen, if the peer is a recent version of drbd. */
3331 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3332 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3333 real_peer_disk = D_UP_TO_DATE;
3334
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003335 if (ns.conn == C_WF_REPORT_PARAMS)
3336 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003337
3338 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3339 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3340 int cr; /* consider resync */
3341
3342 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003343 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003344 /* if we had an established connection
3345 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003346 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003347 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003348 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003349 /* if we have both been inconsistent, and the peer has been
3350 * forced to be UpToDate with --overwrite-data */
3351 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3352 /* if we had been plain connected, and the admin requested to
3353 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003354 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003355 (peer_state.conn >= C_STARTING_SYNC_S &&
3356 peer_state.conn <= C_WF_BITMAP_T));
3357
3358 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003359 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003360
3361 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003362 if (ns.conn == C_MASK) {
3363 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003364 if (mdev->state.disk == D_NEGOTIATING) {
3365 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003366 } else if (peer_state.disk == D_NEGOTIATING) {
3367 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3368 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003369 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003370 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003371 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3372 return FALSE;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003373 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003374 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3375 return FALSE;
3376 }
3377 }
3378 }
3379
3380 spin_lock_irq(&mdev->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003381 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003382 goto retry;
3383 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003384 ns.peer = peer_state.role;
3385 ns.pdsk = real_peer_disk;
3386 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003387 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003388 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003389 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3390 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003391 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3392 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3393	   for temporary network outages! */
3394 spin_unlock_irq(&mdev->req_lock);
3395 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3396 tl_clear(mdev);
3397 drbd_uuid_new_current(mdev);
3398 clear_bit(NEW_CUR_UUID, &mdev->flags);
3399 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3400 return FALSE;
3401 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003402 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003403 ns = mdev->state;
3404 spin_unlock_irq(&mdev->req_lock);
3405
3406 if (rv < SS_SUCCESS) {
3407 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3408 return FALSE;
3409 }
3410
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003411 if (os.conn > C_WF_REPORT_PARAMS) {
3412 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003413 peer_state.disk != D_NEGOTIATING ) {
3414 /* we want resync, peer has not yet decided to sync... */
3415 /* Nowadays only used when forcing a node into primary role and
3416 setting its disk to UpToDate with that */
3417 drbd_send_uuids(mdev);
3418 drbd_send_state(mdev);
3419 }
3420 }
3421
3422 mdev->net_conf->want_lose = 0;
3423
3424 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3425
3426 return TRUE;
3427}
3428
Philipp Reisner02918be2010-08-20 14:35:10 +02003429static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003430{
Philipp Reisner02918be2010-08-20 14:35:10 +02003431 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003432
3433 wait_event(mdev->misc_wait,
3434 mdev->state.conn == C_WF_SYNC_UUID ||
3435 mdev->state.conn < C_CONNECTED ||
3436 mdev->state.disk < D_NEGOTIATING);
3437
3438 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3439
Philipp Reisnerb411b362009-09-25 16:07:19 -07003440 /* Here the _drbd_uuid_ functions are right, current should
3441 _not_ be rotated into the history */
3442 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3443 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3444 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3445
3446 drbd_start_resync(mdev, C_SYNC_TARGET);
3447
3448 put_ldev(mdev);
3449 } else
3450 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3451
3452 return TRUE;
3453}
3454
3455enum receive_bitmap_ret { OK, DONE, FAILED };
3456
3457static enum receive_bitmap_ret
Philipp Reisner02918be2010-08-20 14:35:10 +02003458receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3459 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003460{
3461 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3462 unsigned want = num_words * sizeof(long);
3463
Philipp Reisner02918be2010-08-20 14:35:10 +02003464 if (want != data_size) {
3465 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003466 return FAILED;
3467 }
3468 if (want == 0)
3469 return DONE;
3470 if (drbd_recv(mdev, buffer, want) != want)
3471 return FAILED;
3472
3473 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3474
3475 c->word_offset += num_words;
3476 c->bit_offset = c->word_offset * BITS_PER_LONG;
3477 if (c->bit_offset > c->bm_bits)
3478 c->bit_offset = c->bm_bits;
3479
3480 return OK;
3481}
3482
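/* Decode one P_COMPRESSED_BITMAP payload of VLI run length encoded runs:
 * the bit stream describes alternating runs of clear and set bits (whether
 * the first run is a "set" run comes from DCBP_get_start()).  We keep up
 * to 64 bits of look-ahead, decode one run length at a time with
 * vli_decode_bits(), set the corresponding bits in the local bitmap for
 * "set" runs, then shift the consumed bits out and refill the look-ahead
 * from the stream until it is exhausted. */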
3483static enum receive_bitmap_ret
3484recv_bm_rle_bits(struct drbd_conf *mdev,
3485 struct p_compressed_bm *p,
3486 struct bm_xfer_ctx *c)
3487{
3488 struct bitstream bs;
3489 u64 look_ahead;
3490 u64 rl;
3491 u64 tmp;
3492 unsigned long s = c->bit_offset;
3493 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003494 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003495 int toggle = DCBP_get_start(p);
3496 int have;
3497 int bits;
3498
3499 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3500
3501 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3502 if (bits < 0)
3503 return FAILED;
3504
3505 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3506 bits = vli_decode_bits(&rl, look_ahead);
3507 if (bits <= 0)
3508 return FAILED;
3509
3510 if (toggle) {
3511 e = s + rl -1;
3512 if (e >= c->bm_bits) {
3513 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3514 return FAILED;
3515 }
3516 _drbd_bm_set_bits(mdev, s, e);
3517 }
3518
3519 if (have < bits) {
3520 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3521 have, bits, look_ahead,
3522 (unsigned int)(bs.cur.b - p->code),
3523 (unsigned int)bs.buf_len);
3524 return FAILED;
3525 }
3526 look_ahead >>= bits;
3527 have -= bits;
3528
3529 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3530 if (bits < 0)
3531 return FAILED;
3532 look_ahead |= tmp << have;
3533 have += bits;
3534 }
3535
3536 c->bit_offset = s;
3537 bm_xfer_ctx_bit_to_word_offset(c);
3538
3539 return (s == c->bm_bits) ? DONE : OK;
3540}
3541
3542static enum receive_bitmap_ret
3543decode_bitmap_c(struct drbd_conf *mdev,
3544 struct p_compressed_bm *p,
3545 struct bm_xfer_ctx *c)
3546{
3547 if (DCBP_get_code(p) == RLE_VLI_Bits)
3548 return recv_bm_rle_bits(mdev, p, c);
3549
3550 /* other variants had been implemented for evaluation,
3551 * but have been dropped as this one turned out to be "best"
3552 * during all our tests. */
3553
3554 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3555 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3556 return FAILED;
3557}
3558
3559void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3560 const char *direction, struct bm_xfer_ctx *c)
3561{
3562 /* what would it take to transfer it "plaintext" */
Philipp Reisner0b70a132010-08-20 13:36:10 +02003563 unsigned plain = sizeof(struct p_header80) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003564 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3565 + c->bm_words * sizeof(long);
3566 unsigned total = c->bytes[0] + c->bytes[1];
3567 unsigned r;
3568
3569	/* total cannot be zero, but just in case: */
3570 if (total == 0)
3571 return;
3572
3573 /* don't report if not compressed */
3574 if (total >= plain)
3575 return;
3576
3577 /* total < plain. check for overflow, still */
3578 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3579 : (1000 * total / plain);
3580
3581 if (r > 1000)
3582 r = 1000;
3583
3584 r = 1000 - r;
3585 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3586 "total %u; compression: %u.%u%%\n",
3587 direction,
3588 c->bytes[1], c->packets[1],
3589 c->bytes[0], c->packets[0],
3590 total, r/10, r % 10);
3591}
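/* Hypothetical numbers, to illustrate the arithmetic above: with
 * plain == 100000 bytes and total == 12345 bytes, r becomes
 * 1000*12345/100000 == 123, then 1000 - 123 == 877, which is printed
 * as "compression: 87.7%".  If total >= plain, nothing is reported. */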
3592
3593/* Since we are processing the bitfield from lower addresses to higher,
3594	 it does not matter if we process it in 32 bit chunks or 64 bit
3595	 chunks as long as it is little endian. (Understand it as a byte stream,
3596	 beginning with the lowest byte...) If we used big endian,
3597	 we would need to process it from the highest address to the lowest,
3598 in order to be agnostic to the 32 vs 64 bits issue.
3599
3600 returns 0 on failure, 1 if we successfully received it. */
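/* E.g. the byte stream 0x21 0x43 0x65 0x87 ... stays the same bitmap no
 * matter whether it is read as the little endian 32 bit word 0x87654321
 * or as the low half of a 64 bit little endian word: bit k of the bitmap
 * is always bit (k % 8) of byte (k / 8). */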
Philipp Reisner02918be2010-08-20 14:35:10 +02003601static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003602{
3603 struct bm_xfer_ctx c;
3604 void *buffer;
3605 enum receive_bitmap_ret ret;
3606 int ok = FALSE;
Philipp Reisner02918be2010-08-20 14:35:10 +02003607 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003608
3609 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3610
3611 drbd_bm_lock(mdev, "receive bitmap");
3612
3613 /* maybe we should use some per thread scratch page,
3614 * and allocate that during initial device creation? */
3615 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3616 if (!buffer) {
3617 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3618 goto out;
3619 }
3620
3621 c = (struct bm_xfer_ctx) {
3622 .bm_bits = drbd_bm_bits(mdev),
3623 .bm_words = drbd_bm_words(mdev),
3624 };
3625
3626 do {
Philipp Reisner02918be2010-08-20 14:35:10 +02003627 if (cmd == P_BITMAP) {
3628 ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
3629 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003630 /* MAYBE: sanity check that we speak proto >= 90,
3631 * and the feature is enabled! */
3632 struct p_compressed_bm *p;
3633
Philipp Reisner02918be2010-08-20 14:35:10 +02003634 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003635 dev_err(DEV, "ReportCBitmap packet too large\n");
3636 goto out;
3637 }
3638			/* use the page buffer */
3639 p = buffer;
3640 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003641 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003642 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003643 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3644 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003645				goto out;
3646 }
3647 ret = decode_bitmap_c(mdev, p, &c);
3648 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003649			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003650 goto out;
3651 }
3652
Philipp Reisner02918be2010-08-20 14:35:10 +02003653 c.packets[cmd == P_BITMAP]++;
3654 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003655
3656 if (ret != OK)
3657 break;
3658
Philipp Reisner02918be2010-08-20 14:35:10 +02003659 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003660 goto out;
3661 } while (ret == OK);
3662 if (ret == FAILED)
3663 goto out;
3664
3665 INFO_bm_xfer_stats(mdev, "receive", &c);
3666
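	/* Both nodes exchange their bitmaps: the sync target (C_WF_BITMAP_T)
	 * now sends its own bitmap back and then asks for the sync UUID,
	 * while on the sync source side the resync is kicked off at the end
	 * of this function once the exchange succeeded. */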
3667 if (mdev->state.conn == C_WF_BITMAP_T) {
3668 ok = !drbd_send_bitmap(mdev);
3669 if (!ok)
3670 goto out;
3671 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3672 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3673 D_ASSERT(ok == SS_SUCCESS);
3674 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3675 /* admin may have requested C_DISCONNECTING,
3676 * other threads may have noticed network errors */
3677 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3678 drbd_conn_str(mdev->state.conn));
3679 }
3680
3681 ok = TRUE;
3682 out:
3683 drbd_bm_unlock(mdev);
3684 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3685 drbd_start_resync(mdev, C_SYNC_SOURCE);
3686 free_page((unsigned long) buffer);
3687 return ok;
3688}
3689
Philipp Reisner02918be2010-08-20 14:35:10 +02003690static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003691{
3692 /* TODO zero copy sink :) */
3693 static char sink[128];
3694 int size, want, r;
3695
Philipp Reisner02918be2010-08-20 14:35:10 +02003696 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3697 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003698
Philipp Reisner02918be2010-08-20 14:35:10 +02003699 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003700 while (size > 0) {
3701 want = min_t(int, size, sizeof(sink));
3702 r = drbd_recv(mdev, sink, want);
3703 ERR_IF(r <= 0) break;
3704 size -= r;
3705 }
3706 return size == 0;
3707}
3708
Philipp Reisner02918be2010-08-20 14:35:10 +02003709static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003710{
3711 if (mdev->state.disk >= D_INCONSISTENT)
3712 drbd_kick_lo(mdev);
3713
3714 /* Make sure we've acked all the TCP data associated
3715 * with the data requests being unplugged */
3716 drbd_tcp_quickack(mdev->data.socket);
3717
3718 return TRUE;
3719}
3720
Philipp Reisner02918be2010-08-20 14:35:10 +02003721typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003722
Philipp Reisner02918be2010-08-20 14:35:10 +02003723struct data_cmd {
3724 int expect_payload;
3725 size_t pkt_size;
3726 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003727};
3728
Philipp Reisner02918be2010-08-20 14:35:10 +02003729static struct data_cmd drbd_cmd_handler[] = {
3730 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3731 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3732 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3733 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3734 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3735 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3736 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3737 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3738 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3739 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3740 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3741 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3742 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3743 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3744 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3745 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3746 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3747 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3748 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3749 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3750 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3751 /* anything missing from this table is in
3752 * the asender_tbl, see get_asender_cmd */
3753 [P_MAX_CMD] = { 0, 0, NULL },
3754};
3755
3756/* All handler functions that expect a sub-header get that sub-header in
3757   mdev->data.rbuf.header.head.payload.
3758
3759   Usually the callback can find the usual p_header in
3760   mdev->data.rbuf.header.head, but it may not rely on that, since there
3761   is also p_header95. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003762
3763static void drbdd(struct drbd_conf *mdev)
3764{
Philipp Reisner02918be2010-08-20 14:35:10 +02003765 union p_header *header = &mdev->data.rbuf.header;
3766 unsigned int packet_size;
3767 enum drbd_packets cmd;
3768 size_t shs; /* sub header size */
3769 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003770
3771 while (get_t_state(&mdev->receiver) == Running) {
3772 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003773 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3774 goto err_out;
3775
3776 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3777 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3778 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003779 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003780
Philipp Reisner02918be2010-08-20 14:35:10 +02003781 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3782 rv = drbd_recv(mdev, &header->h80.payload, shs);
3783 if (unlikely(rv != shs)) {
3784 dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3785 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003786 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003787
3788 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3789 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3790 goto err_out;
3791 }
3792
3793 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3794
3795 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003796 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003797 cmdname(cmd), packet_size);
3798 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003799 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003800 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003801
Philipp Reisner02918be2010-08-20 14:35:10 +02003802 if (0) {
3803 err_out:
3804 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003805 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003806 /* If we leave here, we probably want to update at least the
3807 * "Connected" indicator on stable storage. Do so explicitly here. */
3808 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003809}
3810
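/* Wait until the worker has processed everything that was queued before
 * this call: we queue a barrier work item whose callback only completes
 * barr.done, and since the worker handles its queue in order, waiting for
 * that completion is sufficient. */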
3811void drbd_flush_workqueue(struct drbd_conf *mdev)
3812{
3813 struct drbd_wq_barrier barr;
3814
3815 barr.w.cb = w_prev_work_done;
3816 init_completion(&barr.done);
3817 drbd_queue_work(&mdev->data.work, &barr.w);
3818 wait_for_completion(&barr.done);
3819}
3820
Philipp Reisnerf70b35112010-06-24 14:34:40 +02003821void drbd_free_tl_hash(struct drbd_conf *mdev)
3822{
3823 struct hlist_head *h;
3824
3825 spin_lock_irq(&mdev->req_lock);
3826
3827 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3828 spin_unlock_irq(&mdev->req_lock);
3829 return;
3830 }
3831 /* paranoia code */
3832 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3833 if (h->first)
3834 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3835 (int)(h - mdev->ee_hash), h->first);
3836 kfree(mdev->ee_hash);
3837 mdev->ee_hash = NULL;
3838 mdev->ee_hash_s = 0;
3839
3840 /* paranoia code */
3841 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3842 if (h->first)
3843 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3844 (int)(h - mdev->tl_hash), h->first);
3845 kfree(mdev->tl_hash);
3846 mdev->tl_hash = NULL;
3847 mdev->tl_hash_s = 0;
3848 spin_unlock_irq(&mdev->req_lock);
3849}
3850
Philipp Reisnerb411b362009-09-25 16:07:19 -07003851static void drbd_disconnect(struct drbd_conf *mdev)
3852{
3853 enum drbd_fencing_p fp;
3854 union drbd_state os, ns;
3855 int rv = SS_UNKNOWN_ERROR;
3856 unsigned int i;
3857
3858 if (mdev->state.conn == C_STANDALONE)
3859 return;
3860 if (mdev->state.conn >= C_WF_CONNECTION)
3861 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3862 drbd_conn_str(mdev->state.conn));
3863
3864 /* asender does not clean up anything. it must not interfere, either */
3865 drbd_thread_stop(&mdev->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003866 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003867
Philipp Reisner85719572010-07-21 10:20:17 +02003868 /* wait for current activity to cease. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003869 spin_lock_irq(&mdev->req_lock);
3870 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3871 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3872 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3873 spin_unlock_irq(&mdev->req_lock);
3874
3875 /* We do not have data structures that would allow us to
3876 * get the rs_pending_cnt down to 0 again.
3877 * * On C_SYNC_TARGET we do not have any data structures describing
3878 * the pending RSDataRequest's we have sent.
3879 * * On C_SYNC_SOURCE there is no data structure that tracks
3880 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3881 * And no, it is not the sum of the reference counts in the
3882 * resync_LRU. The resync_LRU tracks the whole operation including
3883 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3884 * on the fly. */
3885 drbd_rs_cancel_all(mdev);
3886 mdev->rs_total = 0;
3887 mdev->rs_failed = 0;
3888 atomic_set(&mdev->rs_pending_cnt, 0);
3889 wake_up(&mdev->misc_wait);
3890
3891 /* make sure syncer is stopped and w_resume_next_sg queued */
3892 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003893 resync_timer_fn((unsigned long)mdev);
3894
Philipp Reisnerb411b362009-09-25 16:07:19 -07003895 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3896 * w_make_resync_request etc. which may still be on the worker queue
3897 * to be "canceled" */
3898 drbd_flush_workqueue(mdev);
3899
3900 /* This also does reclaim_net_ee(). If we do this too early, we might
3901 * miss some resync ee and pages.*/
3902 drbd_process_done_ee(mdev);
3903
3904 kfree(mdev->p_uuid);
3905 mdev->p_uuid = NULL;
3906
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003907 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003908 tl_clear(mdev);
3909
Philipp Reisnerb411b362009-09-25 16:07:19 -07003910 dev_info(DEV, "Connection closed\n");
3911
3912 drbd_md_sync(mdev);
3913
3914 fp = FP_DONT_CARE;
3915 if (get_ldev(mdev)) {
3916 fp = mdev->ldev->dc.fencing;
3917 put_ldev(mdev);
3918 }
3919
Philipp Reisner87f7be42010-06-11 13:56:33 +02003920 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3921 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003922
3923 spin_lock_irq(&mdev->req_lock);
3924 os = mdev->state;
3925 if (os.conn >= C_UNCONNECTED) {
3926 /* Do not restart in case we are C_DISCONNECTING */
3927 ns = os;
3928 ns.conn = C_UNCONNECTED;
3929 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3930 }
3931 spin_unlock_irq(&mdev->req_lock);
3932
3933 if (os.conn == C_DISCONNECTING) {
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003934 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003935
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003936 if (!is_susp(mdev->state)) {
Philipp Reisnerf70b35112010-06-24 14:34:40 +02003937 /* we must not free the tl_hash
3938 * while application io is still on the fly */
3939 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3940 drbd_free_tl_hash(mdev);
3941 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003942
3943 crypto_free_hash(mdev->cram_hmac_tfm);
3944 mdev->cram_hmac_tfm = NULL;
3945
3946 kfree(mdev->net_conf);
3947 mdev->net_conf = NULL;
3948 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3949 }
3950
3951 /* tcp_close and release of sendpage pages can be deferred. I don't
3952 * want to use SO_LINGER, because apparently it can be deferred for
3953 * more than 20 seconds (longest time I checked).
3954 *
3955 * Actually we don't care for exactly when the network stack does its
3956 * put_page(), but release our reference on these pages right here.
3957 */
3958 i = drbd_release_ee(mdev, &mdev->net_ee);
3959 if (i)
3960 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003961 i = atomic_read(&mdev->pp_in_use_by_net);
3962 if (i)
3963 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003964 i = atomic_read(&mdev->pp_in_use);
3965 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003966 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003967
3968 D_ASSERT(list_empty(&mdev->read_ee));
3969 D_ASSERT(list_empty(&mdev->active_ee));
3970 D_ASSERT(list_empty(&mdev->sync_ee));
3971 D_ASSERT(list_empty(&mdev->done_ee));
3972
3973 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3974 atomic_set(&mdev->current_epoch->epoch_size, 0);
3975 D_ASSERT(list_empty(&mdev->current_epoch->list));
3976}
3977
3978/*
3979 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3980 * we can agree on is stored in agreed_pro_version.
3981 *
3982 * feature flags and the reserved array should be enough room for future
3983 * enhancements of the handshake protocol, and possible plugins...
3984 *
3985 * for now, they are expected to be zero, but ignored.
3986 */
3987static int drbd_send_handshake(struct drbd_conf *mdev)
3988{
3989 /* ASSERT current == mdev->receiver ... */
3990 struct p_handshake *p = &mdev->data.sbuf.handshake;
3991 int ok;
3992
3993 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3994 dev_err(DEV, "interrupted during initial handshake\n");
3995 return 0; /* interrupted. not ok. */
3996 }
3997
3998 if (mdev->data.socket == NULL) {
3999 mutex_unlock(&mdev->data.mutex);
4000 return 0;
4001 }
4002
4003 memset(p, 0, sizeof(*p));
4004 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4005 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4006 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02004007 (struct p_header80 *)p, sizeof(*p), 0 );
Philipp Reisnerb411b362009-09-25 16:07:19 -07004008 mutex_unlock(&mdev->data.mutex);
4009 return ok;
4010}
4011
4012/*
4013 * return values:
4014 * 1 yes, we have a valid connection
4015 * 0 oops, did not work out, please try again
4016 * -1 peer talks different language,
4017 * no point in trying again, please go standalone.
4018 */
4019static int drbd_do_handshake(struct drbd_conf *mdev)
4020{
4021 /* ASSERT current == mdev->receiver ... */
4022 struct p_handshake *p = &mdev->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02004023 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
4024 unsigned int length;
4025 enum drbd_packets cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004026 int rv;
4027
4028 rv = drbd_send_handshake(mdev);
4029 if (!rv)
4030 return 0;
4031
Philipp Reisner02918be2010-08-20 14:35:10 +02004032 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004033 if (!rv)
4034 return 0;
4035
Philipp Reisner02918be2010-08-20 14:35:10 +02004036 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004037 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004038 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004039 return -1;
4040 }
4041
Philipp Reisner02918be2010-08-20 14:35:10 +02004042 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004043 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004044 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004045 return -1;
4046 }
4047
4048 rv = drbd_recv(mdev, &p->head.payload, expect);
4049
4050 if (rv != expect) {
4051 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
4052 return 0;
4053 }
4054
Philipp Reisnerb411b362009-09-25 16:07:19 -07004055 p->protocol_min = be32_to_cpu(p->protocol_min);
4056 p->protocol_max = be32_to_cpu(p->protocol_max);
4057 if (p->protocol_max == 0)
4058 p->protocol_max = p->protocol_min;
4059
4060 if (PRO_VERSION_MAX < p->protocol_min ||
4061 PRO_VERSION_MIN > p->protocol_max)
4062 goto incompat;
4063
4064 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4065
4066 dev_info(DEV, "Handshake successful: "
4067 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
4068
4069 return 1;
4070
4071 incompat:
4072 dev_err(DEV, "incompatible DRBD dialects: "
4073 "I support %d-%d, peer supports %d-%d\n",
4074 PRO_VERSION_MIN, PRO_VERSION_MAX,
4075 p->protocol_min, p->protocol_max);
4076 return -1;
4077}
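/* A hypothetical example of the version negotiation above: if we support
 * protocols 86..95 and the peer announces 90..97, the ranges overlap and
 * we agree on min(95, 97) == 95; if the peer announced 80..85 instead,
 * neither range reaches the other and we give up ("incompat"). */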
4078
4079#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4080static int drbd_do_auth(struct drbd_conf *mdev)
4081{
4082	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4083 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004084 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004085}
4086#else
4087#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004088
4089/* Return value:
4090 1 - auth succeeded,
4091 0 - failed, try again (network error),
4092 -1 - auth failed, don't try again.
4093*/
4094
Philipp Reisnerb411b362009-09-25 16:07:19 -07004095static int drbd_do_auth(struct drbd_conf *mdev)
4096{
4097 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4098 struct scatterlist sg;
4099 char *response = NULL;
4100 char *right_response = NULL;
4101 char *peers_ch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004102 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4103 unsigned int resp_size;
4104 struct hash_desc desc;
Philipp Reisner02918be2010-08-20 14:35:10 +02004105 enum drbd_packets cmd;
4106 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004107 int rv;
4108
4109 desc.tfm = mdev->cram_hmac_tfm;
4110 desc.flags = 0;
4111
4112 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4113 (u8 *)mdev->net_conf->shared_secret, key_len);
4114 if (rv) {
4115 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004116 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004117 goto fail;
4118 }
4119
4120 get_random_bytes(my_challenge, CHALLENGE_LEN);
4121
4122 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4123 if (!rv)
4124 goto fail;
4125
Philipp Reisner02918be2010-08-20 14:35:10 +02004126 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004127 if (!rv)
4128 goto fail;
4129
Philipp Reisner02918be2010-08-20 14:35:10 +02004130 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004131 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004132 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004133 rv = 0;
4134 goto fail;
4135 }
4136
Philipp Reisner02918be2010-08-20 14:35:10 +02004137 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004138		dev_err(DEV, "AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004139 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004140 goto fail;
4141 }
4142
Philipp Reisner02918be2010-08-20 14:35:10 +02004143 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004144 if (peers_ch == NULL) {
4145 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004146 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004147 goto fail;
4148 }
4149
Philipp Reisner02918be2010-08-20 14:35:10 +02004150 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004151
Philipp Reisner02918be2010-08-20 14:35:10 +02004152 if (rv != length) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004153 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4154 rv = 0;
4155 goto fail;
4156 }
4157
4158 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4159 response = kmalloc(resp_size, GFP_NOIO);
4160 if (response == NULL) {
4161 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004162 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004163 goto fail;
4164 }
4165
4166 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004167 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004168
4169 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4170 if (rv) {
4171 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004172 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004173 goto fail;
4174 }
4175
4176 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4177 if (!rv)
4178 goto fail;
4179
Philipp Reisner02918be2010-08-20 14:35:10 +02004180 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004181 if (!rv)
4182 goto fail;
4183
Philipp Reisner02918be2010-08-20 14:35:10 +02004184 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004185 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004186 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004187 rv = 0;
4188 goto fail;
4189 }
4190
Philipp Reisner02918be2010-08-20 14:35:10 +02004191 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004192		dev_err(DEV, "AuthResponse payload of wrong size\n");
4193 rv = 0;
4194 goto fail;
4195 }
4196
4197 rv = drbd_recv(mdev, response , resp_size);
4198
4199 if (rv != resp_size) {
4200 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4201 rv = 0;
4202 goto fail;
4203 }
4204
4205 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004206 if (right_response == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004207 dev_err(DEV, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004208 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004209 goto fail;
4210 }
4211
4212 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4213
4214 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4215 if (rv) {
4216 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004217 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004218 goto fail;
4219 }
4220
4221 rv = !memcmp(response, right_response, resp_size);
4222
4223 if (rv)
4224 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4225 resp_size, mdev->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004226 else
4227 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004228
4229 fail:
4230 kfree(peers_ch);
4231 kfree(response);
4232 kfree(right_response);
4233
4234 return rv;
4235}
4236#endif
4237
4238int drbdd_init(struct drbd_thread *thi)
4239{
4240 struct drbd_conf *mdev = thi->mdev;
4241 unsigned int minor = mdev_to_minor(mdev);
4242 int h;
4243
4244 sprintf(current->comm, "drbd%d_receiver", minor);
4245
4246 dev_info(DEV, "receiver (re)started\n");
4247
4248 do {
4249 h = drbd_connect(mdev);
4250 if (h == 0) {
4251 drbd_disconnect(mdev);
4252 __set_current_state(TASK_INTERRUPTIBLE);
4253 schedule_timeout(HZ);
4254 }
4255 if (h == -1) {
4256 dev_warn(DEV, "Discarding network configuration.\n");
4257 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4258 }
4259 } while (h == 0);
4260
4261 if (h > 0) {
4262 if (get_net_conf(mdev)) {
4263 drbdd(mdev);
4264 put_net_conf(mdev);
4265 }
4266 }
4267
4268 drbd_disconnect(mdev);
4269
4270 dev_info(DEV, "receiver terminated\n");
4271 return 0;
4272}
4273
4274/* ********* acknowledge sender ******** */
4275
Philipp Reisner0b70a132010-08-20 13:36:10 +02004276static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004277{
4278 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4279
4280 int retcode = be32_to_cpu(p->retcode);
4281
4282 if (retcode >= SS_SUCCESS) {
4283 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4284 } else {
4285 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4286 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4287 drbd_set_st_err_str(retcode), retcode);
4288 }
4289 wake_up(&mdev->state_wait);
4290
4291 return TRUE;
4292}
4293
Philipp Reisner0b70a132010-08-20 13:36:10 +02004294static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004295{
4296 return drbd_send_ping_ack(mdev);
4297
4298}
4299
Philipp Reisner0b70a132010-08-20 13:36:10 +02004300static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004301{
4302 /* restore idle timeout */
4303 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
Philipp Reisner309d1602010-03-02 15:03:44 +01004304 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4305 wake_up(&mdev->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004306
4307 return TRUE;
4308}
4309
Philipp Reisner0b70a132010-08-20 13:36:10 +02004310static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004311{
4312 struct p_block_ack *p = (struct p_block_ack *)h;
4313 sector_t sector = be64_to_cpu(p->sector);
4314 int blksize = be32_to_cpu(p->blksize);
4315
4316 D_ASSERT(mdev->agreed_pro_version >= 89);
4317
4318 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4319
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004320 if (get_ldev(mdev)) {
4321 drbd_rs_complete_io(mdev, sector);
4322 drbd_set_in_sync(mdev, sector, blksize);
4323 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4324 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4325 put_ldev(mdev);
4326 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004327 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004328 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004329
4330 return TRUE;
4331}
4332
4333/* when we receive the ACK for a write request,
4334 * verify that we actually know about it */
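/* The block_id in the ACK is simply the value we put into the data packet,
 * which is the kernel address of our drbd_request; the peer echoes it back
 * unchanged, so we can look the request up in tl_hash by sector and match
 * it against that pointer value. */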
4335static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4336 u64 id, sector_t sector)
4337{
4338 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4339 struct hlist_node *n;
4340 struct drbd_request *req;
4341
4342 hlist_for_each_entry(req, n, slot, colision) {
4343 if ((unsigned long)req == (unsigned long)id) {
4344 if (req->sector != sector) {
4345 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4346 "wrong sector (%llus versus %llus)\n", req,
4347 (unsigned long long)req->sector,
4348 (unsigned long long)sector);
4349 break;
4350 }
4351 return req;
4352 }
4353 }
4354 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4355 (void *)(unsigned long)id, (unsigned long long)sector);
4356 return NULL;
4357}
4358
4359typedef struct drbd_request *(req_validator_fn)
4360 (struct drbd_conf *mdev, u64 id, sector_t sector);
4361
4362static int validate_req_change_req_state(struct drbd_conf *mdev,
4363 u64 id, sector_t sector, req_validator_fn validator,
4364 const char *func, enum drbd_req_event what)
4365{
4366 struct drbd_request *req;
4367 struct bio_and_error m;
4368
4369 spin_lock_irq(&mdev->req_lock);
4370 req = validator(mdev, id, sector);
4371 if (unlikely(!req)) {
4372 spin_unlock_irq(&mdev->req_lock);
4373 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4374 return FALSE;
4375 }
4376 __req_mod(req, what, &m);
4377 spin_unlock_irq(&mdev->req_lock);
4378
4379 if (m.bio)
4380 complete_master_bio(mdev, &m);
4381 return TRUE;
4382}
4383
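/* Which ACK type comes back depends on the wire protocol in use:
 * protocol C (synchronous) acknowledges with P_WRITE_ACK / P_RS_WRITE_ACK
 * once the data hit the peer's disk, protocol B (memory synchronous)
 * sends P_RECV_ACK as soon as the data reached the peer, and
 * P_DISCARD_ACK resolves a detected concurrent write.  The D_ASSERTs
 * below check exactly that correspondence. */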
Philipp Reisner0b70a132010-08-20 13:36:10 +02004384static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004385{
4386 struct p_block_ack *p = (struct p_block_ack *)h;
4387 sector_t sector = be64_to_cpu(p->sector);
4388 int blksize = be32_to_cpu(p->blksize);
4389 enum drbd_req_event what;
4390
4391 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4392
4393 if (is_syncer_block_id(p->block_id)) {
4394 drbd_set_in_sync(mdev, sector, blksize);
4395 dec_rs_pending(mdev);
4396 return TRUE;
4397 }
4398 switch (be16_to_cpu(h->command)) {
4399 case P_RS_WRITE_ACK:
4400 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4401 what = write_acked_by_peer_and_sis;
4402 break;
4403 case P_WRITE_ACK:
4404 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4405 what = write_acked_by_peer;
4406 break;
4407 case P_RECV_ACK:
4408 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4409 what = recv_acked_by_peer;
4410 break;
4411 case P_DISCARD_ACK:
4412 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4413 what = conflict_discarded_by_peer;
4414 break;
4415 default:
4416 D_ASSERT(0);
4417 return FALSE;
4418 }
4419
4420 return validate_req_change_req_state(mdev, p->block_id, sector,
4421 _ack_id_to_req, __func__ , what);
4422}
4423
Philipp Reisner0b70a132010-08-20 13:36:10 +02004424static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004425{
4426 struct p_block_ack *p = (struct p_block_ack *)h;
4427 sector_t sector = be64_to_cpu(p->sector);
4428
4429 if (__ratelimit(&drbd_ratelimit_state))
4430		dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");
4431
4432 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4433
4434 if (is_syncer_block_id(p->block_id)) {
4435 int size = be32_to_cpu(p->blksize);
4436 dec_rs_pending(mdev);
4437 drbd_rs_failed_io(mdev, sector, size);
4438 return TRUE;
4439 }
4440 return validate_req_change_req_state(mdev, p->block_id, sector,
4441 _ack_id_to_req, __func__ , neg_acked);
4442}
4443
Philipp Reisner0b70a132010-08-20 13:36:10 +02004444static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004445{
4446 struct p_block_ack *p = (struct p_block_ack *)h;
4447 sector_t sector = be64_to_cpu(p->sector);
4448
4449 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4450 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4451 (unsigned long long)sector, be32_to_cpu(p->blksize));
4452
4453 return validate_req_change_req_state(mdev, p->block_id, sector,
4454 _ar_id_to_req, __func__ , neg_acked);
4455}
4456
Philipp Reisner0b70a132010-08-20 13:36:10 +02004457static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004458{
4459 sector_t sector;
4460 int size;
4461 struct p_block_ack *p = (struct p_block_ack *)h;
4462
4463 sector = be64_to_cpu(p->sector);
4464 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004465
4466 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4467
4468 dec_rs_pending(mdev);
4469
4470 if (get_ldev_if_state(mdev, D_FAILED)) {
4471 drbd_rs_complete_io(mdev, sector);
4472 drbd_rs_failed_io(mdev, sector, size);
4473 put_ldev(mdev);
4474 }
4475
4476 return TRUE;
4477}
4478
Philipp Reisner0b70a132010-08-20 13:36:10 +02004479static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004480{
4481 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4482
4483 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4484
4485 return TRUE;
4486}
4487
Philipp Reisner0b70a132010-08-20 13:36:10 +02004488static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004489{
4490 struct p_block_ack *p = (struct p_block_ack *)h;
4491 struct drbd_work *w;
4492 sector_t sector;
4493 int size;
4494
4495 sector = be64_to_cpu(p->sector);
4496 size = be32_to_cpu(p->blksize);
4497
4498 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4499
4500 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4501 drbd_ov_oos_found(mdev, sector, size);
4502 else
4503 ov_oos_print(mdev);
4504
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004505 if (!get_ldev(mdev))
4506 return TRUE;
4507
Philipp Reisnerb411b362009-09-25 16:07:19 -07004508 drbd_rs_complete_io(mdev, sector);
4509 dec_rs_pending(mdev);
4510
4511 if (--mdev->ov_left == 0) {
4512 w = kmalloc(sizeof(*w), GFP_NOIO);
4513 if (w) {
4514 w->cb = w_ov_finished;
4515 drbd_queue_work_front(&mdev->data.work, w);
4516 } else {
4517			dev_err(DEV, "kmalloc(w) failed.\n");
4518 ov_oos_print(mdev);
4519 drbd_resync_finished(mdev);
4520 }
4521 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004522 put_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004523 return TRUE;
4524}
4525
Philipp Reisner02918be2010-08-20 14:35:10 +02004526static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004527{
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004528 return TRUE;
4529}
4530
Philipp Reisnerb411b362009-09-25 16:07:19 -07004531struct asender_cmd {
4532 size_t pkt_size;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004533 int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004534};
4535
4536static struct asender_cmd *get_asender_cmd(int cmd)
4537{
4538 static struct asender_cmd asender_tbl[] = {
4539 /* anything missing from this table is in
4540 * the drbd_cmd_handler (drbd_default_handler) table,
4541 * see the beginning of drbdd() */
Philipp Reisner0b70a132010-08-20 13:36:10 +02004542 [P_PING] = { sizeof(struct p_header80), got_Ping },
4543 [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07004544 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4545 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4546 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4547 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4548 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4549 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4550 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4551 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4552 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4553 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4554 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
Philipp Reisner02918be2010-08-20 14:35:10 +02004555 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
Philipp Reisnerb411b362009-09-25 16:07:19 -07004556 [P_MAX_CMD] = { 0, NULL },
4557 };
4558 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4559 return NULL;
4560 return &asender_tbl[cmd];
4561}
4562
4563int drbd_asender(struct drbd_thread *thi)
4564{
4565 struct drbd_conf *mdev = thi->mdev;
Philipp Reisner02918be2010-08-20 14:35:10 +02004566 struct p_header80 *h = &mdev->meta.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004567 struct asender_cmd *cmd = NULL;
4568
	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

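		/* all queued ACKs are on the wire and the socket is
		 * uncorked; wait for the peer's next packet, bounded by
		 * the socket receive timeout */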
		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

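		/* the bare header is complete (cmd still NULL): check the
		 * magic, look up the handler, and widen 'expect' to the
		 * handler's full packet size */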
		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
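		/* the whole packet is in: run the handler, then reset the
		 * receive state for the next header */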
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

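	/* the labels below are reachable only via goto; the enclosing
	 * if (0) keeps these error paths out of normal fall-through */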
	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}