/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

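/*
 * Illustrative sketch (not part of the driver): how a chain built on
 * page->private is assembled and walked.  Assumes the page_chain_next()
 * and page_chain_for_each() helpers from drbd_int.h; do_something() is
 * a placeholder, not a real function.
 *
 *	struct page *head = NULL, *p;
 *
 *	p = alloc_page(GFP_TRY);
 *	set_page_private(p, (unsigned long)head);	(p->next = head)
 *	head = p;					(push front)
 *
 *	page_chain_for_each(p)				(walk until private == 0)
 *		do_something(p);
 */
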
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

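/*
 * Illustrative pairing (a sketch, not driver code): a receive path that
 * needs two pages would typically do something like
 *
 *	struct page *page = drbd_pp_alloc(mdev, 2, true);	(may sleep)
 *	if (page) {
 *		... fill the chain from the socket ...
 *		drbd_pp_free(mdev, page, 0);	(0: accounted in pp_in_use)
 *	}
 *
 * The "..." body is elided on purpose; the point is only that every
 * chain handed out by drbd_pp_alloc() goes back through drbd_pp_free(),
 * which also wakes waiters stuck in drbd_pp_alloc().
 */
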
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

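/*
 * Sketch of the locking discipline above (illustrative only): the
 * underscore-prefixed variant assumes the caller already holds the
 * lock, the plain variant takes it itself.
 *
 *	spin_lock_irq(&mdev->req_lock);
 *	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);	(lock held)
 *	spin_unlock_irq(&mdev->req_lock);
 *
 *	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);	(takes the lock)
 */
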
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


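/*
 * Lifecycle sketch of an epoch entry in the receiver write path
 * (illustrative only; error handling elided, and the fault type name
 * is an assumption, see drbd_int.h):
 *
 *	e = drbd_alloc_ee(mdev, id, sector, size, GFP_NOIO);
 *	list_add(&e->w.list, &mdev->active_ee);		(under req_lock)
 *	drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR);
 *	completion moves e to done_ee; the asender then runs
 *	drbd_process_done_ee() -> e->w.cb() -> drbd_free_ee()
 */
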
/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept, which is only present since 2.6.18.
 * We also want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

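/*
 * Illustrative ordering (a sketch, not driver code), per the tcp(7)
 * quote above -- buffer sizes only stick if set before connect()/listen():
 *
 *	sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
 *	drbd_setbufsize(sock, snd_size, rcv_size);	(first)
 *	sock->ops->connect(sock, addr, addrlen, 0);	(then connect)
 */
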
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

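/*
 * Sketch of the two on-wire header variants decoded above (field
 * layout inferred from the accessors here; widths in bytes are
 * illustrative, see drbd_int.h for the authoritative definitions):
 *
 *	h80: | magic (4) | command (2) | length (2) |
 *	h95: | magic (2) | command (2) | length (4) |
 *
 * The magic value distinguishes the variants, so a peer speaking the
 * older protocol can be handled on the same socket.
 */
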
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

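/*
 * Illustrative epoch life cycle (a sketch of where the events above are
 * generated elsewhere in the receiver, not additional driver logic):
 *
 *	receive_Barrier()  -> drbd_may_finish_epoch(..., EV_GOT_BARRIER_NR)
 *	e_end_block()      -> drbd_may_finish_epoch(..., EV_PUT)
 *	cleanup paths      -> drbd_may_finish_epoch(..., ev | EV_CLEANUP)
 *
 * An epoch finishes (P_BARRIER_ACK is sent) once it is non-empty, has
 * no active writes, and its barrier number is known.
 */
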
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee() - Submit an epoch entry's page chain as one or more bios
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

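/*
 * On-wire layout consumed by read_in_block() above (illustrative):
 *
 *	| digest (dgs bytes, optional) | payload (data_size - dgs bytes) |
 *
 * dgs is nonzero only if the agreed protocol is >= 87 and an integrity
 * transform is configured; the payload is then checksummed locally and
 * compared to the received digest before the block is accepted.
 */
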
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1591static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1592{
1593 DEFINE_WAIT(wait);
1594 unsigned int p_seq;
1595 long timeout;
1596 int ret = 0;
1597 spin_lock(&mdev->peer_seq_lock);
1598 for (;;) {
1599 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1600 if (seq_le(packet_seq, mdev->peer_seq+1))
1601 break;
1602 if (signal_pending(current)) {
1603 ret = -ERESTARTSYS;
1604 break;
1605 }
1606 p_seq = mdev->peer_seq;
1607 spin_unlock(&mdev->peer_seq_lock);
1608 timeout = schedule_timeout(30*HZ);
1609 spin_lock(&mdev->peer_seq_lock);
1610 if (timeout == 0 && p_seq == mdev->peer_seq) {
1611 ret = -ETIMEDOUT;
1612 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1613 break;
1614 }
1615 }
1616 finish_wait(&mdev->seq_wait, &wait);
1617 if (mdev->peer_seq+1 == packet_seq)
1618 mdev->peer_seq++;
1619 spin_unlock(&mdev->peer_seq_lock);
1620 return ret;
1621}
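
/* seq_le() is defined elsewhere; a plausible sketch (hypothetical, for
 * illustration only) of a wrap-safe "less or equal" on u32 sequence numbers:
 *
 *	static inline int seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 *
 * E.g. with mdev->peer_seq == 0xffffffff, the logically next packet_seq is 0,
 * and seq_le(0, peer_seq + 1) == seq_le(0, 0) holds across the wrap, which is
 * what lets the loop above accept it. */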

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
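
/* The sending side's bio_flags_to_wire() lives elsewhere; it is presumably
 * the exact inverse of the mapping above.  A hypothetical sketch, for
 * illustration only:
 *
 *	static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
 *	{
 *		return  (bi_rw & REQ_SYNC    ? DP_RW_SYNC : 0) |
 *			(bi_rw & REQ_FUA     ? DP_FUA     : 0) |
 *			(bi_rw & REQ_FLUSH   ? DP_FLUSH   : 0) |
 *			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 *	}
 */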

/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *    queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *    block the receiver, waiting on misc_wait
		 *    until no more conflicting requests are there,
		 *    or we get interrupted (disconnect).
		 *
		 *    we do not just write after local io completion of those
		 *    requests, but only after req is done completely, i.e.
		 *    we wait for the P_DISCARD_ACK to arrive!
		 *
		 *    then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
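		/* overlaps() is defined elsewhere; it tests whether the
		 * pending request at (i->sector, i->size) intersects the
		 * incoming write at (sector, size).  Roughly (hypothetical
		 * formula; sizes in bytes, sectors in 512-byte units):
		 *   s1 + (l1 >> 9) > s2  &&  s2 + (l2 >> 9) > s1 */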
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, colision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return true;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->colision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}
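
	/* Per-protocol summary of the ack behaviour set up above:
	 *   A: nothing is sent back at all; with protocol A the primary
	 *      does not wait for any network ack.
	 *   B: P_RECV_ACK goes out as soon as the data was received.
	 *   C: the unacked count is bumped here; the P_WRITE_ACK /
	 *      P_RS_WRITE_ACK is sent from e_end_block() after the local
	 *      disk write completed. */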

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	/* yes, the epoch_size now is imbalanced.
	 * but we drop the connection anyways, so we don't have a chance to
	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID's is_mddev_idle(): if the partition stats reveal a "significant"
 * amount (more than 64 sectors) of activity we cannot account for with our
 * own resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
			throttle = 1;
	}
	return throttle;
}
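
/* Worked example for the rate check above (illustrative numbers only):
 * suppose the selected mark is 6 seconds old (dt = 6) and 3000 bitmap bits
 * were cleared since then (db = 3000).  Bit2KB() scales bitmap bits to
 * KibiByte (assuming the usual 4 KiB of backing storage per bitmap bit,
 * Bit2KB(x) == x << 2), so dbdt = Bit2KB(3000 / 6) = 2000 KiB/s.  Only when
 * the "unaccounted activity > 64 sectors" test above hits *and* dbdt exceeds
 * the configured c_min_rate do we report throttle = 1. */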


static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain the possibly present payload */
		return drbd_drain_block(mdev, digest_size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->digest = di;
		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}
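
/* Quick reference for the dispatch in receive_DataRequest() above:
 *   P_DATA_REQUEST    -> w_e_end_data_req    (application read; skips
 *                                             drbd_rs_begin_io, "goto submit")
 *   P_RS_DATA_REQUEST -> w_e_end_rsdata_req
 *   P_CSUM_RS_REQUEST -> w_e_end_csum_rs_req (carries a digest payload)
 *   P_OV_REPLY        -> w_e_end_ov_reply    (carries a digest payload)
 *   P_OV_REQUEST      -> w_e_end_ov_req
 * All of them end up on read_ee and are submitted via drbd_submit_ee(). */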

static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = 1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}
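
/* Return value convention shared by the three recover functions:
 *    1: discard the peer's data (this node would become sync source),
 *   -1: discard this node's data (become sync target),
 * -100: no automatic decision.
 * These values feed straight into the hg handling of
 * drbd_sync_handshake() below. */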

static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}

/*
  100	after split brain, try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the
			   modifications to the peer's UUIDs from the last
			   start of resync as sync source. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];

			dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the
			   modifications to our UUIDs from the last start of
			   resync as sync source. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}


	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}
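
/* Note that every comparison above masks out the lowest UUID bit,
 * ~((u64)1): that bit is used as a "was primary" marker rather than as
 * part of the UUID value itself -- drbd_asb_recover_0p() above reads
 * exactly that bit (uuid[UI_BITMAP] & 1) for its younger/older primary
 * decision -- so UUID identity is compared modulo it. */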

/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
			     "assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}
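
/* Example: after-sb-0pri set to discard-local on this node is accepted
 * only if the peer uses discard-remote (and vice versa); any other
 * combination involving discard-local/discard-remote is rejected, and
 * all remaining policies simply have to match on both sides. */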

static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_protocol *p = &mdev->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose = cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return false;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return true;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
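
/* Callers must honour the three-way contract spelled out above: NULL
 * means "feature disabled" (empty algorithm name), IS_ERR() signals an
 * allocation or validation failure, and only a plain pointer may be
 * installed as the new tfm (see receive_SyncParam() below). */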

static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
	int ok = true;
	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    packet_size, exp_max_sz);
		return false;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return false;

	mdev->sync_conf.rate	  = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
				    "peer wants %u, accepting only %u bytes\n",
						data_size, SHARED_SECRET_MAX);
				return false;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return false;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate	  = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kzalloc of fifo_buffer failed");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size   = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;
disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ */
}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}
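
/* Example: a = 1000, b = 880 gives d = 120; a>>3 = 125, b>>3 = 110.
 * Since d > b>>3, the warning triggers: the values differ by more than
 * 12.5% of (at least) the smaller one. */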

static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_sizes *p = &mdev->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	unsigned int max_bio_size;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					     p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
			     (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		   drbd_get_capacity(mdev->this_bdev) &&
		   mdev->state.disk >= D_OUTDATED &&
		   mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determin_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return false;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		if (mdev->agreed_pro_version < 94)
			max_bio_size = be32_to_cpu(p->max_bio_size);
		else if (mdev->agreed_pro_version == 94)
			max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			max_bio_size = DRBD_MAX_BIO_SIZE;

Lars Ellenberg1816a2b2010-11-11 15:19:07 +01003003 if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
3004 drbd_setup_queue_param(mdev, max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003005
Philipp Reisnere89b5912010-03-24 17:11:33 +01003006 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003007 put_ldev(mdev);
3008 }
3009
3010 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3011 if (be64_to_cpu(p->c_size) !=
3012 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3013 /* we have different sizes, probably peer
3014 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003015 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003016 }
3017 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3018 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3019 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003020 mdev->state.disk >= D_INCONSISTENT) {
3021 if (ddsf & DDSF_NO_RESYNC)
3022 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3023 else
3024 resync_after_online_grow(mdev);
3025 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003026 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3027 }
3028 }
3029
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003030 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003031}
3032
Philipp Reisner02918be2010-08-20 14:35:10 +02003033static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034{
Philipp Reisner02918be2010-08-20 14:35:10 +02003035 struct p_uuids *p = &mdev->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003036 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003037 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003038
Philipp Reisnerb411b362009-09-25 16:07:19 -07003039	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) {
		dev_err(DEV, "kmalloc of p_uuid failed\n");
		return false;
	}
3040
3041 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3042 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3043
3044 kfree(mdev->p_uuid);
3045 mdev->p_uuid = p_uuid;
3046
3047 if (mdev->state.conn < C_CONNECTED &&
3048 mdev->state.disk < D_INCONSISTENT &&
3049 mdev->state.role == R_PRIMARY &&
3050 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3051 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3052 (unsigned long long)mdev->ed_uuid);
3053 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003054 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003055 }
3056
3057 if (get_ldev(mdev)) {
3058 int skip_initial_sync =
3059 mdev->state.conn == C_CONNECTED &&
3060 mdev->agreed_pro_version >= 90 &&
3061 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3062 (p_uuid[UI_FLAGS] & 8);
3063 if (skip_initial_sync) {
3064 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3065 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003066 "clear_n_write from receive_uuids",
3067 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003068 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3069 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3070 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3071 CS_VERBOSE, NULL);
3072 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003073 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003074 }
3075 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003076 } else if (mdev->state.disk < D_INCONSISTENT &&
3077 mdev->state.role == R_PRIMARY) {
3078 /* I am a diskless primary, the peer just created a new current UUID
3079 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003080 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003081 }
3082
3083	/* Before we test for the disk state, we should wait until a possibly
3084	   ongoing cluster-wide state change has finished. That is important if
3085 we are primary and are detaching from our disk. We need to see the
3086 new disk state... */
3087 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3088 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003089 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3090
3091 if (updated_uuids)
3092 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003093
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003094 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003095}
3096
3097/**
3098 * convert_state() - Converts the peer's view of the cluster state to our point of view
3099 * @ps: The state as seen by the peer.
3100 */
3101static union drbd_state convert_state(union drbd_state ps)
3102{
3103 union drbd_state ms;
3104
3105 static enum drbd_conns c_tab[] = {
3106 [C_CONNECTED] = C_CONNECTED,
3107
3108 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3109 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3110 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3111 [C_VERIFY_S] = C_VERIFY_T,
3112 [C_MASK] = C_MASK,
3113 };
3114
3115 ms.i = ps.i;
3116
3117 ms.conn = c_tab[ps.conn];
3118 ms.peer = ps.role;
3119 ms.role = ps.peer;
3120 ms.pdsk = ps.disk;
3121 ms.disk = ps.pdsk;
3122 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3123
3124 return ms;
3125}
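/* Illustrative example: if the peer reports
 * { conn = C_STARTING_SYNC_S, role = R_PRIMARY, peer = R_SECONDARY,
 *   disk = D_UP_TO_DATE, pdsk = D_INCONSISTENT },
 * the converted local view is
 * { conn = C_STARTING_SYNC_T, role = R_SECONDARY, peer = R_PRIMARY,
 *   disk = D_INCONSISTENT, pdsk = D_UP_TO_DATE }:
 * roles and disk states swap sides, and the sync-source connection
 * state maps to its sync-target counterpart. */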
3126
Philipp Reisner02918be2010-08-20 14:35:10 +02003127static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003128{
Philipp Reisner02918be2010-08-20 14:35:10 +02003129 struct p_req_state *p = &mdev->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003130 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003131 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003132
Philipp Reisnerb411b362009-09-25 16:07:19 -07003133 mask.i = be32_to_cpu(p->mask);
3134 val.i = be32_to_cpu(p->val);
3135
3136 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3137 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3138 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003139 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003140 }
3141
3142 mask = convert_state(mask);
3143 val = convert_state(val);
3144
3145 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3146
3147 drbd_send_sr_reply(mdev, rv);
3148 drbd_md_sync(mdev);
3149
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003150 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003151}
3152
Philipp Reisner02918be2010-08-20 14:35:10 +02003153static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003154{
Philipp Reisner02918be2010-08-20 14:35:10 +02003155 struct p_state *p = &mdev->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003156 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003157 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003158 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003159 int rv;
3160
Philipp Reisnerb411b362009-09-25 16:07:19 -07003161 peer_state.i = be32_to_cpu(p->state);
3162
3163 real_peer_disk = peer_state.disk;
3164 if (peer_state.disk == D_NEGOTIATING) {
3165 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3166 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3167 }
3168
3169 spin_lock_irq(&mdev->req_lock);
3170 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003171 os = ns = mdev->state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003172 spin_unlock_irq(&mdev->req_lock);
3173
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003174 /* peer says his disk is uptodate, while we think it is inconsistent,
3175 * and this happens while we think we have a sync going on. */
3176 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3177 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3178 /* If we are (becoming) SyncSource, but peer is still in sync
3179 * preparation, ignore its uptodate-ness to avoid flapping, it
3180 * will change to inconsistent once the peer reaches active
3181 * syncing states.
3182 * It may have changed syncer-paused flags, however, so we
3183 * cannot ignore this completely. */
3184 if (peer_state.conn > C_CONNECTED &&
3185 peer_state.conn < C_SYNC_SOURCE)
3186 real_peer_disk = D_INCONSISTENT;
3187
3188 /* if peer_state changes to connected at the same time,
3189 * it explicitly notifies us that it finished resync.
3190 * Maybe we should finish it up, too? */
3191 else if (os.conn >= C_SYNC_SOURCE &&
3192 peer_state.conn == C_CONNECTED) {
3193 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3194 drbd_resync_finished(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003195 return true;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003196 }
3197 }
3198
3199 /* peer says his disk is inconsistent, while we think it is uptodate,
3200 * and this happens while the peer still thinks we have a sync going on,
3201 * but we think we are already done with the sync.
3202 * We ignore this to avoid flapping pdsk.
3203	 * This should not happen if the peer is a recent version of drbd. */
3204 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3205 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3206 real_peer_disk = D_UP_TO_DATE;
3207
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003208 if (ns.conn == C_WF_REPORT_PARAMS)
3209 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003210
Philipp Reisner67531712010-10-27 12:21:30 +02003211 if (peer_state.conn == C_AHEAD)
3212 ns.conn = C_BEHIND;
3213
Philipp Reisnerb411b362009-09-25 16:07:19 -07003214 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3215 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3216 int cr; /* consider resync */
3217
3218 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003219 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003220 /* if we had an established connection
3221 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003222 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003223 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003224 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003225 /* if we have both been inconsistent, and the peer has been
3226 * forced to be UpToDate with --overwrite-data */
3227 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3228 /* if we had been plain connected, and the admin requested to
3229 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003230 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003231 (peer_state.conn >= C_STARTING_SYNC_S &&
3232 peer_state.conn <= C_WF_BITMAP_T));
3233
3234 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003235 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003236
3237 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003238 if (ns.conn == C_MASK) {
3239 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003240 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003241 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003242 } else if (peer_state.disk == D_NEGOTIATING) {
3243 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3244 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003245 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003246 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003247 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003248 return false;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003249 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003250 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003251 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003252 }
3253 }
3254 }
3255
3256 spin_lock_irq(&mdev->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003257 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003258 goto retry;
3259 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003260 ns.peer = peer_state.role;
3261 ns.pdsk = real_peer_disk;
3262 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003263 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003264 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003265 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3266 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003267 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3268 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3269	   for temporary network outages! */
3270 spin_unlock_irq(&mdev->req_lock);
3271 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3272 tl_clear(mdev);
3273 drbd_uuid_new_current(mdev);
3274 clear_bit(NEW_CUR_UUID, &mdev->flags);
3275 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003276 return false;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003277 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003278 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003279 ns = mdev->state;
3280 spin_unlock_irq(&mdev->req_lock);
3281
3282 if (rv < SS_SUCCESS) {
3283 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003284 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003285 }
3286
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003287 if (os.conn > C_WF_REPORT_PARAMS) {
3288 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003289 peer_state.disk != D_NEGOTIATING ) {
3290 /* we want resync, peer has not yet decided to sync... */
3291 /* Nowadays only used when forcing a node into primary role and
3292 setting its disk to UpToDate with that */
3293 drbd_send_uuids(mdev);
3294 drbd_send_state(mdev);
3295 }
3296 }
3297
3298 mdev->net_conf->want_lose = 0;
3299
3300 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3301
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003302 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003303}
3304
Philipp Reisner02918be2010-08-20 14:35:10 +02003305static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003306{
Philipp Reisner02918be2010-08-20 14:35:10 +02003307 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003308
3309 wait_event(mdev->misc_wait,
3310 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003311 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003312 mdev->state.conn < C_CONNECTED ||
3313 mdev->state.disk < D_NEGOTIATING);
3314
3315 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3316
Philipp Reisnerb411b362009-09-25 16:07:19 -07003317 /* Here the _drbd_uuid_ functions are right, current should
3318 _not_ be rotated into the history */
3319 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3320 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3321 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3322
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003323 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003324 drbd_start_resync(mdev, C_SYNC_TARGET);
3325
3326 put_ldev(mdev);
3327 } else
3328 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3329
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003330 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003331}
3332
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003333/**
3334 * receive_bitmap_plain
3335 *
3336 * Return 0 when done, 1 when another iteration is needed, and a negative error
3337 * code upon failure.
3338 */
3339static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003340receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3341 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003342{
3343 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3344 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003345 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003346
Philipp Reisner02918be2010-08-20 14:35:10 +02003347 if (want != data_size) {
3348 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003349 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003350 }
3351 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003352 return 0;
3353 err = drbd_recv(mdev, buffer, want);
3354 if (err != want) {
3355 if (err >= 0)
3356 err = -EIO;
3357 return err;
3358 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003359
3360 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3361
3362 c->word_offset += num_words;
3363 c->bit_offset = c->word_offset * BITS_PER_LONG;
3364 if (c->bit_offset > c->bm_bits)
3365 c->bit_offset = c->bm_bits;
3366
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003367 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003368}
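/* Worked example (BM_PACKET_WORDS assumed to be 512 purely for
 * illustration): with c->bm_words = 1000 and c->word_offset = 900, only
 * num_words = 100 words remain, so data_size must be exactly
 * 100 * sizeof(long) bytes.  After drbd_bm_merge_lel() the word_offset
 * advances to 1000; this call returns 1 (another iteration needed) and
 * the next call sees want == 0 and returns 0 (done). */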
3369
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003370/**
3371 * recv_bm_rle_bits
3372 *
3373 * Return 0 when done, 1 when another iteration is needed, and a negative error
3374 * code upon failure.
3375 */
3376static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003377recv_bm_rle_bits(struct drbd_conf *mdev,
3378 struct p_compressed_bm *p,
3379 struct bm_xfer_ctx *c)
3380{
3381 struct bitstream bs;
3382 u64 look_ahead;
3383 u64 rl;
3384 u64 tmp;
3385 unsigned long s = c->bit_offset;
3386 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003387 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003388 int toggle = DCBP_get_start(p);
3389 int have;
3390 int bits;
3391
3392 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3393
3394 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3395 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003396 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003397
3398 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3399 bits = vli_decode_bits(&rl, look_ahead);
3400 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003401 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003402
3403 if (toggle) {
3404 e = s + rl -1;
3405 if (e >= c->bm_bits) {
3406 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003407 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003408 }
3409 _drbd_bm_set_bits(mdev, s, e);
3410 }
3411
3412 if (have < bits) {
3413 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3414 have, bits, look_ahead,
3415 (unsigned int)(bs.cur.b - p->code),
3416 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003417 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003418 }
3419 look_ahead >>= bits;
3420 have -= bits;
3421
3422 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3423 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003424 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003425 look_ahead |= tmp << have;
3426 have += bits;
3427 }
3428
3429 c->bit_offset = s;
3430 bm_xfer_ctx_bit_to_word_offset(c);
3431
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003432 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003433}
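/* Illustrative decoding trace (run lengths hypothetical): with
 * DCBP_get_start(p) == 0 and decoded run lengths 5, 3, 7 starting at
 * bit_offset 0, bits 0..4 stay clear, bits 5..7 are set via
 * _drbd_bm_set_bits(), and bits 8..14 stay clear again.  Runs strictly
 * alternate between clear and set, so only the VLI-encoded lengths and
 * the start polarity need to cross the wire. */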
3434
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003435/**
3436 * decode_bitmap_c
3437 *
3438 * Return 0 when done, 1 when another iteration is needed, and a negative error
3439 * code upon failure.
3440 */
3441static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003442decode_bitmap_c(struct drbd_conf *mdev,
3443 struct p_compressed_bm *p,
3444 struct bm_xfer_ctx *c)
3445{
3446 if (DCBP_get_code(p) == RLE_VLI_Bits)
3447 return recv_bm_rle_bits(mdev, p, c);
3448
3449 /* other variants had been implemented for evaluation,
3450 * but have been dropped as this one turned out to be "best"
3451 * during all our tests. */
3452
3453 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3454 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003455 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003456}
3457
3458void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3459 const char *direction, struct bm_xfer_ctx *c)
3460{
3461 /* what would it take to transfer it "plaintext" */
Philipp Reisner0b70a132010-08-20 13:36:10 +02003462 unsigned plain = sizeof(struct p_header80) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003463 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3464 + c->bm_words * sizeof(long);
3465 unsigned total = c->bytes[0] + c->bytes[1];
3466 unsigned r;
3467
3468	/* total cannot be zero, but just in case: */
3469 if (total == 0)
3470 return;
3471
3472 /* don't report if not compressed */
3473 if (total >= plain)
3474 return;
3475
3476 /* total < plain. check for overflow, still */
3477 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3478 : (1000 * total / plain);
3479
3480 if (r > 1000)
3481 r = 1000;
3482
3483 r = 1000 - r;
3484 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3485 "total %u; compression: %u.%u%%\n",
3486 direction,
3487 c->bytes[1], c->packets[1],
3488 c->bytes[0], c->packets[0],
3489 total, r/10, r % 10);
3490}
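/* Worked example (hypothetical numbers): for plain = 1000000 bytes and
 * total = 150000 bytes, r = 1000 * 150000 / 1000000 = 150, then
 * r = 1000 - 150 = 850, which is printed as "compression: 85.0%".
 * When total >= plain the transfer was not compressed and nothing is
 * reported at all. */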
3491
3492/* Since we are processing the bitfield from lower addresses to higher,
3493   it does not matter whether we process it in 32 bit chunks or 64 bit
3494   chunks, as long as it is little endian. (Understand it as a byte stream,
3495   beginning with the lowest byte...) If we used big endian,
3496 we would need to process it from the highest address to the lowest,
3497 in order to be agnostic to the 32 vs 64 bits issue.
3498
3499 returns 0 on failure, 1 if we successfully received it. */
Philipp Reisner02918be2010-08-20 14:35:10 +02003500static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003501{
3502 struct bm_xfer_ctx c;
3503 void *buffer;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003504 int err;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003505 int ok = false;
Philipp Reisner02918be2010-08-20 14:35:10 +02003506 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003507
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003508 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3509 /* you are supposed to send additional out-of-sync information
3510 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003511
3512 /* maybe we should use some per thread scratch page,
3513 * and allocate that during initial device creation? */
3514 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3515 if (!buffer) {
3516 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3517 goto out;
3518 }
3519
3520 c = (struct bm_xfer_ctx) {
3521 .bm_bits = drbd_bm_bits(mdev),
3522 .bm_words = drbd_bm_words(mdev),
3523 };
3524
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003525 for(;;) {
Philipp Reisner02918be2010-08-20 14:35:10 +02003526 if (cmd == P_BITMAP) {
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003527 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
Philipp Reisner02918be2010-08-20 14:35:10 +02003528 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003529 /* MAYBE: sanity check that we speak proto >= 90,
3530 * and the feature is enabled! */
3531 struct p_compressed_bm *p;
3532
Philipp Reisner02918be2010-08-20 14:35:10 +02003533 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003534 dev_err(DEV, "ReportCBitmap packet too large\n");
3535 goto out;
3536 }
3537 /* use the page buff */
3538 p = buffer;
3539 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003540 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003541 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003542 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3543 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003544 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003545 }
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003546 err = decode_bitmap_c(mdev, p, &c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003547 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003548 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003549 goto out;
3550 }
3551
Philipp Reisner02918be2010-08-20 14:35:10 +02003552 c.packets[cmd == P_BITMAP]++;
3553 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003554
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003555 if (err <= 0) {
3556 if (err < 0)
3557 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003558 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003559 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003560 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003561 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003562 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003563
3564 INFO_bm_xfer_stats(mdev, "receive", &c);
3565
3566 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003567 enum drbd_state_rv rv;
3568
Philipp Reisnerb411b362009-09-25 16:07:19 -07003569 ok = !drbd_send_bitmap(mdev);
3570 if (!ok)
3571 goto out;
3572 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003573 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3574 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003575 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3576 /* admin may have requested C_DISCONNECTING,
3577 * other threads may have noticed network errors */
3578 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3579 drbd_conn_str(mdev->state.conn));
3580 }
3581
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003582 ok = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003583 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003584 drbd_bm_unlock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003585 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3586 drbd_start_resync(mdev, C_SYNC_SOURCE);
3587 free_page((unsigned long) buffer);
3588 return ok;
3589}
3590
Philipp Reisner02918be2010-08-20 14:35:10 +02003591static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003592{
3593 /* TODO zero copy sink :) */
3594 static char sink[128];
3595 int size, want, r;
3596
Philipp Reisner02918be2010-08-20 14:35:10 +02003597 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3598 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003599
Philipp Reisner02918be2010-08-20 14:35:10 +02003600 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003601 while (size > 0) {
3602 want = min_t(int, size, sizeof(sink));
3603 r = drbd_recv(mdev, sink, want);
3604 ERR_IF(r <= 0) break;
3605 size -= r;
3606 }
3607 return size == 0;
3608}
3609
Philipp Reisner02918be2010-08-20 14:35:10 +02003610static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003611{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003612 /* Make sure we've acked all the TCP data associated
3613 * with the data requests being unplugged */
3614 drbd_tcp_quickack(mdev->data.socket);
3615
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003616 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003617}
3618
Philipp Reisner73a01a12010-10-27 14:33:00 +02003619static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3620{
3621 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3622
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003623 switch (mdev->state.conn) {
3624 case C_WF_SYNC_UUID:
3625 case C_WF_BITMAP_T:
3626 case C_BEHIND:
3627 break;
3628 default:
3629 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3630 drbd_conn_str(mdev->state.conn));
3631 }
3632
Philipp Reisner73a01a12010-10-27 14:33:00 +02003633 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3634
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003635 return true;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003636}
3637
Philipp Reisner02918be2010-08-20 14:35:10 +02003638typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003639
Philipp Reisner02918be2010-08-20 14:35:10 +02003640struct data_cmd {
3641 int expect_payload;
3642 size_t pkt_size;
3643 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003644};
3645
Philipp Reisner02918be2010-08-20 14:35:10 +02003646static struct data_cmd drbd_cmd_handler[] = {
3647 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3648 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3649 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3650 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3651 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3652 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3653 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3654 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3655 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3656 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3657 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3658 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3659 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3660 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3661 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3662 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3663 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3664 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3665 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3666 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3667 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003668 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003669 /* anything missing from this table is in
3670 * the asender_tbl, see get_asender_cmd */
3671 [P_MAX_CMD] = { 0, 0, NULL },
3672};
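/* Dispatch example: when a P_SIZES packet arrives, drbdd() below finds
 * { 0, sizeof(struct p_sizes), receive_sizes } in this table, reads the
 * sizeof(struct p_sizes) - sizeof(union p_header) sub-header bytes into
 * the receive buffer, and invokes receive_sizes() with a data_size of 0:
 * for a well-formed packet everything after the header belongs to the
 * fixed-size sub-header, since P_SIZES declares expect_payload = 0. */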
3673
3674/* All handler functions that expect a sub-header get that sub-header in
3675 mdev->data.rbuf.header.head.payload.
3676
3677 Usually in mdev->data.rbuf.header.head the callback can find the usual
3678   p_header, but they may not rely on that, since there is also p_header95.
3679 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003680
3681static void drbdd(struct drbd_conf *mdev)
3682{
Philipp Reisner02918be2010-08-20 14:35:10 +02003683 union p_header *header = &mdev->data.rbuf.header;
3684 unsigned int packet_size;
3685 enum drbd_packets cmd;
3686 size_t shs; /* sub header size */
3687 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003688
3689 while (get_t_state(&mdev->receiver) == Running) {
3690 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003691 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3692 goto err_out;
3693
3694 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3695 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3696 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003697 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003698
Philipp Reisner02918be2010-08-20 14:35:10 +02003699 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02003700 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3701 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3702 goto err_out;
3703 }
3704
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003705 if (shs) {
3706 rv = drbd_recv(mdev, &header->h80.payload, shs);
3707 if (unlikely(rv != shs)) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003708 if (!signal_pending(current))
3709 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003710 goto err_out;
3711 }
3712 }
3713
Philipp Reisner02918be2010-08-20 14:35:10 +02003714 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3715
3716 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003717 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003718 cmdname(cmd), packet_size);
3719 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003720 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003721 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003722
Philipp Reisner02918be2010-08-20 14:35:10 +02003723 if (0) {
3724 err_out:
3725 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003726 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003727 /* If we leave here, we probably want to update at least the
3728 * "Connected" indicator on stable storage. Do so explicitly here. */
3729 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003730}
3731
3732void drbd_flush_workqueue(struct drbd_conf *mdev)
3733{
3734 struct drbd_wq_barrier barr;
3735
3736 barr.w.cb = w_prev_work_done;
3737 init_completion(&barr.done);
3738 drbd_queue_work(&mdev->data.work, &barr.w);
3739 wait_for_completion(&barr.done);
3740}
3741
Philipp Reisnerf70b35112010-06-24 14:34:40 +02003742void drbd_free_tl_hash(struct drbd_conf *mdev)
3743{
3744 struct hlist_head *h;
3745
3746 spin_lock_irq(&mdev->req_lock);
3747
3748 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3749 spin_unlock_irq(&mdev->req_lock);
3750 return;
3751 }
3752 /* paranoia code */
3753 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3754 if (h->first)
3755 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3756 (int)(h - mdev->ee_hash), h->first);
3757 kfree(mdev->ee_hash);
3758 mdev->ee_hash = NULL;
3759 mdev->ee_hash_s = 0;
3760
3761 /* paranoia code */
3762 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3763 if (h->first)
3764 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3765 (int)(h - mdev->tl_hash), h->first);
3766 kfree(mdev->tl_hash);
3767 mdev->tl_hash = NULL;
3768 mdev->tl_hash_s = 0;
3769 spin_unlock_irq(&mdev->req_lock);
3770}
3771
Philipp Reisnerb411b362009-09-25 16:07:19 -07003772static void drbd_disconnect(struct drbd_conf *mdev)
3773{
3774 enum drbd_fencing_p fp;
3775 union drbd_state os, ns;
3776 int rv = SS_UNKNOWN_ERROR;
3777 unsigned int i;
3778
3779 if (mdev->state.conn == C_STANDALONE)
3780 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003781
3782 /* asender does not clean up anything. it must not interfere, either */
3783 drbd_thread_stop(&mdev->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003784 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003785
Philipp Reisner85719572010-07-21 10:20:17 +02003786 /* wait for current activity to cease. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003787 spin_lock_irq(&mdev->req_lock);
3788 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3789 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3790 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3791 spin_unlock_irq(&mdev->req_lock);
3792
3793 /* We do not have data structures that would allow us to
3794 * get the rs_pending_cnt down to 0 again.
3795 * * On C_SYNC_TARGET we do not have any data structures describing
3796 * the pending RSDataRequest's we have sent.
3797 * * On C_SYNC_SOURCE there is no data structure that tracks
3798 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3799 * And no, it is not the sum of the reference counts in the
3800 * resync_LRU. The resync_LRU tracks the whole operation including
3801 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3802 * on the fly. */
3803 drbd_rs_cancel_all(mdev);
3804 mdev->rs_total = 0;
3805 mdev->rs_failed = 0;
3806 atomic_set(&mdev->rs_pending_cnt, 0);
3807 wake_up(&mdev->misc_wait);
3808
3809 /* make sure syncer is stopped and w_resume_next_sg queued */
3810 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003811 resync_timer_fn((unsigned long)mdev);
3812
Philipp Reisnerb411b362009-09-25 16:07:19 -07003813 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3814 * w_make_resync_request etc. which may still be on the worker queue
3815 * to be "canceled" */
3816 drbd_flush_workqueue(mdev);
3817
3818 /* This also does reclaim_net_ee(). If we do this too early, we might
3819	 * miss some resync ee and pages. */
3820 drbd_process_done_ee(mdev);
3821
3822 kfree(mdev->p_uuid);
3823 mdev->p_uuid = NULL;
3824
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003825 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003826 tl_clear(mdev);
3827
Philipp Reisnerb411b362009-09-25 16:07:19 -07003828 dev_info(DEV, "Connection closed\n");
3829
3830 drbd_md_sync(mdev);
3831
3832 fp = FP_DONT_CARE;
3833 if (get_ldev(mdev)) {
3834 fp = mdev->ldev->dc.fencing;
3835 put_ldev(mdev);
3836 }
3837
Philipp Reisner87f7be42010-06-11 13:56:33 +02003838 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3839 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003840
3841 spin_lock_irq(&mdev->req_lock);
3842 os = mdev->state;
3843 if (os.conn >= C_UNCONNECTED) {
3844 /* Do not restart in case we are C_DISCONNECTING */
3845 ns = os;
3846 ns.conn = C_UNCONNECTED;
3847 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3848 }
3849 spin_unlock_irq(&mdev->req_lock);
3850
3851 if (os.conn == C_DISCONNECTING) {
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003852 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003853
Philipp Reisnerb411b362009-09-25 16:07:19 -07003854 crypto_free_hash(mdev->cram_hmac_tfm);
3855 mdev->cram_hmac_tfm = NULL;
3856
3857 kfree(mdev->net_conf);
3858 mdev->net_conf = NULL;
3859 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3860 }
3861
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003862 /* serialize with bitmap writeout triggered by the state change,
3863 * if any. */
3864 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3865
Philipp Reisnerb411b362009-09-25 16:07:19 -07003866 /* tcp_close and release of sendpage pages can be deferred. I don't
3867 * want to use SO_LINGER, because apparently it can be deferred for
3868 * more than 20 seconds (longest time I checked).
3869 *
3870	 * Actually we don't care exactly when the network stack does its
3871 * put_page(), but release our reference on these pages right here.
3872 */
3873 i = drbd_release_ee(mdev, &mdev->net_ee);
3874 if (i)
3875 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003876 i = atomic_read(&mdev->pp_in_use_by_net);
3877 if (i)
3878 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003879 i = atomic_read(&mdev->pp_in_use);
3880 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003881 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003882
3883 D_ASSERT(list_empty(&mdev->read_ee));
3884 D_ASSERT(list_empty(&mdev->active_ee));
3885 D_ASSERT(list_empty(&mdev->sync_ee));
3886 D_ASSERT(list_empty(&mdev->done_ee));
3887
3888 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3889 atomic_set(&mdev->current_epoch->epoch_size, 0);
3890 D_ASSERT(list_empty(&mdev->current_epoch->list));
3891}
3892
3893/*
3894 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3895 * we can agree on is stored in agreed_pro_version.
3896 *
3897 * feature flags and the reserved array should be enough room for future
3898 * enhancements of the handshake protocol, and possible plugins...
3899 *
3900 * for now, they are expected to be zero, but ignored.
3901 */
3902static int drbd_send_handshake(struct drbd_conf *mdev)
3903{
3904 /* ASSERT current == mdev->receiver ... */
3905 struct p_handshake *p = &mdev->data.sbuf.handshake;
3906 int ok;
3907
3908 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3909 dev_err(DEV, "interrupted during initial handshake\n");
3910 return 0; /* interrupted. not ok. */
3911 }
3912
3913 if (mdev->data.socket == NULL) {
3914 mutex_unlock(&mdev->data.mutex);
3915 return 0;
3916 }
3917
3918 memset(p, 0, sizeof(*p));
3919 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3920 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3921 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02003922 (struct p_header80 *)p, sizeof(*p), 0 );
Philipp Reisnerb411b362009-09-25 16:07:19 -07003923 mutex_unlock(&mdev->data.mutex);
3924 return ok;
3925}
3926
3927/*
3928 * return values:
3929 * 1 yes, we have a valid connection
3930 * 0 oops, did not work out, please try again
3931 * -1 peer talks different language,
3932 * no point in trying again, please go standalone.
3933 */
3934static int drbd_do_handshake(struct drbd_conf *mdev)
3935{
3936 /* ASSERT current == mdev->receiver ... */
3937 struct p_handshake *p = &mdev->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02003938 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3939 unsigned int length;
3940 enum drbd_packets cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003941 int rv;
3942
3943 rv = drbd_send_handshake(mdev);
3944 if (!rv)
3945 return 0;
3946
Philipp Reisner02918be2010-08-20 14:35:10 +02003947 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003948 if (!rv)
3949 return 0;
3950
Philipp Reisner02918be2010-08-20 14:35:10 +02003951 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003952 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003953 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003954 return -1;
3955 }
3956
Philipp Reisner02918be2010-08-20 14:35:10 +02003957 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003958 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003959 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003960 return -1;
3961 }
3962
3963 rv = drbd_recv(mdev, &p->head.payload, expect);
3964
3965 if (rv != expect) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003966 if (!signal_pending(current))
3967 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003968 return 0;
3969 }
3970
Philipp Reisnerb411b362009-09-25 16:07:19 -07003971 p->protocol_min = be32_to_cpu(p->protocol_min);
3972 p->protocol_max = be32_to_cpu(p->protocol_max);
3973 if (p->protocol_max == 0)
3974 p->protocol_max = p->protocol_min;
3975
3976 if (PRO_VERSION_MAX < p->protocol_min ||
3977 PRO_VERSION_MIN > p->protocol_max)
3978 goto incompat;
3979
3980 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3981
3982 dev_info(DEV, "Handshake successful: "
3983 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3984
3985 return 1;
3986
3987 incompat:
3988 dev_err(DEV, "incompatible DRBD dialects: "
3989 "I support %d-%d, peer supports %d-%d\n",
3990 PRO_VERSION_MIN, PRO_VERSION_MAX,
3991 p->protocol_min, p->protocol_max);
3992 return -1;
3993}
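/* Negotiation sketch (the local limits 86..96 below are assumptions for
 * the example, not taken from this file): with PRO_VERSION_MIN = 86,
 * PRO_VERSION_MAX = 96 and a peer reporting protocol_min = 89,
 * protocol_max = 94, neither incompatibility test triggers and
 * agreed_pro_version = min(96, 94) = 94.  A peer supporting only 97..99
 * fails the PRO_VERSION_MAX < protocol_min check and is sent standalone
 * via the -1 return. */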
3994
3995#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3996static int drbd_do_auth(struct drbd_conf *mdev)
3997{
3998	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
3999 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004000 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004001}
4002#else
4003#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004004
4005/* Return value:
4006 1 - auth succeeded,
4007 0 - failed, try again (network error),
4008 -1 - auth failed, don't try again.
4009*/
4010
Philipp Reisnerb411b362009-09-25 16:07:19 -07004011static int drbd_do_auth(struct drbd_conf *mdev)
4012{
4013 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4014 struct scatterlist sg;
4015 char *response = NULL;
4016 char *right_response = NULL;
4017 char *peers_ch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004018 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4019 unsigned int resp_size;
4020 struct hash_desc desc;
Philipp Reisner02918be2010-08-20 14:35:10 +02004021 enum drbd_packets cmd;
4022 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004023 int rv;
4024
4025 desc.tfm = mdev->cram_hmac_tfm;
4026 desc.flags = 0;
4027
4028 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4029 (u8 *)mdev->net_conf->shared_secret, key_len);
4030 if (rv) {
4031 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004032 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004033 goto fail;
4034 }
4035
4036 get_random_bytes(my_challenge, CHALLENGE_LEN);
4037
4038 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4039 if (!rv)
4040 goto fail;
4041
Philipp Reisner02918be2010-08-20 14:35:10 +02004042 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004043 if (!rv)
4044 goto fail;
4045
Philipp Reisner02918be2010-08-20 14:35:10 +02004046 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004047 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004048 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004049 rv = 0;
4050 goto fail;
4051 }
4052
Philipp Reisner02918be2010-08-20 14:35:10 +02004053 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004054 dev_err(DEV, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004055 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004056 goto fail;
4057 }
4058
Philipp Reisner02918be2010-08-20 14:35:10 +02004059 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004060 if (peers_ch == NULL) {
4061 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004062 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004063 goto fail;
4064 }
4065
Philipp Reisner02918be2010-08-20 14:35:10 +02004066 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004067
Philipp Reisner02918be2010-08-20 14:35:10 +02004068 if (rv != length) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004069 if (!signal_pending(current))
4070 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004071 rv = 0;
4072 goto fail;
4073 }
4074
4075 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4076 response = kmalloc(resp_size, GFP_NOIO);
4077 if (response == NULL) {
4078 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004079 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004080 goto fail;
4081 }
4082
4083 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004084 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004085
4086 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4087 if (rv) {
4088 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004089 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004090 goto fail;
4091 }
4092
4093 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4094 if (!rv)
4095 goto fail;
4096
Philipp Reisner02918be2010-08-20 14:35:10 +02004097 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004098 if (!rv)
4099 goto fail;
4100
Philipp Reisner02918be2010-08-20 14:35:10 +02004101 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004102 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004103 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004104 rv = 0;
4105 goto fail;
4106 }
4107
Philipp Reisner02918be2010-08-20 14:35:10 +02004108 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004109 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4110 rv = 0;
4111 goto fail;
4112 }
4113
4114 rv = drbd_recv(mdev, response , resp_size);
4115
4116 if (rv != resp_size) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004117 if (!signal_pending(current))
4118 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004119 rv = 0;
4120 goto fail;
4121 }
4122
4123 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004124 if (right_response == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004125 dev_err(DEV, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004126 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004127 goto fail;
4128 }
4129
4130 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4131
4132 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4133 if (rv) {
4134 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004135 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004136 goto fail;
4137 }
4138
4139 rv = !memcmp(response, right_response, resp_size);
4140
4141 if (rv)
4142 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4143 resp_size, mdev->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004144 else
4145 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004146
4147 fail:
4148 kfree(peers_ch);
4149 kfree(response);
4150 kfree(right_response);
4151
4152 return rv;
4153}
4154#endif
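/* Summary of the challenge-response exchange implemented in
 * drbd_do_auth() (both peers run the same code, so authentication is
 * mutual):
 *   local -> peer : P_AUTH_CHALLENGE, CHALLENGE_LEN random bytes
 *   peer  -> local: P_AUTH_CHALLENGE, the peer's own random bytes
 *   local -> peer : P_AUTH_RESPONSE = HMAC(shared_secret, peer's challenge)
 *   peer  -> local: P_AUTH_RESPONSE = HMAC(shared_secret, local challenge)
 * Each side recomputes the HMAC over the challenge it sent and
 * memcmp()s it against the response it received. */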
4155
4156int drbdd_init(struct drbd_thread *thi)
4157{
4158 struct drbd_conf *mdev = thi->mdev;
4159 unsigned int minor = mdev_to_minor(mdev);
4160 int h;
4161
4162 sprintf(current->comm, "drbd%d_receiver", minor);
4163
4164 dev_info(DEV, "receiver (re)started\n");
4165
4166 do {
4167 h = drbd_connect(mdev);
4168 if (h == 0) {
4169 drbd_disconnect(mdev);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004170 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004171 }
4172 if (h == -1) {
4173 dev_warn(DEV, "Discarding network configuration.\n");
4174 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4175 }
4176 } while (h == 0);
4177
4178 if (h > 0) {
4179 if (get_net_conf(mdev)) {
4180 drbdd(mdev);
4181 put_net_conf(mdev);
4182 }
4183 }
4184
4185 drbd_disconnect(mdev);
4186
4187 dev_info(DEV, "receiver terminated\n");
4188 return 0;
4189}
4190
4191/* ********* acknowledge sender ******** */
4192
Philipp Reisner0b70a132010-08-20 13:36:10 +02004193static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004194{
4195 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4196
4197 int retcode = be32_to_cpu(p->retcode);
4198
4199 if (retcode >= SS_SUCCESS) {
4200 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4201 } else {
4202 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4203 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4204 drbd_set_st_err_str(retcode), retcode);
4205 }
4206 wake_up(&mdev->state_wait);
4207
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004208 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004209}
4210
Philipp Reisner0b70a132010-08-20 13:36:10 +02004211static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004212{
4213 return drbd_send_ping_ack(mdev);
4214
4215}
4216
Philipp Reisner0b70a132010-08-20 13:36:10 +02004217static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004218{
4219 /* restore idle timeout */
4220 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
Philipp Reisner309d1602010-03-02 15:03:44 +01004221 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4222 wake_up(&mdev->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004223
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004224 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004225}
4226
static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return true;
}

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	return NULL;
}

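/* A validator maps the (block_id, sector) pair from an ACK packet back
 * to the drbd_request it refers to; for writes the block_id round-trips
 * the request pointer itself, see _ack_id_to_req() above. */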
typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);

		dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
			(void *)(unsigned long)id, (unsigned long long)sector);
		return false;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

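/* Positive ACK for a write or resync write. Syncer blocks are handled
 * directly; for requests tracked in the transfer log, the ack type
 * selects which request state event to apply. */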
static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return true;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return false;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}

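/* Negative ACK: the peer could not write the block. Open-codes the
 * validator lookup, because a missing request is not an error here for
 * protocols A and B (see the comments in the lookup-failure branch). */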
static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	struct drbd_request *req;
	struct bio_and_error m;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}

	spin_lock_irq(&mdev->req_lock);
	req = _ack_id_to_req(mdev, p->block_id, sector);
	if (!req) {
		spin_unlock_irq(&mdev->req_lock);
		if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
		    mdev->net_conf->wire_protocol == DRBD_PROT_B) {
			/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
			   The master bio might already be completed, therefore the
			   request is no longer in the collision hash.
			   => Do not try to validate block_id as request. */
			/* In Protocol B we might already have got a P_RECV_ACK
			   but then get a P_NEG_ACK afterwards. */
			drbd_set_out_of_sync(mdev, sector, size);
			return true;
		} else {
			dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
				(void *)(unsigned long)p->block_id, (unsigned long long)sector);
			return false;
		}
	}
	__req_mod(req, neg_acked, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}

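/* Shared handler for P_NEG_RS_DREPLY and P_RS_CANCEL (see the asender
 * table below): both finish the pending resync request, but only a
 * NegRSDReply additionally marks the range as failed I/O. */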
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (be16_to_cpu(h->command)) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
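			/* fall through */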
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}

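/* A barrier ack releases the corresponding epoch from the transfer log.
 * If we are in C_AHEAD and all application I/O has drained, schedule the
 * switch back to a real resync one second from now. */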
static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
		mdev->start_resync_timer.expires = jiffies + HZ;
		add_timer(&mdev->start_resync_timer);
	}

	return true;
}

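/* Result of an online verify request: either the block matched, or the
 * peer reported it out of sync. When the last block has been checked,
 * queue w_ov_finished to wrap up the verify run. */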
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}

static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}

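/* Dispatch table for the meta data socket: maps a packet type to its
 * expected on-wire size and its handler. */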
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}

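/* The asender thread: sends and answers pings, processes the done_ee
 * list (sending acks for completed epoch entries), and dispatches
 * incoming meta socket packets via get_asender_cmd(). */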
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
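			/* sk_rcvtimeo was shortened to the ping timeout when we
			 * sent a ping (see SEND_PING above) and is only restored
			 * by got_PingAck(); if it still has the short value here,
			 * the PingAck is overdue. */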
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

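	/* These blocks are only ever entered via the goto labels inside
	 * them; "if (0)" keeps them out of the normal fall-through path. */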
	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}