/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


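/* Opportunistic page allocation: may use highmem, must not warn on
 * failure, and, lacking __GFP_WAIT, must not block or trigger write-out
 * (callers retry or fall back instead). */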
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

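/* For reference: the chain accessors used below (page_chain_next(),
 * page_chain_for_each(), page_chain_for_each_safe()) live in drbd_int.h.
 * page_chain_next() is presumably along the lines of
 *
 *	static inline struct page *page_chain_next(struct page *page)
 *	{
 *		return (struct page *)page_private(page);
 *	}
 *
 * and the iterators walk the chain until the NULL terminator that
 * set_page_private(page, 0) establishes. */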
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that is not finished,
	   we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}
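/* Note: on success the whole chain is accounted in pp_in_use in one go;
 * drbd_pp_free() below is the only place that decrements it again
 * (respectively pp_in_use_by_net, for chains still parked on net_ee). */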

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->collision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept, which is only present since 2.6.18.
 * Also, we want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

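/* The get_fs()/set_fs(KERNEL_DS) dance in the receive helpers below lets
 * sock_recvmsg(), which normally expects user space buffers, accept the
 * kernel space kvec we hand it; the old segment limit is restored before
 * returning. */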
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
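/* DRBD runs two TCP connections per peer: "sock" carries the bulk data
 * packets, "msock" the (meta data) ack traffic. Both sides connect and
 * accept concurrently; the P_HAND_SHAKE_S / P_HAND_SHAKE_M first packets
 * sent below decide which established connection ends up as which. */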
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

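		/* If both nodes called connect() at (nearly) the same time,
		 * their first packets may "cross": the socket accepted below
		 * can deliver either the S or the M handshake packet. The
		 * switch statement sorts that out; on garbage, a coin flip
		 * (random32() & 1) breaks the retry symmetry between the
		 * two nodes. */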
retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

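/* Two header formats coexist on the wire: the original h80 header
 * (magic BE_DRBD_MAGIC, 16 bit length) and the h95 header (magic
 * BE_DRBD_MAGIC_BIG, 32 bit length), which allows payload sizes beyond
 * what the 16 bit length field can express. */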
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_info(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
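	/* min(): the effective write ordering can only ever be downgraded
	 * (WO_none < WO_drain_io < WO_bdev_flush), never upgraded again;
	 * the no_disk_flush/no_disk_drain knobs below cap it further. */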
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

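/* recv_dless_read(): the peer answered a read request that we could not
 * serve from our own backing device (a "disk-less" read, hence presumably
 * the name); the payload is copied straight into the bvecs of the
 * original master bio. */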
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->collision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->collision));
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

1586/* Called from receive_Data.
1587 * Synchronize packets on sock with packets on msock.
1588 *
1589 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1590 * packet traveling on msock, they are still processed in the order they have
1591 * been sent.
1592 *
1593 * Note: we don't care for Ack packets overtaking P_DATA packets.
1594 *
1595 * In case packet_seq is larger than mdev->peer_seq, there are
1596 * outstanding packets on the msock. We wait for them to arrive.
1597 * In case we are the logically next packet, we update mdev->peer_seq
1598 * ourselves. Correctly handles 32bit wrap around.
1599 *
1600 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1601 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1602 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1603 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1604 *
1605 * returns 0 if we may process the packet,
1606 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1607static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1608{
1609 DEFINE_WAIT(wait);
1610 unsigned int p_seq;
1611 long timeout;
1612 int ret = 0;
1613 spin_lock(&mdev->peer_seq_lock);
1614 for (;;) {
1615 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1616 if (seq_le(packet_seq, mdev->peer_seq+1))
1617 break;
1618 if (signal_pending(current)) {
1619 ret = -ERESTARTSYS;
1620 break;
1621 }
1622 p_seq = mdev->peer_seq;
1623 spin_unlock(&mdev->peer_seq_lock);
1624 timeout = schedule_timeout(30*HZ);
1625 spin_lock(&mdev->peer_seq_lock);
1626 if (timeout == 0 && p_seq == mdev->peer_seq) {
1627 ret = -ETIMEDOUT;
1628 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1629 break;
1630 }
1631 }
1632 finish_wait(&mdev->seq_wait, &wait);
1633 if (mdev->peer_seq+1 == packet_seq)
1634 mdev->peer_seq++;
1635 spin_unlock(&mdev->peer_seq_lock);
1636 return ret;
1637}
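/* For reference, a sketch of the wrap-safe comparison used above. The
 * real seq_le() is defined elsewhere in this file and may differ in
 * detail; the usual implementation is serial number arithmetic, where
 * casting the difference to a signed type gives the right answer across
 * the 32bit wrap as long as the two values are less than 1<<31 apart. */
static inline int seq_le_sketch(u32 a, u32 b)
{
	/* "a <= b", modulo 2^32 */
	return (s32)(a - b) <= 0;
}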
1638
Lars Ellenberg688593c2010-11-17 22:25:03 +01001639/* see also bio_flags_to_wire():
1640 * we map DP_* packet flags to REQ_* bio flags explicitly, because the REQ_*
1641 * values may differ between kernel versions, and we may replicate to peers
1642 * running other kernel versions. */
1642static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001643{
Lars Ellenberg688593c2010-11-17 22:25:03 +01001644 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1645 (dpf & DP_FUA ? REQ_FUA : 0) |
1646 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1647 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001648}
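/* A sketch of the sender-side counterpart referenced in the comment
 * above. The real bio_flags_to_wire() lives outside this file (in
 * drbd_main.c at this point in time) and additionally guards on the
 * agreed protocol version; treat the details here as assumptions. The
 * point is merely that the mapping is the exact mirror of
 * wire_flags_to_bio(). */
static u32 bio_flags_to_wire_sketch(unsigned long bi_rw)
{
	return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
	       (bi_rw & REQ_FUA ? DP_FUA : 0) |
	       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
	       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}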
1649
Philipp Reisnerb411b362009-09-25 16:07:19 -07001650/* mirrored write */
Philipp Reisner02918be2010-08-20 14:35:10 +02001651static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001652{
1653 sector_t sector;
1654 struct drbd_epoch_entry *e;
Philipp Reisner02918be2010-08-20 14:35:10 +02001655 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001656 int rw = WRITE;
1657 u32 dp_flags;
1658
Philipp Reisnerb411b362009-09-25 16:07:19 -07001659 if (!get_ldev(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001660 spin_lock(&mdev->peer_seq_lock);
1661 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1662 mdev->peer_seq++;
1663 spin_unlock(&mdev->peer_seq_lock);
1664
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001665 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001666 atomic_inc(&mdev->current_epoch->epoch_size);
1667 return drbd_drain_block(mdev, data_size);
1668 }
1669
1670 /* get_ldev(mdev) successful.
1671 * Corresponding put_ldev done either below (on various errors),
1672 * or in drbd_endio_write_sec, if we successfully submit the data at
1673 * the end of this function. */
1674
1675 sector = be64_to_cpu(p->sector);
1676 e = read_in_block(mdev, p->block_id, sector, data_size);
1677 if (!e) {
1678 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001679 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001680 }
1681
Philipp Reisnerb411b362009-09-25 16:07:19 -07001682 e->w.cb = e_end_block;
1683
Lars Ellenberg688593c2010-11-17 22:25:03 +01001684 dp_flags = be32_to_cpu(p->dp_flags);
1685 rw |= wire_flags_to_bio(mdev, dp_flags);
1686
1687 if (dp_flags & DP_MAY_SET_IN_SYNC)
1688 e->flags |= EE_MAY_SET_IN_SYNC;
1689
Philipp Reisnerb411b362009-09-25 16:07:19 -07001690 spin_lock(&mdev->epoch_lock);
1691 e->epoch = mdev->current_epoch;
1692 atomic_inc(&e->epoch->epoch_size);
1693 atomic_inc(&e->epoch->active);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001694 spin_unlock(&mdev->epoch_lock);
1695
Philipp Reisnerb411b362009-09-25 16:07:19 -07001696 /* I'm the receiver, I do hold a net_cnt reference. */
1697 if (!mdev->net_conf->two_primaries) {
1698 spin_lock_irq(&mdev->req_lock);
1699 } else {
1700 /* don't get the req_lock yet,
1701 * we may sleep in drbd_wait_peer_seq */
1702 const int size = e->size;
1703 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1704 DEFINE_WAIT(wait);
1705 struct drbd_request *i;
1706 struct hlist_node *n;
1707 struct hlist_head *slot;
1708 int first;
1709
1710 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1711 BUG_ON(mdev->ee_hash == NULL);
1712 BUG_ON(mdev->tl_hash == NULL);
1713
1714 /* conflict detection and handling:
1715 * 1. wait on the sequence number,
1716 * in case this data packet overtook ACK packets.
1717 * 2. check our hash tables for conflicting requests.
1718 * we only need to walk the tl_hash, since an ee can not
1719 * have a conflict with another ee: on the submitting
1720 * node, the corresponding req had already been conflicting,
1721 * and a conflicting req is never sent.
1722 *
1723 * Note: for two_primaries, we are protocol C,
1724 * so there cannot be any request that is DONE
1725 * but still on the transfer log.
1726 *
1727 * unconditionally add to the ee_hash.
1728 *
1729 * if no conflicting request is found:
1730 * submit.
1731 *
1732 * if any conflicting request is found
1733 * that has not yet been acked,
1734 * AND I have the "discard concurrent writes" flag:
1735 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1736 *
1737 * if any conflicting request is found:
1738 * block the receiver, waiting on misc_wait
1739 * until no more conflicting requests are there,
1740 * or we get interrupted (disconnect).
1741 *
1742 * we do not just write after local io completion of those
1743 * requests, but only after req is done completely, i.e.
1744 * we wait for the P_DISCARD_ACK to arrive!
1745 *
1746 * then proceed normally, i.e. submit.
1747 */
1748 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1749 goto out_interrupted;
1750
1751 spin_lock_irq(&mdev->req_lock);
1752
Bart Van Assche24c48302011-05-21 18:32:29 +02001753 hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001754
1755#define OVERLAPS overlaps(i->sector, i->size, sector, size)
1756 slot = tl_hash_slot(mdev, sector);
1757 first = 1;
1758 for (;;) {
1759 int have_unacked = 0;
1760 int have_conflict = 0;
1761 prepare_to_wait(&mdev->misc_wait, &wait,
1762 TASK_INTERRUPTIBLE);
Bart Van Assche24c48302011-05-21 18:32:29 +02001763 hlist_for_each_entry(i, n, slot, collision) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001764 if (OVERLAPS) {
1765 /* only ALERT on first iteration,
1766 * we may be woken up early... */
1767 if (first)
1768 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1769 " new: %llus +%u; pending: %llus +%u\n",
1770 current->comm, current->pid,
1771 (unsigned long long)sector, size,
1772 (unsigned long long)i->sector, i->size);
1773 if (i->rq_state & RQ_NET_PENDING)
1774 ++have_unacked;
1775 ++have_conflict;
1776 }
1777 }
1778#undef OVERLAPS
1779 if (!have_conflict)
1780 break;
1781
1782 /* Discard Ack only for the _first_ iteration */
1783 if (first && discard && have_unacked) {
1784 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1785 (unsigned long long)sector);
1786 inc_unacked(mdev);
1787 e->w.cb = e_send_discard_ack;
1788 list_add_tail(&e->w.list, &mdev->done_ee);
1789
1790 spin_unlock_irq(&mdev->req_lock);
1791
1792 /* we could probably send that P_DISCARD_ACK ourselves,
1793 * but I don't like the receiver using the msock */
1794
1795 put_ldev(mdev);
1796 wake_asender(mdev);
1797 finish_wait(&mdev->misc_wait, &wait);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001798 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001799 }
1800
1801 if (signal_pending(current)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001802 hlist_del_init(&e->collision);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001803
1804 spin_unlock_irq(&mdev->req_lock);
1805
1806 finish_wait(&mdev->misc_wait, &wait);
1807 goto out_interrupted;
1808 }
1809
1810 spin_unlock_irq(&mdev->req_lock);
1811 if (first) {
1812 first = 0;
1813 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1814 "sec=%llus\n", (unsigned long long)sector);
1815 } else if (discard) {
1816 /* we had none on the first iteration.
1817 * there must be none now. */
1818 D_ASSERT(have_unacked == 0);
1819 }
1820 schedule();
1821 spin_lock_irq(&mdev->req_lock);
1822 }
1823 finish_wait(&mdev->misc_wait, &wait);
1824 }
1825
1826 list_add(&e->w.list, &mdev->active_ee);
1827 spin_unlock_irq(&mdev->req_lock);
1828
1829 switch (mdev->net_conf->wire_protocol) {
1830 case DRBD_PROT_C:
1831 inc_unacked(mdev);
1832 /* corresponding dec_unacked() in e_end_block()
1833 * respective _drbd_clear_done_ee */
1834 break;
1835 case DRBD_PROT_B:
1836 /* I really don't like it that the receiver thread
1837 * sends on the msock, but anyways */
1838 drbd_send_ack(mdev, P_RECV_ACK, e);
1839 break;
1840 case DRBD_PROT_A:
1841 /* nothing to do */
1842 break;
1843 }
1844
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001845 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001846 /* In case we have the only disk of the cluster, */
1847 drbd_set_out_of_sync(mdev, e->sector, e->size);
1848 e->flags |= EE_CALL_AL_COMPLETE_IO;
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001849 e->flags &= ~EE_MAY_SET_IN_SYNC;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001850 drbd_al_begin_io(mdev, e->sector);
1851 }
1852
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001853 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001854 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001855
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001856 /* don't care for the reason here */
1857 dev_err(DEV, "submit failed, triggering re-connect\n");
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001858 spin_lock_irq(&mdev->req_lock);
1859 list_del(&e->w.list);
Bart Van Assche24c48302011-05-21 18:32:29 +02001860 hlist_del_init(&e->collision);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001861 spin_unlock_irq(&mdev->req_lock);
1862 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1863 drbd_al_complete_io(mdev, e->sector);
1864
Philipp Reisnerb411b362009-09-25 16:07:19 -07001865out_interrupted:
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001866 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001867 put_ldev(mdev);
1868 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001869 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001870}
1871
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001872/* We may throttle resync, if the lower device seems to be busy,
1873 * and current sync rate is above c_min_rate.
1874 *
1875 * To decide whether or not the lower device is busy, we use a scheme similar
1876 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
1877 * (more than 64 sectors) of activity we cannot account for with our own resync
1878 * activity, it obviously is "busy".
1879 *
1880 * The sync rate estimate used here is based only on the most recent two
1881 * step marks, giving a short time average so we can react faster.
1882 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01001883int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001884{
1885 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1886 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01001887 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001888 int curr_events;
1889 int throttle = 0;
1890
1891 /* feature disabled? */
1892 if (mdev->sync_conf.c_min_rate == 0)
1893 return 0;
1894
Philipp Reisnere3555d82010-11-07 15:56:29 +01001895 spin_lock_irq(&mdev->al_lock);
1896 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1897 if (tmp) {
1898 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1899 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1900 spin_unlock_irq(&mdev->al_lock);
1901 return 0;
1902 }
1903 /* Do not slow down if app IO is already waiting for this extent */
1904 }
1905 spin_unlock_irq(&mdev->al_lock);
1906
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001907 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1908 (int)part_stat_read(&disk->part0, sectors[1]) -
1909 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01001910
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001911 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1912 unsigned long rs_left;
1913 int i;
1914
1915 mdev->rs_last_events = curr_events;
1916
1917 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1918 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01001919 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
1920
1921 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
1922 rs_left = mdev->ov_left;
1923 else
1924 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001925
1926 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
1927 if (!dt)
1928 dt++;
1929 db = mdev->rs_mark_left[i] - rs_left;
1930 dbdt = Bit2KB(db/dt);
1931
1932 if (dbdt > mdev->sync_conf.c_min_rate)
1933 throttle = 1;
1934 }
1935 return throttle;
1936}
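/* Worked example of the rate computation above (all numbers assumed):
 * if the selected sync mark is 3 seconds old (dt = 3) and 3000 bitmap
 * bits were cleared since then (db = 3000, one bit per 4KiB block),
 * then db/dt = 1000 bits/s and Bit2KB(1000) = 4000 KB/s. With
 * c_min_rate configured below 4000, we throttle -- provided the
 * partition stats also showed more than 64 sectors of unaccounted
 * activity, which is what gets us into this branch at all. */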
1937
1938
Philipp Reisner02918be2010-08-20 14:35:10 +02001939static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001940{
1941 sector_t sector;
1942 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1943 struct drbd_epoch_entry *e;
1944 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001945 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001946 unsigned int fault_type;
Philipp Reisner02918be2010-08-20 14:35:10 +02001947 struct p_block_req *p = &mdev->data.rbuf.block_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001948
1949 sector = be64_to_cpu(p->sector);
1950 size = be32_to_cpu(p->blksize);
1951
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01001952 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001953 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1954 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001955 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001956 }
1957 if (sector + (size>>9) > capacity) {
1958 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1959 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001960 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001961 }
1962
1963 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001964 verb = 1;
1965 switch (cmd) {
1966 case P_DATA_REQUEST:
1967 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
1968 break;
1969 case P_RS_DATA_REQUEST:
1970 case P_CSUM_RS_REQUEST:
1971 case P_OV_REQUEST:
1972 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
1973 break;
1974 case P_OV_REPLY:
1975 verb = 0;
1976 dec_rs_pending(mdev);
1977 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
1978 break;
1979 default:
1980 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
1981 cmdname(cmd));
1982 }
1983 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001984 dev_err(DEV, "Cannot satisfy peer's read request, "
1985 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001986
Lars Ellenberga821cc42010-09-06 12:31:37 +02001987 /* drain possible payload */
1988 return drbd_drain_block(mdev, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001989 }
1990
1991 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1992 * "criss-cross" setup, that might cause write-out on some other DRBD,
1993 * which in turn might block on the other node at this very place. */
1994 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1995 if (!e) {
1996 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001997 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001998 }
1999
Philipp Reisner02918be2010-08-20 14:35:10 +02002000 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002001 case P_DATA_REQUEST:
2002 e->w.cb = w_e_end_data_req;
2003 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002004 /* application IO, don't drbd_rs_begin_io */
2005 goto submit;
2006
Philipp Reisnerb411b362009-09-25 16:07:19 -07002007 case P_RS_DATA_REQUEST:
2008 e->w.cb = w_e_end_rsdata_req;
2009 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002010 /* used in the sector offset progress display */
2011 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002012 break;
2013
2014 case P_OV_REPLY:
2015 case P_CSUM_RS_REQUEST:
2016 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002017 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2018 if (!di)
2019 goto out_free_e;
2020
2021 di->digest_size = digest_size;
2022 di->digest = (((char *)di)+sizeof(struct digest_info));
2023
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002024 e->digest = di;
2025 e->flags |= EE_HAS_DIGEST;
2026
Philipp Reisnerb411b362009-09-25 16:07:19 -07002027 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2028 goto out_free_e;
2029
Philipp Reisner02918be2010-08-20 14:35:10 +02002030 if (cmd == P_CSUM_RS_REQUEST) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002031 D_ASSERT(mdev->agreed_pro_version >= 89);
2032 e->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002033 /* used in the sector offset progress display */
2034 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisner02918be2010-08-20 14:35:10 +02002035 } else if (cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002036 /* track progress, we may need to throttle */
2037 atomic_add(size >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002038 e->w.cb = w_e_end_ov_reply;
2039 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002040 /* drbd_rs_begin_io done when we sent this request,
2041 * but accounting still needs to be done. */
2042 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002043 }
2044 break;
2045
2046 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002047 if (mdev->ov_start_sector == ~(sector_t)0 &&
2048 mdev->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002049 unsigned long now = jiffies;
2050 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002051 mdev->ov_start_sector = sector;
2052 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002053 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2054 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002055 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2056 mdev->rs_mark_left[i] = mdev->ov_left;
2057 mdev->rs_mark_time[i] = now;
2058 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002059 dev_info(DEV, "Online Verify start sector: %llu\n",
2060 (unsigned long long)sector);
2061 }
2062 e->w.cb = w_e_end_ov_req;
2063 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002064 break;
2065
Philipp Reisnerb411b362009-09-25 16:07:19 -07002066 default:
2067 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002068 cmdname(cmd));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002069 fault_type = DRBD_FAULT_MAX;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002070 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002071 }
2072
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002073 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2074 * wrt the receiver, but it is not as straightforward as it may seem.
2075 * Various places in the resync start and stop logic assume resync
2076 * requests are processed in order, requeuing this on the worker thread
2077 * introduces a bunch of new code for synchronization between threads.
2078 *
2079 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2080 * "forever", throttling after drbd_rs_begin_io will lock that extent
2081 * for application writes for the same time. For now, just throttle
2082 * here, where the rest of the code expects the receiver to sleep for
2083 * a while, anyways.
2084 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002085
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002086 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2087 * this defers syncer requests for some time, before letting at least
2088 * one request through. The resync controller on the receiving side
2089 * will adapt to the incoming rate accordingly.
2090 *
2091 * We cannot throttle here if remote is Primary/SyncTarget:
2092 * we would also throttle its application reads.
2093 * In that case, throttling is done on the SyncTarget only.
2094 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002095 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2096 schedule_timeout_uninterruptible(HZ/10);
2097 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002098 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002099
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002100submit_for_resync:
2101 atomic_add(size >> 9, &mdev->rs_sect_ev);
2102
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002103submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002104 inc_unacked(mdev);
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002105 spin_lock_irq(&mdev->req_lock);
2106 list_add_tail(&e->w.list, &mdev->read_ee);
2107 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002108
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002109 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002110 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002111
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002112 /* don't care for the reason here */
2113 dev_err(DEV, "submit failed, triggering re-connect\n");
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002114 spin_lock_irq(&mdev->req_lock);
2115 list_del(&e->w.list);
2116 spin_unlock_irq(&mdev->req_lock);
2117 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2118
Philipp Reisnerb411b362009-09-25 16:07:19 -07002119out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002120 put_ldev(mdev);
2121 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002122 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002123}
2124
2125static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2126{
2127 int self, peer, rv = -100;
2128 unsigned long ch_self, ch_peer;
2129
2130 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2131 peer = mdev->p_uuid[UI_BITMAP] & 1;
2132
2133 ch_peer = mdev->p_uuid[UI_SIZE];
2134 ch_self = mdev->comm_bm_set;
2135
2136 switch (mdev->net_conf->after_sb_0p) {
2137 case ASB_CONSENSUS:
2138 case ASB_DISCARD_SECONDARY:
2139 case ASB_CALL_HELPER:
2140 dev_err(DEV, "Configuration error.\n");
2141 break;
2142 case ASB_DISCONNECT:
2143 break;
2144 case ASB_DISCARD_YOUNGER_PRI:
2145 if (self == 0 && peer == 1) {
2146 rv = -1;
2147 break;
2148 }
2149 if (self == 1 && peer == 0) {
2150 rv = 1;
2151 break;
2152 }
2153 /* Else fall through to one of the other strategies... */
2154 case ASB_DISCARD_OLDER_PRI:
2155 if (self == 0 && peer == 1) {
2156 rv = 1;
2157 break;
2158 }
2159 if (self == 1 && peer == 0) {
2160 rv = -1;
2161 break;
2162 }
2163 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002164 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002165 "Using discard-least-changes instead\n");
2166 case ASB_DISCARD_ZERO_CHG:
2167 if (ch_peer == 0 && ch_self == 0) {
2168 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2169 ? -1 : 1;
2170 break;
2171 } else {
2172 if (ch_peer == 0) { rv = 1; break; }
2173 if (ch_self == 0) { rv = -1; break; }
2174 }
2175 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2176 break;
2177 case ASB_DISCARD_LEAST_CHG:
2178 if (ch_self < ch_peer)
2179 rv = -1;
2180 else if (ch_self > ch_peer)
2181 rv = 1;
2182 else /* ( ch_self == ch_peer ) */
2183 /* Well, then use something else. */
2184 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2185 ? -1 : 1;
2186 break;
2187 case ASB_DISCARD_LOCAL:
2188 rv = -1;
2189 break;
2190 case ASB_DISCARD_REMOTE:
2191 rv = 1;
2192 }
2193
2194 return rv;
2195}
2196
2197static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2198{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002199 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002200
2201 switch (mdev->net_conf->after_sb_1p) {
2202 case ASB_DISCARD_YOUNGER_PRI:
2203 case ASB_DISCARD_OLDER_PRI:
2204 case ASB_DISCARD_LEAST_CHG:
2205 case ASB_DISCARD_LOCAL:
2206 case ASB_DISCARD_REMOTE:
2207 dev_err(DEV, "Configuration error.\n");
2208 break;
2209 case ASB_DISCONNECT:
2210 break;
2211 case ASB_CONSENSUS:
2212 hg = drbd_asb_recover_0p(mdev);
2213 if (hg == -1 && mdev->state.role == R_SECONDARY)
2214 rv = hg;
2215 if (hg == 1 && mdev->state.role == R_PRIMARY)
2216 rv = hg;
2217 break;
2218 case ASB_VIOLENTLY:
2219 rv = drbd_asb_recover_0p(mdev);
2220 break;
2221 case ASB_DISCARD_SECONDARY:
2222 return mdev->state.role == R_PRIMARY ? 1 : -1;
2223 case ASB_CALL_HELPER:
2224 hg = drbd_asb_recover_0p(mdev);
2225 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002226 enum drbd_state_rv rv2;
2227
2228 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002229 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2230 * we might be here in C_WF_REPORT_PARAMS which is transient.
2231 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002232 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2233 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002234 drbd_khelper(mdev, "pri-lost-after-sb");
2235 } else {
2236 dev_warn(DEV, "Successfully gave up primary role.\n");
2237 rv = hg;
2238 }
2239 } else
2240 rv = hg;
2241 }
2242
2243 return rv;
2244}
2245
2246static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2247{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002248 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002249
2250 switch (mdev->net_conf->after_sb_2p) {
2251 case ASB_DISCARD_YOUNGER_PRI:
2252 case ASB_DISCARD_OLDER_PRI:
2253 case ASB_DISCARD_LEAST_CHG:
2254 case ASB_DISCARD_LOCAL:
2255 case ASB_DISCARD_REMOTE:
2256 case ASB_CONSENSUS:
2257 case ASB_DISCARD_SECONDARY:
2258 dev_err(DEV, "Configuration error.\n");
2259 break;
2260 case ASB_VIOLENTLY:
2261 rv = drbd_asb_recover_0p(mdev);
2262 break;
2263 case ASB_DISCONNECT:
2264 break;
2265 case ASB_CALL_HELPER:
2266 hg = drbd_asb_recover_0p(mdev);
2267 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002268 enum drbd_state_rv rv2;
2269
Philipp Reisnerb411b362009-09-25 16:07:19 -07002270 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2271 * we might be here in C_WF_REPORT_PARAMS which is transient.
2272 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002273 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2274 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002275 drbd_khelper(mdev, "pri-lost-after-sb");
2276 } else {
2277 dev_warn(DEV, "Successfully gave up primary role.\n");
2278 rv = hg;
2279 }
2280 } else
2281 rv = hg;
2282 }
2283
2284 return rv;
2285}
2286
2287static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2288 u64 bits, u64 flags)
2289{
2290 if (!uuid) {
2291 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2292 return;
2293 }
2294 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2295 text,
2296 (unsigned long long)uuid[UI_CURRENT],
2297 (unsigned long long)uuid[UI_BITMAP],
2298 (unsigned long long)uuid[UI_HISTORY_START],
2299 (unsigned long long)uuid[UI_HISTORY_END],
2300 (unsigned long long)bits,
2301 (unsigned long long)flags);
2302}
2303
2304/*
2305 100 after split brain try auto recover
2306 2 C_SYNC_SOURCE set BitMap
2307 1 C_SYNC_SOURCE use BitMap
2308 0 no Sync
2309 -1 C_SYNC_TARGET use BitMap
2310 -2 C_SYNC_TARGET set BitMap
2311 -100 after split brain, disconnect
2312-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002313-1091 requires proto 91
2314-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002315 */
2316static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2317{
2318 u64 self, peer;
2319 int i, j;
2320
2321 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2322 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2323
2324 *rule_nr = 10;
2325 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2326 return 0;
2327
2328 *rule_nr = 20;
2329 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2330 peer != UUID_JUST_CREATED)
2331 return -2;
2332
2333 *rule_nr = 30;
2334 if (self != UUID_JUST_CREATED &&
2335 (peer == UUID_JUST_CREATED || peer == (u64)0))
2336 return 2;
2337
2338 if (self == peer) {
2339 int rct, dc; /* roles at crash time */
2340
2341 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2342
2343 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002344 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002345
2346 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2347 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2348 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2349 drbd_uuid_set_bm(mdev, 0UL);
2350
2351 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2352 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2353 *rule_nr = 34;
2354 } else {
2355 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2356 *rule_nr = 36;
2357 }
2358
2359 return 1;
2360 }
2361
2362 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2363
2364 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002365 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002366
2367 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2368 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2369 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2370
2371 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2372 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2373 mdev->p_uuid[UI_BITMAP] = 0UL;
2374
2375 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2376 *rule_nr = 35;
2377 } else {
2378 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2379 *rule_nr = 37;
2380 }
2381
2382 return -1;
2383 }
2384
2385 /* Common power [off|failure] */
2386 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2387 (mdev->p_uuid[UI_FLAGS] & 2);
2388 /* lowest bit is set when we were primary,
2389 * next bit (weight 2) is set when peer was primary */
2390 *rule_nr = 40;
2391
2392 switch (rct) {
2393 case 0: /* !self_pri && !peer_pri */ return 0;
2394 case 1: /* self_pri && !peer_pri */ return 1;
2395 case 2: /* !self_pri && peer_pri */ return -1;
2396 case 3: /* self_pri && peer_pri */
2397 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2398 return dc ? -1 : 1;
2399 }
2400 }
2401
2402 *rule_nr = 50;
2403 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2404 if (self == peer)
2405 return -1;
2406
2407 *rule_nr = 51;
2408 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2409 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002410 if (mdev->agreed_pro_version < 96 ?
2411 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2412 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2413 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002414 /* The last P_SYNC_UUID did not get through. Undo the modifications
2415 of the peer's UUIDs from the last start of a resync as sync source. */
2416
2417 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002418 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002419
2420 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2421 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002422
2423 dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
2424 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2425
Philipp Reisnerb411b362009-09-25 16:07:19 -07002426 return -1;
2427 }
2428 }
2429
2430 *rule_nr = 60;
2431 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2432 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2433 peer = mdev->p_uuid[i] & ~((u64)1);
2434 if (self == peer)
2435 return -2;
2436 }
2437
2438 *rule_nr = 70;
2439 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2440 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2441 if (self == peer)
2442 return 1;
2443
2444 *rule_nr = 71;
2445 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2446 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002447 if (mdev->agreed_pro_version < 96 ?
2448 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2449 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2450 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002451 /* The last P_SYNC_UUID did not get through. Undo the modifications
2452 of our UUIDs from the last start of a resync as sync source. */
2453
2454 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002455 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002456
2457 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2458 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2459
Philipp Reisner4a23f262011-01-11 17:42:17 +01002460 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002461 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2462 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2463
2464 return 1;
2465 }
2466 }
2467
2468
2469 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002470 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002471 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2472 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2473 if (self == peer)
2474 return 2;
2475 }
2476
2477 *rule_nr = 90;
2478 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2479 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2480 if (self == peer && self != ((u64)0))
2481 return 100;
2482
2483 *rule_nr = 100;
2484 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2485 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2486 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2487 peer = mdev->p_uuid[j] & ~((u64)1);
2488 if (self == peer)
2489 return -100;
2490 }
2491 }
2492
2493 return -1000;
2494}
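/* Worked example for rule 40 above (values assumed for illustration):
 * if this node has CRASHED_PRIMARY set (lowest bit, weight 1) and the
 * peer's UI_FLAGS has bit 1 set as well (peer was primary, weight 2),
 * then rct == 3 and the DISCARD_CONCURRENT flag alone decides which
 * side becomes sync target. */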
2495
2496/* drbd_sync_handshake() returns the new conn state on success, or
2497 CONN_MASK (-1) on failure.
2498 */
2499static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2500 enum drbd_disk_state peer_disk) __must_hold(local)
2501{
2502 int hg, rule_nr;
2503 enum drbd_conns rv = C_MASK;
2504 enum drbd_disk_state mydisk;
2505
2506 mydisk = mdev->state.disk;
2507 if (mydisk == D_NEGOTIATING)
2508 mydisk = mdev->new_state_tmp.disk;
2509
2510 dev_info(DEV, "drbd_sync_handshake:\n");
2511 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2512 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2513 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2514
2515 hg = drbd_uuid_compare(mdev, &rule_nr);
2516
2517 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2518
2519 if (hg == -1000) {
2520 dev_alert(DEV, "Unrelated data, aborting!\n");
2521 return C_MASK;
2522 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002523 if (hg < -1000) {
2524 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002525 return C_MASK;
2526 }
2527
2528 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2529 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2530 int f = (hg == -100) || abs(hg) == 2;
2531 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2532 if (f)
2533 hg = hg*2;
2534 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2535 hg > 0 ? "source" : "target");
2536 }
2537
Adam Gandelman3a11a482010-04-08 16:48:23 -07002538 if (abs(hg) == 100)
2539 drbd_khelper(mdev, "initial-split-brain");
2540
Philipp Reisnerb411b362009-09-25 16:07:19 -07002541 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2542 int pcount = (mdev->state.role == R_PRIMARY)
2543 + (peer_role == R_PRIMARY);
2544 int forced = (hg == -100);
2545
2546 switch (pcount) {
2547 case 0:
2548 hg = drbd_asb_recover_0p(mdev);
2549 break;
2550 case 1:
2551 hg = drbd_asb_recover_1p(mdev);
2552 break;
2553 case 2:
2554 hg = drbd_asb_recover_2p(mdev);
2555 break;
2556 }
2557 if (abs(hg) < 100) {
2558 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2559 "automatically solved. Sync from %s node\n",
2560 pcount, (hg < 0) ? "peer" : "this");
2561 if (forced) {
2562 dev_warn(DEV, "Doing a full sync, since"
2563 " UUIDs where ambiguous.\n");
2564 hg = hg*2;
2565 }
2566 }
2567 }
2568
2569 if (hg == -100) {
2570 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2571 hg = -1;
2572 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2573 hg = 1;
2574
2575 if (abs(hg) < 100)
2576 dev_warn(DEV, "Split-Brain detected, manually solved. "
2577 "Sync from %s node\n",
2578 (hg < 0) ? "peer" : "this");
2579 }
2580
2581 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01002582 /* FIXME this log message is not correct if we end up here
2583 * after an attempted attach on a diskless node.
2584 * We just refuse to attach -- well, we drop the "connection"
2585 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07002586 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002587 drbd_khelper(mdev, "split-brain");
2588 return C_MASK;
2589 }
2590
2591 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2592 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2593 return C_MASK;
2594 }
2595
2596 if (hg < 0 && /* by intention we do not use mydisk here. */
2597 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2598 switch (mdev->net_conf->rr_conflict) {
2599 case ASB_CALL_HELPER:
2600 drbd_khelper(mdev, "pri-lost");
2601 /* fall through */
2602 case ASB_DISCONNECT:
2603 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2604 return C_MASK;
2605 case ASB_VIOLENTLY:
2606 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2607 "assumption\n");
2608 }
2609 }
2610
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002611 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2612 if (hg == 0)
2613 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2614 else
2615 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2616 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2617 abs(hg) >= 2 ? "full" : "bit-map based");
2618 return C_MASK;
2619 }
2620
Philipp Reisnerb411b362009-09-25 16:07:19 -07002621 if (abs(hg) >= 2) {
2622 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002623 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2624 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002625 return C_MASK;
2626 }
2627
2628 if (hg > 0) { /* become sync source. */
2629 rv = C_WF_BITMAP_S;
2630 } else if (hg < 0) { /* become sync target */
2631 rv = C_WF_BITMAP_T;
2632 } else {
2633 rv = C_CONNECTED;
2634 if (drbd_bm_total_weight(mdev)) {
2635 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2636 drbd_bm_total_weight(mdev));
2637 }
2638 }
2639
2640 return rv;
2641}
2642
2643/* returns 1 if invalid */
2644static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2645{
2646 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2647 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2648 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2649 return 0;
2650
2651 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2652 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2653 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2654 return 1;
2655
2656 /* everything else is valid if they are equal on both sides. */
2657 if (peer == self)
2658 return 0;
2659
2660 /* everything else is invalid. */
2661 return 1;
2662}
2663
Philipp Reisner02918be2010-08-20 14:35:10 +02002664static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002665{
Philipp Reisner02918be2010-08-20 14:35:10 +02002666 struct p_protocol *p = &mdev->data.rbuf.protocol;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002667 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002668 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002669 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2670
Philipp Reisnerb411b362009-09-25 16:07:19 -07002671 p_proto = be32_to_cpu(p->protocol);
2672 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2673 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2674 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002675 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002676 cf = be32_to_cpu(p->conn_flags);
2677 p_want_lose = cf & CF_WANT_LOSE;
2678
2679 clear_bit(CONN_DRY_RUN, &mdev->flags);
2680
2681 if (cf & CF_DRY_RUN)
2682 set_bit(CONN_DRY_RUN, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002683
2684 if (p_proto != mdev->net_conf->wire_protocol) {
2685 dev_err(DEV, "incompatible communication protocols\n");
2686 goto disconnect;
2687 }
2688
2689 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2690 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2691 goto disconnect;
2692 }
2693
2694 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2695 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2696 goto disconnect;
2697 }
2698
2699 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2700 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2701 goto disconnect;
2702 }
2703
2704 if (p_want_lose && mdev->net_conf->want_lose) {
2705 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2706 goto disconnect;
2707 }
2708
2709 if (p_two_primaries != mdev->net_conf->two_primaries) {
2710 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2711 goto disconnect;
2712 }
2713
2714 if (mdev->agreed_pro_version >= 87) {
2715 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2716
2717 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002718 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002719
2720 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2721 if (strcmp(p_integrity_alg, my_alg)) {
2722 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2723 goto disconnect;
2724 }
2725 dev_info(DEV, "data-integrity-alg: %s\n",
2726 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2727 }
2728
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002729 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002730
2731disconnect:
2732 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002733 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002734}
2735
2736/* helper function
2737 * input: alg name, feature name
2738 * return: NULL (alg name was "")
2739 * ERR_PTR(error) if something goes wrong
2740 * or the crypto hash ptr, if it worked out ok. */
2741struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2742 const char *alg, const char *name)
2743{
2744 struct crypto_hash *tfm;
2745
2746 if (!alg[0])
2747 return NULL;
2748
2749 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2750 if (IS_ERR(tfm)) {
2751 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2752 alg, name, PTR_ERR(tfm));
2753 return tfm;
2754 }
2755 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2756 crypto_free_hash(tfm);
2757 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2758 return ERR_PTR(-EINVAL);
2759 }
2760 return tfm;
2761}
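/* Minimal usage sketch for the helper above (a hypothetical caller, not
 * part of this file; "crc32c" is an arbitrary example name). The
 * tri-state return value needs two checks: NULL means "no algorithm
 * configured" and is fine, IS_ERR() means allocation or validation
 * failed and the helper already logged why. */
static int example_check_alg(struct drbd_conf *mdev)
{
	struct crypto_hash *tfm;

	tfm = drbd_crypto_alloc_digest_safe(mdev, "crc32c", "csums-alg");
	if (IS_ERR(tfm))
		return -EINVAL;		/* already logged by the helper */
	if (tfm)
		crypto_free_hash(tfm);	/* alg exists and is a digest */
	return 0;			/* NULL: empty alg, nothing to do */
}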
2762
Philipp Reisner02918be2010-08-20 14:35:10 +02002763static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002764{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002765 int ok = true;
Philipp Reisner02918be2010-08-20 14:35:10 +02002766 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002767 unsigned int header_size, data_size, exp_max_sz;
2768 struct crypto_hash *verify_tfm = NULL;
2769 struct crypto_hash *csums_tfm = NULL;
2770 const int apv = mdev->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02002771 int *rs_plan_s = NULL;
2772 int fifo_size = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002773
2774 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2775 : apv == 88 ? sizeof(struct p_rs_param)
2776 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002777 : apv <= 94 ? sizeof(struct p_rs_param_89)
2778 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002779
Philipp Reisner02918be2010-08-20 14:35:10 +02002780 if (packet_size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002781 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002782 packet_size, exp_max_sz);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002783 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002784 }
2785
2786 if (apv <= 88) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002787 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2788 data_size = packet_size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002789 } else if (apv <= 94) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002790 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2791 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002792 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002793 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02002794 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2795 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002796 D_ASSERT(data_size == 0);
2797 }
2798
2799 /* initialize verify_alg and csums_alg */
2800 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2801
Philipp Reisner02918be2010-08-20 14:35:10 +02002802 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002803 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002804
2805 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2806
2807 if (apv >= 88) {
2808 if (apv == 88) {
2809 if (data_size > SHARED_SECRET_MAX) {
2810 dev_err(DEV, "verify-alg too long, "
2811 "peer wants %u, accepting only %u byte\n",
2812 data_size, SHARED_SECRET_MAX);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002813 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002814 }
2815
2816 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002817 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002818
2819 /* we expect NUL terminated string */
2820 /* but just in case someone tries to be evil */
2821 D_ASSERT(p->verify_alg[data_size-1] == 0);
2822 p->verify_alg[data_size-1] = 0;
2823
2824 } else /* apv >= 89 */ {
2825 /* we still expect NUL terminated strings */
2826 /* but just in case someone tries to be evil */
2827 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2828 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2829 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2830 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2831 }
2832
2833 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2834 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2835 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2836 mdev->sync_conf.verify_alg, p->verify_alg);
2837 goto disconnect;
2838 }
2839 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2840 p->verify_alg, "verify-alg");
2841 if (IS_ERR(verify_tfm)) {
2842 verify_tfm = NULL;
2843 goto disconnect;
2844 }
2845 }
2846
2847 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2848 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2849 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2850 mdev->sync_conf.csums_alg, p->csums_alg);
2851 goto disconnect;
2852 }
2853 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2854 p->csums_alg, "csums-alg");
2855 if (IS_ERR(csums_tfm)) {
2856 csums_tfm = NULL;
2857 goto disconnect;
2858 }
2859 }
2860
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002861 if (apv > 94) {
2862 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2863 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2864 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2865 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2866 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02002867
2868 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2869 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2870 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2871 if (!rs_plan_s) {
2872 dev_err(DEV, "kmalloc of fifo_buffer failed");
2873 goto disconnect;
2874 }
2875 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002876 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002877
2878 spin_lock(&mdev->peer_seq_lock);
2879 /* lock against drbd_nl_syncer_conf() */
2880 if (verify_tfm) {
2881 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2882 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2883 crypto_free_hash(mdev->verify_tfm);
2884 mdev->verify_tfm = verify_tfm;
2885 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2886 }
2887 if (csums_tfm) {
2888 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2889 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2890 crypto_free_hash(mdev->csums_tfm);
2891 mdev->csums_tfm = csums_tfm;
2892 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2893 }
Philipp Reisner778f2712010-07-06 11:14:00 +02002894 if (fifo_size != mdev->rs_plan_s.size) {
2895 kfree(mdev->rs_plan_s.values);
2896 mdev->rs_plan_s.values = rs_plan_s;
2897 mdev->rs_plan_s.size = fifo_size;
2898 mdev->rs_planed = 0;
2899 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002900 spin_unlock(&mdev->peer_seq_lock);
2901 }
2902
2903 return ok;
2904disconnect:
2905 /* just for completeness: actually not needed,
2906 * as this is not reached if csums_tfm was ok. */
2907 crypto_free_hash(csums_tfm);
2908 /* but free the verify_tfm again, if csums_tfm did not work out */
2909 crypto_free_hash(verify_tfm);
2910 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002911 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002912}
2913
Philipp Reisnerb411b362009-09-25 16:07:19 -07002914/* warn if the arguments differ by more than 12.5% */
2915static void warn_if_differ_considerably(struct drbd_conf *mdev,
2916 const char *s, sector_t a, sector_t b)
2917{
2918 sector_t d;
2919 if (a == 0 || b == 0)
2920 return;
2921 d = (a > b) ? (a - b) : (b - a);
2922 if (d > (a>>3) || d > (b>>3))
2923 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2924 (unsigned long long)a, (unsigned long long)b);
2925}
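/* The shift by 3 above makes the threshold an eighth (12.5%) of either
 * value. Example with assumed numbers: a = 1000 and b = 800 sectors
 * give d = 200 and a>>3 = 125, so the warning fires; a = 1000 and
 * b = 950 give d = 50, which passes both checks silently. */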
2926
Philipp Reisner02918be2010-08-20 14:35:10 +02002927static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002928{
Philipp Reisner02918be2010-08-20 14:35:10 +02002929 struct p_sizes *p = &mdev->data.rbuf.sizes;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002930 enum determine_dev_size dd = unchanged;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002931 sector_t p_size, p_usize, my_usize;
2932 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01002933 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002934
Philipp Reisnerb411b362009-09-25 16:07:19 -07002935 p_size = be64_to_cpu(p->d_size);
2936 p_usize = be64_to_cpu(p->u_size);
2937
2938 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2939 dev_err(DEV, "some backing storage is needed\n");
2940 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002941 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002942 }
2943
2944 /* just store the peer's disk size for now.
2945 * we still need to figure out whether we accept that. */
2946 mdev->p_size = p_size;
2947
Philipp Reisnerb411b362009-09-25 16:07:19 -07002948 if (get_ldev(mdev)) {
2949 warn_if_differ_considerably(mdev, "lower level device sizes",
2950 p_size, drbd_get_max_capacity(mdev->ldev));
2951 warn_if_differ_considerably(mdev, "user requested size",
2952 p_usize, mdev->ldev->dc.disk_size);
2953
2954 /* if this is the first connect, or an otherwise expected
2955 * param exchange, choose the minimum */
2956 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2957 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2958 p_usize);
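		/*
		 * Illustration (hypothetical numbers): disk_size == 0 means
		 * "no explicit user limit", so min_not_zero(0, 800000) keeps
		 * the peer's 800000, while min_not_zero(500000, 800000) keeps
		 * our stricter 500000.
		 */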
2959
2960 my_usize = mdev->ldev->dc.disk_size;
2961
2962 if (mdev->ldev->dc.disk_size != p_usize) {
2963 mdev->ldev->dc.disk_size = p_usize;
2964 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2965 (unsigned long)mdev->ldev->dc.disk_size);
2966 }
2967
2968 /* Never shrink a device with usable data during connect.
2969 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01002970 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07002971 drbd_get_capacity(mdev->this_bdev) &&
2972 mdev->state.disk >= D_OUTDATED &&
2973 mdev->state.conn < C_CONNECTED) {
2974 dev_err(DEV, "The peer's disk size is too small!\n");
2975 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2976 mdev->ldev->dc.disk_size = my_usize;
2977 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002978 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002979 }
2980 put_ldev(mdev);
2981 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002982
Philipp Reisnere89b5912010-03-24 17:11:33 +01002983 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002984 if (get_ldev(mdev)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02002985 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002986 put_ldev(mdev);
2987 if (dd == dev_size_error)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002988 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002989 drbd_md_sync(mdev);
2990 } else {
2991 /* I am diskless, need to accept the peer's size. */
2992 drbd_set_my_capacity(mdev, p_size);
2993 }
2994
Philipp Reisner99432fc2011-05-20 16:39:13 +02002995 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
2996 drbd_reconsider_max_bio_size(mdev);
2997
Philipp Reisnerb411b362009-09-25 16:07:19 -07002998 if (get_ldev(mdev)) {
2999 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3000 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3001 ldsc = 1;
3002 }
3003
Philipp Reisnerb411b362009-09-25 16:07:19 -07003004 put_ldev(mdev);
3005 }
3006
3007 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3008 if (be64_to_cpu(p->c_size) !=
3009 drbd_get_capacity(mdev->this_bdev) || ldsc) {
 3010 /* we have different sizes, probably the peer
 3011 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003012 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003013 }
3014 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3015 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3016 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003017 mdev->state.disk >= D_INCONSISTENT) {
3018 if (ddsf & DDSF_NO_RESYNC)
3019 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3020 else
3021 resync_after_online_grow(mdev);
3022 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003023 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3024 }
3025 }
3026
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003027 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003028}
3029
Philipp Reisner02918be2010-08-20 14:35:10 +02003030static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003031{
Philipp Reisner02918be2010-08-20 14:35:10 +02003032 struct p_uuids *p = &mdev->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003033 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003034 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003035
Philipp Reisnerb411b362009-09-25 16:07:19 -07003036 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3037
3038 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3039 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3040
3041 kfree(mdev->p_uuid);
3042 mdev->p_uuid = p_uuid;
3043
3044 if (mdev->state.conn < C_CONNECTED &&
3045 mdev->state.disk < D_INCONSISTENT &&
3046 mdev->state.role == R_PRIMARY &&
3047 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3048 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3049 (unsigned long long)mdev->ed_uuid);
3050 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003051 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003052 }
3053
3054 if (get_ldev(mdev)) {
3055 int skip_initial_sync =
3056 mdev->state.conn == C_CONNECTED &&
3057 mdev->agreed_pro_version >= 90 &&
3058 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3059 (p_uuid[UI_FLAGS] & 8);
3060 if (skip_initial_sync) {
3061 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3062 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003063 "clear_n_write from receive_uuids",
3064 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003065 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3066 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3067 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3068 CS_VERBOSE, NULL);
3069 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003070 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003071 }
3072 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003073 } else if (mdev->state.disk < D_INCONSISTENT &&
3074 mdev->state.role == R_PRIMARY) {
3075 /* I am a diskless primary, the peer just created a new current UUID
3076 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003077 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003078 }
3079
 3080 /* Before we test for the disk state, we should wait until any
 3081 ongoing cluster-wide state change is finished. That is important if
 3082 we are primary and are detaching from our disk. We need to see the
 3083 new disk state... */
3084 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3085 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003086 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3087
3088 if (updated_uuids)
3089 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003090
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003091 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003092}
3093
3094/**
3095 * convert_state() - Converts the peer's view of the cluster state to our point of view
3096 * @ps: The state as seen by the peer.
3097 */
3098static union drbd_state convert_state(union drbd_state ps)
3099{
3100 union drbd_state ms;
3101
3102 static enum drbd_conns c_tab[] = {
3103 [C_CONNECTED] = C_CONNECTED,
3104
3105 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3106 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3107 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3108 [C_VERIFY_S] = C_VERIFY_T,
3109 [C_MASK] = C_MASK,
3110 };
3111
3112 ms.i = ps.i;
3113
3114 ms.conn = c_tab[ps.conn];
3115 ms.peer = ps.role;
3116 ms.role = ps.peer;
3117 ms.pdsk = ps.disk;
3118 ms.disk = ps.pdsk;
3119 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3120
3121 return ms;
3122}
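/*
 * Example conversion: if the peer reports
 *	{ role=Primary, peer=Secondary, disk=UpToDate, pdsk=Inconsistent,
 *	  conn=StartingSyncS },
 * then from our point of view this reads
 *	{ role=Secondary, peer=Primary, disk=Inconsistent, pdsk=UpToDate,
 *	  conn=StartingSyncT }.
 */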
3123
Philipp Reisner02918be2010-08-20 14:35:10 +02003124static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003125{
Philipp Reisner02918be2010-08-20 14:35:10 +02003126 struct p_req_state *p = &mdev->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003127 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003128 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003129
Philipp Reisnerb411b362009-09-25 16:07:19 -07003130 mask.i = be32_to_cpu(p->mask);
3131 val.i = be32_to_cpu(p->val);
3132
3133 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3134 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3135 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003136 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003137 }
3138
3139 mask = convert_state(mask);
3140 val = convert_state(val);
3141
3142 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3143
3144 drbd_send_sr_reply(mdev, rv);
3145 drbd_md_sync(mdev);
3146
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003147 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003148}
3149
Philipp Reisner02918be2010-08-20 14:35:10 +02003150static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003151{
Philipp Reisner02918be2010-08-20 14:35:10 +02003152 struct p_state *p = &mdev->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003153 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003154 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003155 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003156 int rv;
3157
Philipp Reisnerb411b362009-09-25 16:07:19 -07003158 peer_state.i = be32_to_cpu(p->state);
3159
3160 real_peer_disk = peer_state.disk;
3161 if (peer_state.disk == D_NEGOTIATING) {
3162 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3163 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3164 }
3165
3166 spin_lock_irq(&mdev->req_lock);
3167 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003168 os = ns = mdev->state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003169 spin_unlock_irq(&mdev->req_lock);
3170
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003171 /* peer says his disk is uptodate, while we think it is inconsistent,
3172 * and this happens while we think we have a sync going on. */
3173 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3174 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3175 /* If we are (becoming) SyncSource, but peer is still in sync
 3176 * preparation, ignore its uptodate-ness to avoid flapping; it
3177 * will change to inconsistent once the peer reaches active
3178 * syncing states.
3179 * It may have changed syncer-paused flags, however, so we
3180 * cannot ignore this completely. */
3181 if (peer_state.conn > C_CONNECTED &&
3182 peer_state.conn < C_SYNC_SOURCE)
3183 real_peer_disk = D_INCONSISTENT;
3184
3185 /* if peer_state changes to connected at the same time,
3186 * it explicitly notifies us that it finished resync.
3187 * Maybe we should finish it up, too? */
3188 else if (os.conn >= C_SYNC_SOURCE &&
3189 peer_state.conn == C_CONNECTED) {
3190 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3191 drbd_resync_finished(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003192 return true;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003193 }
3194 }
3195
3196 /* peer says his disk is inconsistent, while we think it is uptodate,
3197 * and this happens while the peer still thinks we have a sync going on,
3198 * but we think we are already done with the sync.
3199 * We ignore this to avoid flapping pdsk.
 3200 * This should not happen if the peer is a recent version of drbd. */
3201 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3202 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3203 real_peer_disk = D_UP_TO_DATE;
3204
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003205 if (ns.conn == C_WF_REPORT_PARAMS)
3206 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003207
Philipp Reisner67531712010-10-27 12:21:30 +02003208 if (peer_state.conn == C_AHEAD)
3209 ns.conn = C_BEHIND;
3210
Philipp Reisnerb411b362009-09-25 16:07:19 -07003211 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3212 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3213 int cr; /* consider resync */
3214
3215 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003216 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003217 /* if we had an established connection
3218 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003219 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003220 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003221 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003222 /* if we have both been inconsistent, and the peer has been
3223 * forced to be UpToDate with --overwrite-data */
3224 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3225 /* if we had been plain connected, and the admin requested to
3226 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003227 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003228 (peer_state.conn >= C_STARTING_SYNC_S &&
3229 peer_state.conn <= C_WF_BITMAP_T));
3230
3231 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003232 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003233
3234 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003235 if (ns.conn == C_MASK) {
3236 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003237 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003238 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003239 } else if (peer_state.disk == D_NEGOTIATING) {
3240 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3241 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003242 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003243 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003244 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003245 return false;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003246 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003247 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003248 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003249 }
3250 }
3251 }
3252
3253 spin_lock_irq(&mdev->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003254 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003255 goto retry;
3256 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003257 ns.peer = peer_state.role;
3258 ns.pdsk = real_peer_disk;
3259 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003260 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003261 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003262 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3263 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003264 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3265 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
 3266 for temporary network outages! */
3267 spin_unlock_irq(&mdev->req_lock);
3268 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3269 tl_clear(mdev);
3270 drbd_uuid_new_current(mdev);
3271 clear_bit(NEW_CUR_UUID, &mdev->flags);
3272 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003273 return false;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003274 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003275 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003276 ns = mdev->state;
3277 spin_unlock_irq(&mdev->req_lock);
3278
3279 if (rv < SS_SUCCESS) {
3280 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003281 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003282 }
3283
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003284 if (os.conn > C_WF_REPORT_PARAMS) {
3285 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003286 peer_state.disk != D_NEGOTIATING ) {
3287 /* we want resync, peer has not yet decided to sync... */
3288 /* Nowadays only used when forcing a node into primary role and
3289 setting its disk to UpToDate with that */
3290 drbd_send_uuids(mdev);
3291 drbd_send_state(mdev);
3292 }
3293 }
3294
3295 mdev->net_conf->want_lose = 0;
3296
3297 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3298
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003299 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003300}
3301
Philipp Reisner02918be2010-08-20 14:35:10 +02003302static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003303{
Philipp Reisner02918be2010-08-20 14:35:10 +02003304 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003305
3306 wait_event(mdev->misc_wait,
3307 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003308 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003309 mdev->state.conn < C_CONNECTED ||
3310 mdev->state.disk < D_NEGOTIATING);
3311
3312 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3313
Philipp Reisnerb411b362009-09-25 16:07:19 -07003314 /* Here the _drbd_uuid_ functions are right, current should
3315 _not_ be rotated into the history */
3316 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3317 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3318 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3319
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003320 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003321 drbd_start_resync(mdev, C_SYNC_TARGET);
3322
3323 put_ldev(mdev);
3324 } else
3325 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3326
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003327 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003328}
3329
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003330/**
3331 * receive_bitmap_plain
3332 *
3333 * Return 0 when done, 1 when another iteration is needed, and a negative error
3334 * code upon failure.
3335 */
3336static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003337receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3338 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003339{
3340 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3341 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003342 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003343
Philipp Reisner02918be2010-08-20 14:35:10 +02003344 if (want != data_size) {
3345 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003346 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003347 }
3348 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003349 return 0;
3350 err = drbd_recv(mdev, buffer, want);
3351 if (err != want) {
3352 if (err >= 0)
3353 err = -EIO;
3354 return err;
3355 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003356
3357 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3358
3359 c->word_offset += num_words;
3360 c->bit_offset = c->word_offset * BITS_PER_LONG;
3361 if (c->bit_offset > c->bm_bits)
3362 c->bit_offset = c->bm_bits;
3363
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003364 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003365}
3366
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003367/**
3368 * recv_bm_rle_bits
3369 *
3370 * Return 0 when done, 1 when another iteration is needed, and a negative error
3371 * code upon failure.
3372 */
3373static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003374recv_bm_rle_bits(struct drbd_conf *mdev,
3375 struct p_compressed_bm *p,
3376 struct bm_xfer_ctx *c)
3377{
3378 struct bitstream bs;
3379 u64 look_ahead;
3380 u64 rl;
3381 u64 tmp;
3382 unsigned long s = c->bit_offset;
3383 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003384 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003385 int toggle = DCBP_get_start(p);
3386 int have;
3387 int bits;
3388
3389 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3390
3391 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3392 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003393 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003394
3395 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3396 bits = vli_decode_bits(&rl, look_ahead);
3397 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003398 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003399
3400 if (toggle) {
3401 e = s + rl -1;
3402 if (e >= c->bm_bits) {
3403 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003404 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003405 }
3406 _drbd_bm_set_bits(mdev, s, e);
3407 }
3408
3409 if (have < bits) {
3410 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3411 have, bits, look_ahead,
3412 (unsigned int)(bs.cur.b - p->code),
3413 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003414 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003415 }
3416 look_ahead >>= bits;
3417 have -= bits;
3418
3419 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3420 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003421 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003422 look_ahead |= tmp << have;
3423 have += bits;
3424 }
3425
3426 c->bit_offset = s;
3427 bm_xfer_ctx_bit_to_word_offset(c);
3428
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003429 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003430}
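/*
 * Decoding example: with a start toggle of 0 and decoded run lengths of
 * 4, 2 and 3, the runs alternate clear/set/clear: bits 0-3 stay clear,
 * bits 4-5 are set via _drbd_bm_set_bits(), bits 6-8 stay clear, and
 * c->bit_offset ends up at 9.
 */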
3431
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003432/**
3433 * decode_bitmap_c
3434 *
3435 * Return 0 when done, 1 when another iteration is needed, and a negative error
3436 * code upon failure.
3437 */
3438static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003439decode_bitmap_c(struct drbd_conf *mdev,
3440 struct p_compressed_bm *p,
3441 struct bm_xfer_ctx *c)
3442{
3443 if (DCBP_get_code(p) == RLE_VLI_Bits)
3444 return recv_bm_rle_bits(mdev, p, c);
3445
3446 /* other variants had been implemented for evaluation,
3447 * but have been dropped as this one turned out to be "best"
3448 * during all our tests. */
3449
3450 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3451 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003452 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003453}
3454
3455void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3456 const char *direction, struct bm_xfer_ctx *c)
3457{
3458 /* what would it take to transfer it "plaintext" */
Philipp Reisner0b70a132010-08-20 13:36:10 +02003459 unsigned plain = sizeof(struct p_header80) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003460 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3461 + c->bm_words * sizeof(long);
3462 unsigned total = c->bytes[0] + c->bytes[1];
3463 unsigned r;
3464
 3465 /* total cannot be zero, but just in case: */
3466 if (total == 0)
3467 return;
3468
3469 /* don't report if not compressed */
3470 if (total >= plain)
3471 return;
3472
3473 /* total < plain. check for overflow, still */
3474 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3475 : (1000 * total / plain);
3476
3477 if (r > 1000)
3478 r = 1000;
3479
3480 r = 1000 - r;
3481 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3482 "total %u; compression: %u.%u%%\n",
3483 direction,
3484 c->bytes[1], c->packets[1],
3485 c->bytes[0], c->packets[0],
3486 total, r/10, r % 10);
3487}
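/*
 * Example of the ratio math above (hypothetical transfer): for
 * plain = 409600 bytes and total = 4096 bytes, r = 1000*4096/409600 = 10,
 * and 1000 - r = 990 is printed as "compression: 99.0%".
 */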
3488
 3489/* Since we are processing the bitfield from lower addresses to higher,
 3490 it does not matter if we process it in 32 bit chunks or 64 bit
 3491 chunks, as long as it is little endian. (Understand it as a byte stream,
 3492 beginning with the lowest byte...) If we used big endian,
 3493 we would need to process it from the highest address to the lowest
 3494 in order to be agnostic to the 32 vs 64 bit issue.
3495
3496 returns 0 on failure, 1 if we successfully received it. */
Philipp Reisner02918be2010-08-20 14:35:10 +02003497static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003498{
3499 struct bm_xfer_ctx c;
3500 void *buffer;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003501 int err;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003502 int ok = false;
Philipp Reisner02918be2010-08-20 14:35:10 +02003503 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003504
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003505 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3506 /* you are supposed to send additional out-of-sync information
3507 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003508
3509 /* maybe we should use some per thread scratch page,
3510 * and allocate that during initial device creation? */
3511 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3512 if (!buffer) {
3513 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3514 goto out;
3515 }
3516
3517 c = (struct bm_xfer_ctx) {
3518 .bm_bits = drbd_bm_bits(mdev),
3519 .bm_words = drbd_bm_words(mdev),
3520 };
3521
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003522 for(;;) {
Philipp Reisner02918be2010-08-20 14:35:10 +02003523 if (cmd == P_BITMAP) {
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003524 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
Philipp Reisner02918be2010-08-20 14:35:10 +02003525 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003526 /* MAYBE: sanity check that we speak proto >= 90,
3527 * and the feature is enabled! */
3528 struct p_compressed_bm *p;
3529
Philipp Reisner02918be2010-08-20 14:35:10 +02003530 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003531 dev_err(DEV, "ReportCBitmap packet too large\n");
3532 goto out;
3533 }
 3534 /* use the page buffer */
3535 p = buffer;
3536 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003537 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003538 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003539 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3540 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003541 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003542 }
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003543 err = decode_bitmap_c(mdev, p, &c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003544 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003545 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003546 goto out;
3547 }
3548
Philipp Reisner02918be2010-08-20 14:35:10 +02003549 c.packets[cmd == P_BITMAP]++;
3550 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003551
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003552 if (err <= 0) {
3553 if (err < 0)
3554 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003555 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003556 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003557 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003558 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003559 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003560
3561 INFO_bm_xfer_stats(mdev, "receive", &c);
3562
3563 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003564 enum drbd_state_rv rv;
3565
Philipp Reisnerb411b362009-09-25 16:07:19 -07003566 ok = !drbd_send_bitmap(mdev);
3567 if (!ok)
3568 goto out;
3569 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003570 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3571 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003572 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3573 /* admin may have requested C_DISCONNECTING,
3574 * other threads may have noticed network errors */
3575 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3576 drbd_conn_str(mdev->state.conn));
3577 }
3578
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003579 ok = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003580 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003581 drbd_bm_unlock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003582 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3583 drbd_start_resync(mdev, C_SYNC_SOURCE);
3584 free_page((unsigned long) buffer);
3585 return ok;
3586}
3587
Philipp Reisner02918be2010-08-20 14:35:10 +02003588static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003589{
3590 /* TODO zero copy sink :) */
3591 static char sink[128];
3592 int size, want, r;
3593
Philipp Reisner02918be2010-08-20 14:35:10 +02003594 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3595 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003596
Philipp Reisner02918be2010-08-20 14:35:10 +02003597 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003598 while (size > 0) {
3599 want = min_t(int, size, sizeof(sink));
3600 r = drbd_recv(mdev, sink, want);
3601 ERR_IF(r <= 0) break;
3602 size -= r;
3603 }
3604 return size == 0;
3605}
3606
Philipp Reisner02918be2010-08-20 14:35:10 +02003607static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003608{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003609 /* Make sure we've acked all the TCP data associated
3610 * with the data requests being unplugged */
3611 drbd_tcp_quickack(mdev->data.socket);
3612
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003613 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003614}
3615
Philipp Reisner73a01a12010-10-27 14:33:00 +02003616static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3617{
3618 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3619
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003620 switch (mdev->state.conn) {
3621 case C_WF_SYNC_UUID:
3622 case C_WF_BITMAP_T:
3623 case C_BEHIND:
3624 break;
3625 default:
3626 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3627 drbd_conn_str(mdev->state.conn));
3628 }
3629
Philipp Reisner73a01a12010-10-27 14:33:00 +02003630 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3631
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003632 return true;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003633}
3634
Philipp Reisner02918be2010-08-20 14:35:10 +02003635typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003636
Philipp Reisner02918be2010-08-20 14:35:10 +02003637struct data_cmd {
3638 int expect_payload;
3639 size_t pkt_size;
3640 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003641};
3642
Philipp Reisner02918be2010-08-20 14:35:10 +02003643static struct data_cmd drbd_cmd_handler[] = {
3644 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3645 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3646 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3647 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3648 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3649 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3650 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3651 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3652 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3653 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3654 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3655 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3656 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3657 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3658 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3659 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3660 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3661 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3662 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3663 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3664 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003665 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003666 /* anything missing from this table is in
3667 * the asender_tbl, see get_asender_cmd */
3668 [P_MAX_CMD] = { 0, 0, NULL },
3669};
3670
 3671/* All handler functions that expect a sub-header get that sub-header in
 3672 mdev->data.rbuf.header.head.payload.
 3673
 3674 Usually the callback can find the usual p_header in
 3675 mdev->data.rbuf.header.head, but it may not rely on that: there is also p_header95!
3676 */
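/* Dispatch example: for a P_SIZES packet, pkt_size is sizeof(struct p_sizes),
 * so drbdd() below reads shs = sizeof(struct p_sizes) - sizeof(union p_header)
 * sub-header bytes into the receive buffer and then calls
 * receive_sizes(mdev, P_SIZES, packet_size - shs). */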
Philipp Reisnerb411b362009-09-25 16:07:19 -07003677
3678static void drbdd(struct drbd_conf *mdev)
3679{
Philipp Reisner02918be2010-08-20 14:35:10 +02003680 union p_header *header = &mdev->data.rbuf.header;
3681 unsigned int packet_size;
3682 enum drbd_packets cmd;
3683 size_t shs; /* sub header size */
3684 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003685
3686 while (get_t_state(&mdev->receiver) == Running) {
3687 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003688 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3689 goto err_out;
3690
3691 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3692 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3693 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003694 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003695
Philipp Reisner02918be2010-08-20 14:35:10 +02003696 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02003697 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3698 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3699 goto err_out;
3700 }
3701
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003702 if (shs) {
3703 rv = drbd_recv(mdev, &header->h80.payload, shs);
3704 if (unlikely(rv != shs)) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003705 if (!signal_pending(current))
3706 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003707 goto err_out;
3708 }
3709 }
3710
Philipp Reisner02918be2010-08-20 14:35:10 +02003711 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3712
3713 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003714 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003715 cmdname(cmd), packet_size);
3716 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003717 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003718 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003719
Philipp Reisner02918be2010-08-20 14:35:10 +02003720 if (0) {
3721 err_out:
3722 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003723 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003724 /* If we leave here, we probably want to update at least the
3725 * "Connected" indicator on stable storage. Do so explicitly here. */
3726 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003727}
3728
3729void drbd_flush_workqueue(struct drbd_conf *mdev)
3730{
3731 struct drbd_wq_barrier barr;
3732
3733 barr.w.cb = w_prev_work_done;
3734 init_completion(&barr.done);
3735 drbd_queue_work(&mdev->data.work, &barr.w);
3736 wait_for_completion(&barr.done);
3737}
3738
Philipp Reisnerf70b35112010-06-24 14:34:40 +02003739void drbd_free_tl_hash(struct drbd_conf *mdev)
3740{
3741 struct hlist_head *h;
3742
3743 spin_lock_irq(&mdev->req_lock);
3744
3745 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3746 spin_unlock_irq(&mdev->req_lock);
3747 return;
3748 }
3749 /* paranoia code */
3750 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3751 if (h->first)
3752 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3753 (int)(h - mdev->ee_hash), h->first);
3754 kfree(mdev->ee_hash);
3755 mdev->ee_hash = NULL;
3756 mdev->ee_hash_s = 0;
3757
3758 /* paranoia code */
3759 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3760 if (h->first)
3761 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3762 (int)(h - mdev->tl_hash), h->first);
3763 kfree(mdev->tl_hash);
3764 mdev->tl_hash = NULL;
3765 mdev->tl_hash_s = 0;
3766 spin_unlock_irq(&mdev->req_lock);
3767}
3768
Philipp Reisnerb411b362009-09-25 16:07:19 -07003769static void drbd_disconnect(struct drbd_conf *mdev)
3770{
3771 enum drbd_fencing_p fp;
3772 union drbd_state os, ns;
3773 int rv = SS_UNKNOWN_ERROR;
3774 unsigned int i;
3775
3776 if (mdev->state.conn == C_STANDALONE)
3777 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003778
3779 /* asender does not clean up anything. it must not interfere, either */
3780 drbd_thread_stop(&mdev->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003781 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003782
Philipp Reisner85719572010-07-21 10:20:17 +02003783 /* wait for current activity to cease. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003784 spin_lock_irq(&mdev->req_lock);
3785 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3786 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3787 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3788 spin_unlock_irq(&mdev->req_lock);
3789
3790 /* We do not have data structures that would allow us to
3791 * get the rs_pending_cnt down to 0 again.
3792 * * On C_SYNC_TARGET we do not have any data structures describing
3793 * the pending RSDataRequest's we have sent.
3794 * * On C_SYNC_SOURCE there is no data structure that tracks
3795 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3796 * And no, it is not the sum of the reference counts in the
3797 * resync_LRU. The resync_LRU tracks the whole operation including
3798 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3799 * on the fly. */
3800 drbd_rs_cancel_all(mdev);
3801 mdev->rs_total = 0;
3802 mdev->rs_failed = 0;
3803 atomic_set(&mdev->rs_pending_cnt, 0);
3804 wake_up(&mdev->misc_wait);
3805
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003806 del_timer(&mdev->request_timer);
3807
Philipp Reisnerb411b362009-09-25 16:07:19 -07003808 /* make sure syncer is stopped and w_resume_next_sg queued */
3809 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003810 resync_timer_fn((unsigned long)mdev);
3811
Philipp Reisnerb411b362009-09-25 16:07:19 -07003812 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3813 * w_make_resync_request etc. which may still be on the worker queue
3814 * to be "canceled" */
3815 drbd_flush_workqueue(mdev);
3816
3817 /* This also does reclaim_net_ee(). If we do this too early, we might
3818 * miss some resync ee and pages.*/
3819 drbd_process_done_ee(mdev);
3820
3821 kfree(mdev->p_uuid);
3822 mdev->p_uuid = NULL;
3823
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003824 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003825 tl_clear(mdev);
3826
Philipp Reisnerb411b362009-09-25 16:07:19 -07003827 dev_info(DEV, "Connection closed\n");
3828
3829 drbd_md_sync(mdev);
3830
3831 fp = FP_DONT_CARE;
3832 if (get_ldev(mdev)) {
3833 fp = mdev->ldev->dc.fencing;
3834 put_ldev(mdev);
3835 }
3836
Philipp Reisner87f7be42010-06-11 13:56:33 +02003837 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3838 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003839
3840 spin_lock_irq(&mdev->req_lock);
3841 os = mdev->state;
3842 if (os.conn >= C_UNCONNECTED) {
3843 /* Do not restart in case we are C_DISCONNECTING */
3844 ns = os;
3845 ns.conn = C_UNCONNECTED;
3846 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3847 }
3848 spin_unlock_irq(&mdev->req_lock);
3849
3850 if (os.conn == C_DISCONNECTING) {
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003851 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003852
Philipp Reisnerb411b362009-09-25 16:07:19 -07003853 crypto_free_hash(mdev->cram_hmac_tfm);
3854 mdev->cram_hmac_tfm = NULL;
3855
3856 kfree(mdev->net_conf);
3857 mdev->net_conf = NULL;
3858 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3859 }
3860
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003861 /* serialize with bitmap writeout triggered by the state change,
3862 * if any. */
3863 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3864
Philipp Reisnerb411b362009-09-25 16:07:19 -07003865 /* tcp_close and release of sendpage pages can be deferred. I don't
3866 * want to use SO_LINGER, because apparently it can be deferred for
3867 * more than 20 seconds (longest time I checked).
3868 *
3869 * Actually we don't care for exactly when the network stack does its
3870 * put_page(), but release our reference on these pages right here.
3871 */
3872 i = drbd_release_ee(mdev, &mdev->net_ee);
3873 if (i)
3874 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003875 i = atomic_read(&mdev->pp_in_use_by_net);
3876 if (i)
3877 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003878 i = atomic_read(&mdev->pp_in_use);
3879 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003880 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003881
3882 D_ASSERT(list_empty(&mdev->read_ee));
3883 D_ASSERT(list_empty(&mdev->active_ee));
3884 D_ASSERT(list_empty(&mdev->sync_ee));
3885 D_ASSERT(list_empty(&mdev->done_ee));
3886
3887 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3888 atomic_set(&mdev->current_epoch->epoch_size, 0);
3889 D_ASSERT(list_empty(&mdev->current_epoch->list));
3890}
3891
3892/*
3893 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3894 * we can agree on is stored in agreed_pro_version.
3895 *
3896 * feature flags and the reserved array should be enough room for future
3897 * enhancements of the handshake protocol, and possible plugins...
3898 *
3899 * for now, they are expected to be zero, but ignored.
3900 */
3901static int drbd_send_handshake(struct drbd_conf *mdev)
3902{
3903 /* ASSERT current == mdev->receiver ... */
3904 struct p_handshake *p = &mdev->data.sbuf.handshake;
3905 int ok;
3906
3907 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3908 dev_err(DEV, "interrupted during initial handshake\n");
3909 return 0; /* interrupted. not ok. */
3910 }
3911
3912 if (mdev->data.socket == NULL) {
3913 mutex_unlock(&mdev->data.mutex);
3914 return 0;
3915 }
3916
3917 memset(p, 0, sizeof(*p));
3918 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3919 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3920 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02003921 (struct p_header80 *)p, sizeof(*p), 0 );
Philipp Reisnerb411b362009-09-25 16:07:19 -07003922 mutex_unlock(&mdev->data.mutex);
3923 return ok;
3924}
3925
3926/*
3927 * return values:
3928 * 1 yes, we have a valid connection
3929 * 0 oops, did not work out, please try again
3930 * -1 peer talks different language,
3931 * no point in trying again, please go standalone.
3932 */
3933static int drbd_do_handshake(struct drbd_conf *mdev)
3934{
3935 /* ASSERT current == mdev->receiver ... */
3936 struct p_handshake *p = &mdev->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02003937 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3938 unsigned int length;
3939 enum drbd_packets cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003940 int rv;
3941
3942 rv = drbd_send_handshake(mdev);
3943 if (!rv)
3944 return 0;
3945
Philipp Reisner02918be2010-08-20 14:35:10 +02003946 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003947 if (!rv)
3948 return 0;
3949
Philipp Reisner02918be2010-08-20 14:35:10 +02003950 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003951 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003952 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003953 return -1;
3954 }
3955
Philipp Reisner02918be2010-08-20 14:35:10 +02003956 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003957 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003958 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003959 return -1;
3960 }
3961
3962 rv = drbd_recv(mdev, &p->head.payload, expect);
3963
3964 if (rv != expect) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003965 if (!signal_pending(current))
3966 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003967 return 0;
3968 }
3969
Philipp Reisnerb411b362009-09-25 16:07:19 -07003970 p->protocol_min = be32_to_cpu(p->protocol_min);
3971 p->protocol_max = be32_to_cpu(p->protocol_max);
3972 if (p->protocol_max == 0)
3973 p->protocol_max = p->protocol_min;
3974
3975 if (PRO_VERSION_MAX < p->protocol_min ||
3976 PRO_VERSION_MIN > p->protocol_max)
3977 goto incompat;
3978
3979 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3980
3981 dev_info(DEV, "Handshake successful: "
3982 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3983
3984 return 1;
3985
3986 incompat:
3987 dev_err(DEV, "incompatible DRBD dialects: "
3988 "I support %d-%d, peer supports %d-%d\n",
3989 PRO_VERSION_MIN, PRO_VERSION_MAX,
3990 p->protocol_min, p->protocol_max);
3991 return -1;
3992}
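/*
 * Negotiation example (illustrative version numbers only): if we support
 * protocols 86..96 and the peer reports 90..100, the ranges overlap, so
 * agreed_pro_version = min(96, 100) = 96.
 */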
3993
3994#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3995static int drbd_do_auth(struct drbd_conf *mdev)
3996{
3997 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
3998 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01003999 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004000}
4001#else
4002#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004003
4004/* Return value:
4005 1 - auth succeeded,
4006 0 - failed, try again (network error),
4007 -1 - auth failed, don't try again.
4008*/
4009
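/* The exchange implemented below; both peers run it symmetrically:
 *   1. key the HMAC transform with the shared secret,
 *   2. send CHALLENGE_LEN random bytes as P_AUTH_CHALLENGE,
 *   3. receive the peer's challenge, send back
 *      HMAC(secret, peer's challenge) as P_AUTH_RESPONSE,
 *   4. receive the peer's response and compare it against a locally
 *      computed HMAC(secret, my_challenge).
 */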
Philipp Reisnerb411b362009-09-25 16:07:19 -07004010static int drbd_do_auth(struct drbd_conf *mdev)
4011{
4012 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4013 struct scatterlist sg;
4014 char *response = NULL;
4015 char *right_response = NULL;
4016 char *peers_ch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004017 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4018 unsigned int resp_size;
4019 struct hash_desc desc;
Philipp Reisner02918be2010-08-20 14:35:10 +02004020 enum drbd_packets cmd;
4021 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004022 int rv;
4023
4024 desc.tfm = mdev->cram_hmac_tfm;
4025 desc.flags = 0;
4026
4027 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4028 (u8 *)mdev->net_conf->shared_secret, key_len);
4029 if (rv) {
4030 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004031 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004032 goto fail;
4033 }
4034
4035 get_random_bytes(my_challenge, CHALLENGE_LEN);
4036
4037 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4038 if (!rv)
4039 goto fail;
4040
Philipp Reisner02918be2010-08-20 14:35:10 +02004041 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004042 if (!rv)
4043 goto fail;
4044
Philipp Reisner02918be2010-08-20 14:35:10 +02004045 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004046 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004047 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004048 rv = 0;
4049 goto fail;
4050 }
4051
Philipp Reisner02918be2010-08-20 14:35:10 +02004052 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004053 dev_err(DEV, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004054 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004055 goto fail;
4056 }
4057
Philipp Reisner02918be2010-08-20 14:35:10 +02004058 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004059 if (peers_ch == NULL) {
4060 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004061 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004062 goto fail;
4063 }
4064
Philipp Reisner02918be2010-08-20 14:35:10 +02004065 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004066
Philipp Reisner02918be2010-08-20 14:35:10 +02004067 if (rv != length) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004068 if (!signal_pending(current))
4069 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004070 rv = 0;
4071 goto fail;
4072 }
4073
4074 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4075 response = kmalloc(resp_size, GFP_NOIO);
4076 if (response == NULL) {
4077 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004078 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004079 goto fail;
4080 }
4081
4082 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004083 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004084
4085 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4086 if (rv) {
4087 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004088 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004089 goto fail;
4090 }
4091
4092 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4093 if (!rv)
4094 goto fail;
4095
Philipp Reisner02918be2010-08-20 14:35:10 +02004096 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004097 if (!rv)
4098 goto fail;
4099
Philipp Reisner02918be2010-08-20 14:35:10 +02004100 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004101 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004102 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004103 rv = 0;
4104 goto fail;
4105 }
4106
Philipp Reisner02918be2010-08-20 14:35:10 +02004107 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004108 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4109 rv = 0;
4110 goto fail;
4111 }
4112
4113 rv = drbd_recv(mdev, response , resp_size);
4114
4115 if (rv != resp_size) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004116 if (!signal_pending(current))
4117 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004118 rv = 0;
4119 goto fail;
4120 }
4121
4122 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004123 if (right_response == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004124 dev_err(DEV, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004125 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004126 goto fail;
4127 }
4128
4129 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4130
4131 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4132 if (rv) {
4133 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004134 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004135 goto fail;
4136 }
4137
4138 rv = !memcmp(response, right_response, resp_size);
4139
4140 if (rv)
4141 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4142 resp_size, mdev->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004143 else
4144 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004145
4146 fail:
4147 kfree(peers_ch);
4148 kfree(response);
4149 kfree(right_response);
4150
4151 return rv;
4152}
4153#endif
4154
4155int drbdd_init(struct drbd_thread *thi)
4156{
4157 struct drbd_conf *mdev = thi->mdev;
4158 unsigned int minor = mdev_to_minor(mdev);
4159 int h;
4160
4161 sprintf(current->comm, "drbd%d_receiver", minor);
4162
4163 dev_info(DEV, "receiver (re)started\n");
4164
4165 do {
4166 h = drbd_connect(mdev);
4167 if (h == 0) {
4168 drbd_disconnect(mdev);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004169 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004170 }
4171 if (h == -1) {
4172 dev_warn(DEV, "Discarding network configuration.\n");
4173 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4174 }
4175 } while (h == 0);
4176
4177 if (h > 0) {
4178 if (get_net_conf(mdev)) {
4179 drbdd(mdev);
4180 put_net_conf(mdev);
4181 }
4182 }
4183
4184 drbd_disconnect(mdev);
4185
4186 dev_info(DEV, "receiver terminated\n");
4187 return 0;
4188}
4189
4190/* ********* acknowledge sender ******** */
4191
Philipp Reisner0b70a132010-08-20 13:36:10 +02004192static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004193{
4194 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4195
4196 int retcode = be32_to_cpu(p->retcode);
4197
4198 if (retcode >= SS_SUCCESS) {
4199 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4200 } else {
4201 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4202 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4203 drbd_set_st_err_str(retcode), retcode);
4204 }
4205 wake_up(&mdev->state_wait);
4206
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004207 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004208}
4209
Philipp Reisner0b70a132010-08-20 13:36:10 +02004210static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004211{
4212 return drbd_send_ping_ack(mdev);
4213
4214}
4215
Philipp Reisner0b70a132010-08-20 13:36:10 +02004216static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004217{
4218 /* restore idle timeout */
4219 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
Philipp Reisner309d1602010-03-02 15:03:44 +01004220 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4221 wake_up(&mdev->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004222
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004223 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004224}
4225
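/*
 * P_RS_IS_IN_SYNC: during checksum-based resync (protocol >= 89) the peer
 * compared the checksum we sent against its local data and found the block
 * identical, so no data had to travel.  Mark the area in sync and account
 * it under rs_same_csum.
 */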
Philipp Reisner0b70a132010-08-20 13:36:10 +02004226static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004227{
4228 struct p_block_ack *p = (struct p_block_ack *)h;
4229 sector_t sector = be64_to_cpu(p->sector);
4230 int blksize = be32_to_cpu(p->blksize);
4231
4232 D_ASSERT(mdev->agreed_pro_version >= 89);
4233
4234 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4235
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004236 if (get_ldev(mdev)) {
4237 drbd_rs_complete_io(mdev, sector);
4238 drbd_set_in_sync(mdev, sector, blksize);
4239 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4240 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4241 put_ldev(mdev);
4242 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004243 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004244 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004245
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004246 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004247}
4248
4249/* when we receive the ACK for a write request,
4250 * verify that we actually know about it */
4251static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4252 u64 id, sector_t sector)
4253{
4254 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4255 struct hlist_node *n;
4256 struct drbd_request *req;
4257
Bart Van Assche24c48302011-05-21 18:32:29 +02004258 hlist_for_each_entry(req, n, slot, collision) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004259 if ((unsigned long)req == (unsigned long)id) {
4260 if (req->sector != sector) {
4261 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4262 "wrong sector (%llus versus %llus)\n", req,
4263 (unsigned long long)req->sector,
4264 (unsigned long long)sector);
4265 break;
4266 }
4267 return req;
4268 }
4269 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004270 return NULL;
4271}
4272
4273typedef struct drbd_request *(req_validator_fn)
4274 (struct drbd_conf *mdev, u64 id, sector_t sector);
4275
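/* Look up the request a peer ack refers to and apply the corresponding
 * state transition.  The validator translates the opaque block_id from
 * the wire back into a struct drbd_request; it must run under req_lock,
 * which this helper acquires around the lookup and the __req_mod() call. */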
4276static int validate_req_change_req_state(struct drbd_conf *mdev,
4277 u64 id, sector_t sector, req_validator_fn validator,
4278 const char *func, enum drbd_req_event what)
4279{
4280 struct drbd_request *req;
4281 struct bio_and_error m;
4282
4283 spin_lock_irq(&mdev->req_lock);
4284 req = validator(mdev, id, sector);
4285 if (unlikely(!req)) {
4286 spin_unlock_irq(&mdev->req_lock);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004287
4288 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
4289 (void *)(unsigned long)id, (unsigned long long)sector);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004290 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004291 }
4292 __req_mod(req, what, &m);
4293 spin_unlock_irq(&mdev->req_lock);
4294
4295 if (m.bio)
4296 complete_master_bio(mdev, &m);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004297 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004298}
4299
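/*
 * Positive acknowledgments for (resync) writes.  Which packet the peer
 * sends depends on the wire protocol, cf. the D_ASSERTs below:
 *   P_RECV_ACK                   - protocol B, data received
 *   P_WRITE_ACK, P_RS_WRITE_ACK  - protocol C, data written to stable storage
 *   P_DISCARD_ACK                - protocol C, concurrent write was discarded
 * Syncer blocks are marked in sync directly; anything else is matched back
 * to its pending request and fed into the request state machine.
 */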
Philipp Reisner0b70a132010-08-20 13:36:10 +02004300static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004301{
4302 struct p_block_ack *p = (struct p_block_ack *)h;
4303 sector_t sector = be64_to_cpu(p->sector);
4304 int blksize = be32_to_cpu(p->blksize);
4305 enum drbd_req_event what;
4306
4307 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4308
4309 if (is_syncer_block_id(p->block_id)) {
4310 drbd_set_in_sync(mdev, sector, blksize);
4311 dec_rs_pending(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004312 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004313 }
4314 switch (be16_to_cpu(h->command)) {
4315 case P_RS_WRITE_ACK:
4316 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4317 what = write_acked_by_peer_and_sis;
4318 break;
4319 case P_WRITE_ACK:
4320 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4321 what = write_acked_by_peer;
4322 break;
4323 case P_RECV_ACK:
4324 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4325 what = recv_acked_by_peer;
4326 break;
4327 case P_DISCARD_ACK:
4328 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4329 what = conflict_discarded_by_peer;
4330 break;
4331 default:
4332 D_ASSERT(0);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004333 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004334 }
4335
4336 return validate_req_change_req_state(mdev, p->block_id, sector,
4337 _ack_id_to_req, __func__, what);
4338}
4339
Philipp Reisner0b70a132010-08-20 13:36:10 +02004340static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004341{
4342 struct p_block_ack *p = (struct p_block_ack *)h;
4343 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004344 int size = be32_to_cpu(p->blksize);
4345 struct drbd_request *req;
4346 struct bio_and_error m;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004347
4348 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4349
4350 if (is_syncer_block_id(p->block_id)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004351 dec_rs_pending(mdev);
4352 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004353 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004354 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01004355
4356 spin_lock_irq(&mdev->req_lock);
4357 req = _ack_id_to_req(mdev, p->block_id, sector);
4358 if (!req) {
4359 spin_unlock_irq(&mdev->req_lock);
4360 if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
4361 mdev->net_conf->wire_protocol == DRBD_PROT_B) {
4362 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4363 The master bio might already be completed, therefore the
4364 request is no longer in the collision hash.
4365 => Do not try to validate block_id as request. */
4366 /* In Protocol B we might already have got a P_RECV_ACK
4367 but then get a P_NEG_ACK afterwards. */
4368 drbd_set_out_of_sync(mdev, sector, size);
4369 return true;
4370 } else {
4371 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
4372 (void *)(unsigned long)p->block_id, (unsigned long long)sector);
4373 return false;
4374 }
4375 }
4376 __req_mod(req, neg_acked, &m);
4377 spin_unlock_irq(&mdev->req_lock);
4378
4379 if (m.bio)
4380 complete_master_bio(mdev, &m);
4381 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004382}
4383
Philipp Reisner0b70a132010-08-20 13:36:10 +02004384static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004385{
4386 struct p_block_ack *p = (struct p_block_ack *)h;
4387 sector_t sector = be64_to_cpu(p->sector);
4388
4389 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4390 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4391 (unsigned long long)sector, be32_to_cpu(p->blksize));
4392
4393 return validate_req_change_req_state(mdev, p->block_id, sector,
4394 _ar_id_to_req, __func__, neg_acked);
4395}
4396
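/*
 * Negative replies for resync reads.  P_NEG_RS_DREPLY means the peer could
 * not read the requested area, so account it as failed resync I/O;
 * P_RS_CANCEL (routed here as well, see asender_tbl below) cancels the
 * request without that accounting.
 */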
Philipp Reisner0b70a132010-08-20 13:36:10 +02004397static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004398{
4399 sector_t sector;
4400 int size;
4401 struct p_block_ack *p = (struct p_block_ack *)h;
4402
4403 sector = be64_to_cpu(p->sector);
4404 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004405
4406 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4407
4408 dec_rs_pending(mdev);
4409
4410 if (get_ldev_if_state(mdev, D_FAILED)) {
4411 drbd_rs_complete_io(mdev, sector);
Philipp Reisnerd612d302010-12-27 10:53:28 +01004412 switch (be16_to_cpu(h->command)) {
4413 case P_NEG_RS_DREPLY:
4414 drbd_rs_failed_io(mdev, sector, size); /* fall through */
4415 case P_RS_CANCEL:
4416 break;
4417 default:
4418 D_ASSERT(0);
4419 put_ldev(mdev);
4420 return false;
4421 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004422 put_ldev(mdev);
4423 }
4424
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004425 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004426}
4427
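/*
 * P_BARRIER_ACK: the peer has finished a whole epoch; release the
 * corresponding requests from the transfer log.  If we are currently
 * Ahead of the peer and no application writes are in flight, arm a one
 * second timer that will transition us back to being a resync source.
 */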
Philipp Reisner0b70a132010-08-20 13:36:10 +02004428static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004429{
4430 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4431
4432 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4433
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004434 if (mdev->state.conn == C_AHEAD &&
4435 atomic_read(&mdev->ap_in_flight) == 0 &&
Philipp Reisner370a43e2011-01-14 16:03:11 +01004436 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4437 mdev->start_resync_timer.expires = jiffies + HZ;
4438 add_timer(&mdev->start_resync_timer);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004439 }
4440
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004441 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004442}
4443
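/*
 * P_OV_RESULT: the peer's verdict for one online-verify block.  Record any
 * out-of-sync area, advance the progress marks, and after the last reply
 * (ov_left == 0) queue w_ov_finished so the worker can finish the verify.
 */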
Philipp Reisner0b70a132010-08-20 13:36:10 +02004444static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004445{
4446 struct p_block_ack *p = (struct p_block_ack *)h;
4447 struct drbd_work *w;
4448 sector_t sector;
4449 int size;
4450
4451 sector = be64_to_cpu(p->sector);
4452 size = be32_to_cpu(p->blksize);
4453
4454 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4455
4456 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4457 drbd_ov_oos_found(mdev, sector, size);
4458 else
4459 ov_oos_print(mdev);
4460
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004461 if (!get_ldev(mdev))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004462 return true;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004463
Philipp Reisnerb411b362009-09-25 16:07:19 -07004464 drbd_rs_complete_io(mdev, sector);
4465 dec_rs_pending(mdev);
4466
Lars Ellenbergea5442a2010-11-05 09:48:01 +01004467 --mdev->ov_left;
4468
4469 /* let's advance progress step marks only for every other megabyte */
4470 if ((mdev->ov_left & 0x200) == 0x200)
4471 drbd_advance_rs_marks(mdev, mdev->ov_left);
4472
4473 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004474 w = kmalloc(sizeof(*w), GFP_NOIO);
4475 if (w) {
4476 w->cb = w_ov_finished;
4477 drbd_queue_work_front(&mdev->data.work, w);
4478 } else {
4479 dev_err(DEV, "kmalloc(w) failed.");
4480 ov_oos_print(mdev);
4481 drbd_resync_finished(mdev);
4482 }
4483 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004484 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004485 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004486}
4487
Philipp Reisner02918be2010-08-20 14:35:10 +02004488static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004489{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004490 return true;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004491}
4492
Philipp Reisnerb411b362009-09-25 16:07:19 -07004493struct asender_cmd {
4494 size_t pkt_size;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004495 int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004496};
4497
4498static struct asender_cmd *get_asender_cmd(int cmd)
4499{
4500 static struct asender_cmd asender_tbl[] = {
4501 /* anything missing from this table is in
4502 * the drbd_cmd_handler (drbd_default_handler) table,
4503 * see the beginning of drbdd() */
Philipp Reisner0b70a132010-08-20 13:36:10 +02004504 [P_PING] = { sizeof(struct p_header80), got_Ping },
4505 [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07004506 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4507 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4508 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4509 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4510 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4511 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4512 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4513 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4514 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4515 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4516 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
Philipp Reisner02918be2010-08-20 14:35:10 +02004517 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
Philipp Reisnerd612d302010-12-27 10:53:28 +01004518 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
Philipp Reisnerb411b362009-09-25 16:07:19 -07004519 [P_MAX_CMD] = { 0, NULL },
4520 };
4521 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4522 return NULL;
4523 return &asender_tbl[cmd];
4524}
4525
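/*
 * Entry point of the asender thread ("drbd%d_asender"): flushes the
 * done_ee list and receives/dispatches packets on the meta socket.
 *
 * Every packet starts with an 8 byte header, roughly as sketched below
 * (the authoritative definition lives in drbd_int.h; all fields are big
 * endian on the wire):
 *
 *	struct p_header80 {
 *		u32 magic;	 (BE_DRBD_MAGIC)
 *		u16 command;	 (P_* command code)
 *		u16 length;	 (payload bytes after this header)
 *	} __packed;
 *
 * The loop below first reads sizeof(struct p_header80), looks the command
 * up in asender_tbl to learn the full packet size, and only dispatches
 * once 'received' has reached 'expect'.
 */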
4526int drbd_asender(struct drbd_thread *thi)
4527{
4528 struct drbd_conf *mdev = thi->mdev;
Philipp Reisner02918be2010-08-20 14:35:10 +02004529 struct p_header80 *h = &mdev->meta.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004530 struct asender_cmd *cmd = NULL;
4531
4532 int rv, len;
4533 void *buf = h;
4534 int received = 0;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004535 int expect = sizeof(struct p_header80);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004536 int empty;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004537 int ping_timeout_active = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004538
4539 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4540
4541 current->policy = SCHED_RR; /* Make this a realtime task! */
4542 current->rt_priority = 2; /* more important than all other tasks */
4543
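	/* One loop iteration per packet (or timeout): send a ping when asked
	 * to, shrink the receive timeout to ping-timeo while the answer is
	 * outstanding, and treat -EAGAIN as a fatal ping timeout only then. */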
4544 while (get_t_state(thi) == Running) {
4545 drbd_thread_current_set_cpu(mdev);
4546 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4547 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4548 mdev->meta.socket->sk->sk_rcvtimeo =
4549 mdev->net_conf->ping_timeo*HZ/10;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004550 ping_timeout_active = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004551 }
4552
4553 /* conditionally cork;
4554 * it may hurt latency if we cork without much to send */
4555 if (!mdev->net_conf->no_cork &&
4556 3 < atomic_read(&mdev->unacked_cnt))
4557 drbd_tcp_cork(mdev->meta.socket);
4558 while (1) {
4559 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4560 flush_signals(current);
Lars Ellenberg0f8488e2010-10-13 18:19:23 +02004561 if (!drbd_process_done_ee(mdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004562 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004563 /* to avoid race with newly queued ACKs */
4564 set_bit(SIGNAL_ASENDER, &mdev->flags);
4565 spin_lock_irq(&mdev->req_lock);
4566 empty = list_empty(&mdev->done_ee);
4567 spin_unlock_irq(&mdev->req_lock);
4568 /* new ack may have been queued right here,
4569 * but then there is also a signal pending,
4570 * and we start over... */
4571 if (empty)
4572 break;
4573 }
4574 /* but unconditionally uncork unless disabled */
4575 if (!mdev->net_conf->no_cork)
4576 drbd_tcp_uncork(mdev->meta.socket);
4577
4578 /* short circuit, recv_msg would return EINTR anyways. */
4579 if (signal_pending(current))
4580 continue;
4581
4582 rv = drbd_recv_short(mdev, mdev->meta.socket,
4583 buf, expect-received, 0);
4584 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4585
4586 flush_signals(current);
4587
4588 /* Note:
4589 * -EINTR (on meta) we got a signal
4590 * -EAGAIN (on meta) rcvtimeo expired
4591 * -ECONNRESET other side closed the connection
4592 * -ERESTARTSYS (on data) we got a signal
4593 * rv < 0 other than above: unexpected error!
4594 * rv == expected: full header or command
4595 * rv < expected: "woken" by signal during receive
4596 * rv == 0 : "connection shut down by peer"
4597 */
4598 if (likely(rv > 0)) {
4599 received += rv;
4600 buf += rv;
4601 } else if (rv == 0) {
4602 dev_err(DEV, "meta connection shut down by peer.\n");
4603 goto reconnect;
4604 } else if (rv == -EAGAIN) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004605 /* If the data socket received something meanwhile,
4606 * that is good enough: peer is still alive. */
4607 if (time_after(mdev->last_received,
4608 jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
4609 continue;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004610 if (ping_timeout_active) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004611 dev_err(DEV, "PingAck did not arrive in time.\n");
4612 goto reconnect;
4613 }
4614 set_bit(SEND_PING, &mdev->flags);
4615 continue;
4616 } else if (rv == -EINTR) {
4617 continue;
4618 } else {
4619 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4620 goto reconnect;
4621 }
4622
4623 if (received == expect && cmd == NULL) {
4624 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
Lars Ellenberg004352f2010-10-05 20:13:58 +02004625 dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
4626 be32_to_cpu(h->magic),
4627 be16_to_cpu(h->command),
4628 be16_to_cpu(h->length));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004629 goto reconnect;
4630 }
4631 cmd = get_asender_cmd(be16_to_cpu(h->command));
4632 len = be16_to_cpu(h->length);
4633 if (unlikely(cmd == NULL)) {
Lars Ellenberg004352f2010-10-05 20:13:58 +02004634 dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
4635 be32_to_cpu(h->magic),
4636 be16_to_cpu(h->command),
4637 be16_to_cpu(h->length));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004638 goto disconnect;
4639 }
4640 expect = cmd->pkt_size;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004641 ERR_IF(len != expect-sizeof(struct p_header80))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004642 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004643 }
4644 if (received == expect) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004645 mdev->last_received = jiffies;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004646 D_ASSERT(cmd != NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004647 if (!cmd->process(mdev, h))
4648 goto reconnect;
4649
Lars Ellenbergf36af182011-03-09 22:44:55 +01004650 /* the idle_timeout (ping-int)
4651 * has been restored in got_PingAck() */
4652 if (cmd == get_asender_cmd(P_PING_ACK))
4653 ping_timeout_active = 0;
4654
Philipp Reisnerb411b362009-09-25 16:07:19 -07004655 buf = h;
4656 received = 0;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004657 expect = sizeof(struct p_header80);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004658 cmd = NULL;
4659 }
4660 }
4661
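	/* Error exits.  The if (0) blocks are reachable only through the
	 * goto labels inside them; they differ in which connection state
	 * gets forced before the thread terminates. */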
4662 if (0) {
4663reconnect:
4664 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
Lars Ellenberg856c50c2010-10-14 13:37:40 +02004665 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004666 }
4667 if (0) {
4668disconnect:
4669 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Lars Ellenberg856c50c2010-10-14 13:37:40 +02004670 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004671 }
4672 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4673
4674 D_ASSERT(mdev->state.conn < C_CONNECTED);
4675 dev_info(DEV, "asender terminated\n");
4676
4677 return 0;
4678}