/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

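/*
 * Illustrative sketch (editor's note, not part of the driver): how the
 * chain helpers above compose.  drbd_pp_pool and drbd_pp_lock are the
 * globals actually used below; "n" is an arbitrary page count.
 *
 *	spin_lock(&drbd_pp_lock);
 *	chain = page_chain_del(&drbd_pp_pool, n);	// n pages, or NULL
 *	spin_unlock(&drbd_pp_lock);
 *	if (chain) {
 *		struct page *tail = page_chain_tail(chain, NULL);
 *		spin_lock(&drbd_pp_lock);
 *		page_chain_add(&drbd_pp_pool, chain, tail);  // give back
 *		spin_unlock(&drbd_pp_lock);
 *	}
 */
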
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

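/*
 * Illustrative sketch (editor's note, not part of the driver): the usual
 * allocate/use/release pattern around the pool, as the receive path below
 * applies it.  "nr_pages" is an arbitrary count.
 *
 *	struct page *page = drbd_pp_alloc(mdev, nr_pages, true);
 *	if (!page)
 *		return NULL;		// interrupted by a signal
 *	...receive data into the chain...
 *	drbd_pp_free(mdev, page, 0);	// 0: accounted in pp_in_use
 */
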
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->collision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

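/*
 * Illustrative sketch (editor's note, not part of the driver): the typical
 * epoch-entry lifecycle on the receive path, as the functions below use it.
 *
 *	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
 *	if (!e)
 *		return NULL;
 *	...fill e->pages from the socket, queue e->w on an ee list...
 *	...once completed (or on error)...
 *	drbd_free_ee(mdev, e);		// digest, pages, then mempool
 */
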
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

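/*
 * Illustrative sketch (editor's note, not part of the driver): receiving a
 * fixed-size structure with the helper above.  MSG_WAITALL makes the stack
 * block until "size" bytes arrived, so any short return means signal,
 * shutdown, or error, which the loop above distinguishes.
 *
 *	struct p_header80 h;
 *	if (drbd_recv(mdev, &h, sizeof(h)) != sizeof(h))
 *		return false;	// drbd_recv already forced C_BROKEN_PIPE
 */
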
/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

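/*
 * Illustrative sketch (editor's note, not part of the driver): the
 * userspace equivalent of the open-coded assignments above would be
 *
 *	int val = snd;
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
 *
 * Setting SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK mirrors what setsockopt()
 * does: it keeps the kernel's buffer auto-tuning from overriding the
 * explicitly chosen sizes.
 */
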
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == cpu_to_be32(DRBD_MAGIC))) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

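/*
 * Illustrative sketch (editor's note, not part of the driver): the two
 * on-wire header variants distinguished above.  Field order and widths are
 * inferred from the be16/be32 conversions in drbd_recv_header(); see
 * drbd_int.h for the authoritative struct definitions.
 *
 *	h80 (older protocols): u32 magic; u16 command; u16 length;
 *	h95 ("big" magic):     u16 magic; u16 command; u32 length;
 *
 * h95 widens the length field so a single packet header can describe
 * payloads larger than what 16 bits can express.
 */
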
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

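/*
 * Illustrative walkthrough (editor's note, not part of the driver) of the
 * epoch state machine above, for a write epoch of two requests:
 *
 *	receive_Barrier()  -> EV_GOT_BARRIER_NR  (barrier number recorded)
 *	write 1 completes  -> EV_PUT             (active 2 -> 1, still live)
 *	write 2 completes  -> EV_PUT             (active 1 -> 0)
 *	  now epoch_size != 0, active == 0, barrier number known:
 *	  drbd_send_b_ack() goes out, and the epoch is either destroyed
 *	  (FE_DESTROYED) or, if it is still mdev->current_epoch,
 *	  reset for reuse (FE_RECYCLED).
 */
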
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

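/*
 * Illustrative sketch (editor's note, not part of the driver): because of
 * the min() above, the ordering method can only ever be demoted:
 *
 *	current: flush, requested: drain  -> drain
 *	current: drain, requested: flush  -> drain (min keeps the weaker)
 *	drain requested, but no_disk_drain set -> none
 */
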
/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

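/*
 * Illustrative sketch (editor's note, not part of the driver): how the
 * receive paths drive drbd_submit_ee(), as recv_resync_read() below does
 * with the DRBD_FAULT_RS_WR fault-injection class:
 *
 *	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
 *		return true;	// completion arrives via drbd_endio_sec
 *	// on error: unlink e from its ee list and drbd_free_ee() it
 */
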
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

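/*
 * Illustrative sketch (editor's note, not part of the driver): the on-wire
 * payload layout consumed by read_in_block() when data integrity checking
 * is enabled:
 *
 *	[ digest (dgs bytes) ][ block data (data_size - dgs bytes) ]
 *
 * The digest is read first into int_dig_in; the block data is then hashed
 * into int_dig_vv and the two are compared.  Without an integrity_r_tfm,
 * dgs is 0 and the packet carries block data only.
 */
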
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->collision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

1572static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1573{
1574 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1575 int ok = 1;
1576
1577 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1578 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1579
1580 spin_lock_irq(&mdev->req_lock);
Bart Van Assche24c48302011-05-21 18:32:29 +02001581 D_ASSERT(!hlist_unhashed(&e->collision));
1582 hlist_del_init(&e->collision);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001583 spin_unlock_irq(&mdev->req_lock);
1584
1585 dec_unacked(mdev);
1586
1587 return ok;
1588}
1589
1590/* Called from receive_Data.
1591 * Synchronize packets on sock with packets on msock.
1592 *
1593 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1594 * packet traveling on msock, they are still processed in the order they have
1595 * been sent.
1596 *
1597 * Note: we don't care for Ack packets overtaking P_DATA packets.
1598 *
1599 * In case packet_seq is larger than mdev->peer_seq, there are
1600 * outstanding packets on the msock. We wait for them to arrive.
1601 * In case we are the logically next packet, we update mdev->peer_seq
1602 * ourselves. Correctly handles 32bit wrap around.
1603 *
1604 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1605 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1606 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1607 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1608 *
1609 * returns 0 if we may process the packet,
1610 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1611static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1612{
1613 DEFINE_WAIT(wait);
1614 unsigned int p_seq;
1615 long timeout;
1616 int ret = 0;
1617 spin_lock(&mdev->peer_seq_lock);
1618 for (;;) {
1619 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1620 if (seq_le(packet_seq, mdev->peer_seq+1))
1621 break;
1622 if (signal_pending(current)) {
1623 ret = -ERESTARTSYS;
1624 break;
1625 }
1626 p_seq = mdev->peer_seq;
1627 spin_unlock(&mdev->peer_seq_lock);
1628 timeout = schedule_timeout(30*HZ);
1629 spin_lock(&mdev->peer_seq_lock);
1630 if (timeout == 0 && p_seq == mdev->peer_seq) {
1631 ret = -ETIMEDOUT;
1632 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1633 break;
1634 }
1635 }
1636 finish_wait(&mdev->seq_wait, &wait);
1637 if (mdev->peer_seq+1 == packet_seq)
1638 mdev->peer_seq++;
1639 spin_unlock(&mdev->peer_seq_lock);
1640 return ret;
1641}
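/* Editor's note -- illustrative sketch, not part of the driver. The
 * wrap-around handling above relies on a seq_le()-style comparison; the
 * helper itself is defined elsewhere, so this is an assumption about its
 * shape. The usual signed-difference idiom on 32bit unsigned sequence
 * numbers stays correct across the wrap:
 *
 *	static inline int seq_le_sketch(u32 a, u32 b)
 *	{
 *		// true iff a is (wrap-safely) at or before b
 *		return (s32)(a - b) <= 0;
 *	}
 *
 * E.g. a = 0xffffffff, b = 0x00000001: a - b == 0xfffffffe, negative as
 * s32, so the packet just past the wrap still counts as "later",
 * matching the "correctly handles 32bit wrap around" claim above.
 */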
1642
Lars Ellenberg688593c2010-11-17 22:25:03 +01001643/* see also bio_flags_to_wire()
1644 * DRBD_REQ_*, because we need to semantically map the flags to data packet
1645 * flags and back. We may replicate to other kernel versions. */
1646static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001647{
Lars Ellenberg688593c2010-11-17 22:25:03 +01001648 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1649 (dpf & DP_FUA ? REQ_FUA : 0) |
1650 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1651 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001652}
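/* Editor's note -- hedged sketch. The comment above points at
 * bio_flags_to_wire() on the sending side, which is not part of this
 * section; assuming it mirrors wire_flags_to_bio() flag for flag, it
 * would look roughly like:
 *
 *	static u32 bio_flags_to_wire_sketch(unsigned long bi_rw)
 *	{
 *		return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
 *		       (bi_rw & REQ_FUA ? DP_FUA : 0) |
 *		       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 *		       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 *	}
 *
 * A pure bit-for-bit translation, so request flags survive the trip
 * between possibly different kernel versions on the two peers.
 */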
1653
Philipp Reisnerb411b362009-09-25 16:07:19 -07001654/* mirrored write */
Philipp Reisner02918be2010-08-20 14:35:10 +02001655static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001656{
1657 sector_t sector;
1658 struct drbd_epoch_entry *e;
Philipp Reisner02918be2010-08-20 14:35:10 +02001659 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001660 int rw = WRITE;
1661 u32 dp_flags;
1662
Philipp Reisnerb411b362009-09-25 16:07:19 -07001663 if (!get_ldev(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001664 spin_lock(&mdev->peer_seq_lock);
1665 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1666 mdev->peer_seq++;
1667 spin_unlock(&mdev->peer_seq_lock);
1668
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001669 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001670 atomic_inc(&mdev->current_epoch->epoch_size);
1671 return drbd_drain_block(mdev, data_size);
1672 }
1673
1674 /* get_ldev(mdev) successful.
1675 * Corresponding put_ldev done either below (on various errors),
1676 * or in drbd_endio_write_sec, if we successfully submit the data at
1677 * the end of this function. */
1678
1679 sector = be64_to_cpu(p->sector);
1680 e = read_in_block(mdev, p->block_id, sector, data_size);
1681 if (!e) {
1682 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001683 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001684 }
1685
Philipp Reisnerb411b362009-09-25 16:07:19 -07001686 e->w.cb = e_end_block;
1687
Lars Ellenberg688593c2010-11-17 22:25:03 +01001688 dp_flags = be32_to_cpu(p->dp_flags);
1689 rw |= wire_flags_to_bio(mdev, dp_flags);
1690
1691 if (dp_flags & DP_MAY_SET_IN_SYNC)
1692 e->flags |= EE_MAY_SET_IN_SYNC;
1693
Philipp Reisnerb411b362009-09-25 16:07:19 -07001694 spin_lock(&mdev->epoch_lock);
1695 e->epoch = mdev->current_epoch;
1696 atomic_inc(&e->epoch->epoch_size);
1697 atomic_inc(&e->epoch->active);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001698 spin_unlock(&mdev->epoch_lock);
1699
Philipp Reisnerb411b362009-09-25 16:07:19 -07001700 /* I'm the receiver, I do hold a net_cnt reference. */
1701 if (!mdev->net_conf->two_primaries) {
1702 spin_lock_irq(&mdev->req_lock);
1703 } else {
1704 /* don't get the req_lock yet,
1705 * we may sleep in drbd_wait_peer_seq */
1706 const int size = e->size;
1707 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1708 DEFINE_WAIT(wait);
1709 struct drbd_request *i;
1710 struct hlist_node *n;
1711 struct hlist_head *slot;
1712 int first;
1713
1714 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1715 BUG_ON(mdev->ee_hash == NULL);
1716 BUG_ON(mdev->tl_hash == NULL);
1717
1718 /* conflict detection and handling:
1719 * 1. wait on the sequence number,
1720 * in case this data packet overtook ACK packets.
1721 * 2. check our hash tables for conflicting requests.
1722 * we only need to walk the tl_hash, since an ee can not
1723 * have a conflict with another ee: on the submitting
1724 * node, the corresponding req had already been conflicting,
1725 * and a conflicting req is never sent.
1726 *
1727 * Note: for two_primaries, we are protocol C,
1728 * so there cannot be any request that is DONE
1729 * but still on the transfer log.
1730 *
1731 * unconditionally add to the ee_hash.
1732 *
1733 * if no conflicting request is found:
1734 * submit.
1735 *
1736 * if any conflicting request is found
1737 * that has not yet been acked,
1738 * AND I have the "discard concurrent writes" flag:
1739 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1740 *
1741 * if any conflicting request is found:
1742 * block the receiver, waiting on misc_wait
1743 * until no more conflicting requests are there,
1744 * or we get interrupted (disconnect).
1745 *
1746 * we do not just write after local io completion of those
1747 * requests, but only after req is done completely, i.e.
1748 * we wait for the P_DISCARD_ACK to arrive!
1749 *
1750 * then proceed normally, i.e. submit.
1751 */
1752 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1753 goto out_interrupted;
1754
1755 spin_lock_irq(&mdev->req_lock);
1756
Bart Van Assche24c48302011-05-21 18:32:29 +02001757 hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001758
1759#define OVERLAPS overlaps(i->sector, i->size, sector, size)
1760 slot = tl_hash_slot(mdev, sector);
1761 first = 1;
1762 for (;;) {
1763 int have_unacked = 0;
1764 int have_conflict = 0;
1765 prepare_to_wait(&mdev->misc_wait, &wait,
1766 TASK_INTERRUPTIBLE);
Bart Van Assche24c48302011-05-21 18:32:29 +02001767 hlist_for_each_entry(i, n, slot, collision) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001768 if (OVERLAPS) {
1769 /* only ALERT on first iteration,
1770 * we may be woken up early... */
1771 if (first)
1772 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1773 " new: %llus +%u; pending: %llus +%u\n",
1774 current->comm, current->pid,
1775 (unsigned long long)sector, size,
1776 (unsigned long long)i->sector, i->size);
1777 if (i->rq_state & RQ_NET_PENDING)
1778 ++have_unacked;
1779 ++have_conflict;
1780 }
1781 }
1782#undef OVERLAPS
1783 if (!have_conflict)
1784 break;
1785
1786 /* Discard Ack only for the _first_ iteration */
1787 if (first && discard && have_unacked) {
1788 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1789 (unsigned long long)sector);
1790 inc_unacked(mdev);
1791 e->w.cb = e_send_discard_ack;
1792 list_add_tail(&e->w.list, &mdev->done_ee);
1793
1794 spin_unlock_irq(&mdev->req_lock);
1795
1796 /* we could probably send that P_DISCARD_ACK ourselves,
1797 * but I don't like the receiver using the msock */
1798
1799 put_ldev(mdev);
1800 wake_asender(mdev);
1801 finish_wait(&mdev->misc_wait, &wait);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001802 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001803 }
1804
1805 if (signal_pending(current)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001806 hlist_del_init(&e->collision);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001807
1808 spin_unlock_irq(&mdev->req_lock);
1809
1810 finish_wait(&mdev->misc_wait, &wait);
1811 goto out_interrupted;
1812 }
1813
1814 spin_unlock_irq(&mdev->req_lock);
1815 if (first) {
1816 first = 0;
1817 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1818 "sec=%llus\n", (unsigned long long)sector);
1819 } else if (discard) {
1820 /* we had none on the first iteration.
1821 * there must be none now. */
1822 D_ASSERT(have_unacked == 0);
1823 }
1824 schedule();
1825 spin_lock_irq(&mdev->req_lock);
1826 }
1827 finish_wait(&mdev->misc_wait, &wait);
1828 }
1829
1830 list_add(&e->w.list, &mdev->active_ee);
1831 spin_unlock_irq(&mdev->req_lock);
1832
1833 switch (mdev->net_conf->wire_protocol) {
1834 case DRBD_PROT_C:
1835 inc_unacked(mdev);
1836 /* corresponding dec_unacked() in e_end_block()
1837 * respective _drbd_clear_done_ee */
1838 break;
1839 case DRBD_PROT_B:
1840 /* I really don't like it that the receiver thread
1841 * sends on the msock, but anyways */
1842 drbd_send_ack(mdev, P_RECV_ACK, e);
1843 break;
1844 case DRBD_PROT_A:
1845 /* nothing to do */
1846 break;
1847 }
1848
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001849 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001850 /* In case we have the only disk of the cluster, */
1851 drbd_set_out_of_sync(mdev, e->sector, e->size);
1852 e->flags |= EE_CALL_AL_COMPLETE_IO;
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001853 e->flags &= ~EE_MAY_SET_IN_SYNC;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001854 drbd_al_begin_io(mdev, e->sector);
1855 }
1856
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001857 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001858 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001859
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001860 /* don't care for the reason here */
1861 dev_err(DEV, "submit failed, triggering re-connect\n");
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001862 spin_lock_irq(&mdev->req_lock);
1863 list_del(&e->w.list);
Bart Van Assche24c48302011-05-21 18:32:29 +02001864 hlist_del_init(&e->collision);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001865 spin_unlock_irq(&mdev->req_lock);
1866 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1867 drbd_al_complete_io(mdev, e->sector);
1868
Philipp Reisnerb411b362009-09-25 16:07:19 -07001869out_interrupted:
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001870 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001871 put_ldev(mdev);
1872 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001873 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001874}
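/* Editor's note -- illustrative sketch. The OVERLAPS macro in the
 * conflict-detection loop above expands to an overlaps() helper defined
 * elsewhere; for (sector, size-in-bytes) pairs it is presumably the
 * standard half-open interval intersection test:
 *
 *	static inline int overlaps_sketch(sector_t s1, int l1,
 *					  sector_t s2, int l2)
 *	{
 *		// [s1, s1 + l1>>9) and [s2, s2 + l2>>9) intersect
 *		return s1 + (l1 >> 9) > s2 && s2 + (l2 >> 9) > s1;
 *	}
 *
 * Sizes are in bytes and sectors are 512 bytes, hence the >> 9.
 */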
1875
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001876/* We may throttle resync, if the lower device seems to be busy,
1877 * and current sync rate is above c_min_rate.
1878 *
1879 * To decide whether or not the lower device is busy, we use a scheme similar
1880 * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
1881 * (more than 64 sectors) activity we cannot account for with our own resync
1882 * activity, it obviously is "busy".
1883 *
1884 * The current sync rate used here uses only the most recent two step marks,
1885 * to have a short time average so we can react faster.
1886 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01001887int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001888{
1889 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1890 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01001891 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001892 int curr_events;
1893 int throttle = 0;
1894
1895 /* feature disabled? */
1896 if (mdev->sync_conf.c_min_rate == 0)
1897 return 0;
1898
Philipp Reisnere3555d82010-11-07 15:56:29 +01001899 spin_lock_irq(&mdev->al_lock);
1900 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1901 if (tmp) {
1902 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1903 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1904 spin_unlock_irq(&mdev->al_lock);
1905 return 0;
1906 }
1907 /* Do not slow down if app IO is already waiting for this extent */
1908 }
1909 spin_unlock_irq(&mdev->al_lock);
1910
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001911 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1912 (int)part_stat_read(&disk->part0, sectors[1]) -
1913 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01001914
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001915 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1916 unsigned long rs_left;
1917 int i;
1918
1919 mdev->rs_last_events = curr_events;
1920
1921 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1922 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01001923 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
1924
1925 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
1926 rs_left = mdev->ov_left;
1927 else
1928 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001929
1930 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
1931 if (!dt)
1932 dt++;
1933 db = mdev->rs_mark_left[i] - rs_left;
1934 dbdt = Bit2KB(db/dt);
1935
1936 if (dbdt > mdev->sync_conf.c_min_rate)
1937 throttle = 1;
1938 }
1939 return throttle;
1940}
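/* Editor's note -- a worked example of the heuristic above, with made-up
 * numbers. Suppose the sync mark selected via rs_last_mark recorded
 * rs_mark_left = 120000 bits six seconds ago, and rs_left is now at
 * 90000 bits:
 *
 *	dt   = 6
 *	db   = 120000 - 90000 = 30000 bits
 *	dbdt = Bit2KB(30000 / 6) = 5000 * 4 = 20000 KB/s
 *
 * (one bitmap bit covers a 4 KiB block, so Bit2KB multiplies by 4).
 * With c_min_rate configured at, say, 4000 KB/s, this resync would be
 * throttled whenever the backing device also shows foreign activity.
 */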
1941
1942
Philipp Reisner02918be2010-08-20 14:35:10 +02001943static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001944{
1945 sector_t sector;
1946 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1947 struct drbd_epoch_entry *e;
1948 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001949 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001950 unsigned int fault_type;
Philipp Reisner02918be2010-08-20 14:35:10 +02001951 struct p_block_req *p = &mdev->data.rbuf.block_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001952
1953 sector = be64_to_cpu(p->sector);
1954 size = be32_to_cpu(p->blksize);
1955
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01001956 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001957 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1958 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001959 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001960 }
1961 if (sector + (size>>9) > capacity) {
1962 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1963 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001964 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001965 }
1966
1967 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001968 verb = 1;
1969 switch (cmd) {
1970 case P_DATA_REQUEST:
1971 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
1972 break;
1973 case P_RS_DATA_REQUEST:
1974 case P_CSUM_RS_REQUEST:
1975 case P_OV_REQUEST:
1976 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
1977 break;
1978 case P_OV_REPLY:
1979 verb = 0;
1980 dec_rs_pending(mdev);
1981 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
1982 break;
1983 default:
1984 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
1985 cmdname(cmd));
1986 }
1987 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001988 dev_err(DEV, "Can not satisfy peer's read request, "
1989 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001990
Lars Ellenberga821cc42010-09-06 12:31:37 +02001991 /* drain possible payload */
1992 return drbd_drain_block(mdev, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001993 }
1994
1995 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1996 * "criss-cross" setup, that might cause write-out on some other DRBD,
1997 * which in turn might block on the other node at this very place. */
1998 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1999 if (!e) {
2000 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002001 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002002 }
2003
Philipp Reisner02918be2010-08-20 14:35:10 +02002004 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002005 case P_DATA_REQUEST:
2006 e->w.cb = w_e_end_data_req;
2007 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002008 /* application IO, don't drbd_rs_begin_io */
2009 goto submit;
2010
Philipp Reisnerb411b362009-09-25 16:07:19 -07002011 case P_RS_DATA_REQUEST:
2012 e->w.cb = w_e_end_rsdata_req;
2013 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002014 /* used in the sector offset progress display */
2015 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002016 break;
2017
2018 case P_OV_REPLY:
2019 case P_CSUM_RS_REQUEST:
2020 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002021 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2022 if (!di)
2023 goto out_free_e;
2024
2025 di->digest_size = digest_size;
2026 di->digest = (((char *)di)+sizeof(struct digest_info));
2027
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002028 e->digest = di;
2029 e->flags |= EE_HAS_DIGEST;
2030
Philipp Reisnerb411b362009-09-25 16:07:19 -07002031 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2032 goto out_free_e;
2033
Philipp Reisner02918be2010-08-20 14:35:10 +02002034 if (cmd == P_CSUM_RS_REQUEST) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002035 D_ASSERT(mdev->agreed_pro_version >= 89);
2036 e->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002037 /* used in the sector offset progress display */
2038 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisner02918be2010-08-20 14:35:10 +02002039 } else if (cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002040 /* track progress, we may need to throttle */
2041 atomic_add(size >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002042 e->w.cb = w_e_end_ov_reply;
2043 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002044 /* drbd_rs_begin_io done when we sent this request,
2045 * but accounting still needs to be done. */
2046 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002047 }
2048 break;
2049
2050 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002051 if (mdev->ov_start_sector == ~(sector_t)0 &&
2052 mdev->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002053 unsigned long now = jiffies;
2054 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002055 mdev->ov_start_sector = sector;
2056 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002057 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2058 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002059 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2060 mdev->rs_mark_left[i] = mdev->ov_left;
2061 mdev->rs_mark_time[i] = now;
2062 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002063 dev_info(DEV, "Online Verify start sector: %llu\n",
2064 (unsigned long long)sector);
2065 }
2066 e->w.cb = w_e_end_ov_req;
2067 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002068 break;
2069
Philipp Reisnerb411b362009-09-25 16:07:19 -07002070 default:
2071 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002072 cmdname(cmd));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002073 fault_type = DRBD_FAULT_MAX;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002074 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002075 }
2076
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002077 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2078 * wrt the receiver, but it is not as straightforward as it may seem.
2079 * Various places in the resync start and stop logic assume resync
2080 * requests are processed in order, requeuing this on the worker thread
2081 * introduces a bunch of new code for synchronization between threads.
2082 *
2083 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2084 * "forever", throttling after drbd_rs_begin_io will lock that extent
2085 * for application writes for the same time. For now, just throttle
2086 * here, where the rest of the code expects the receiver to sleep for
2087 * a while, anyways.
2088 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002089
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002090 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2091 * this defers syncer requests for some time, before letting at least
2092 * on request through. The resync controller on the receiving side
2093 * will adapt to the incoming rate accordingly.
2094 *
2095 * We cannot throttle here if remote is Primary/SyncTarget:
2096 * we would also throttle its application reads.
2097 * In that case, throttling is done on the SyncTarget only.
2098 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002099 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2100 schedule_timeout_uninterruptible(HZ/10);
2101 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002102 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002103
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002104submit_for_resync:
2105 atomic_add(size >> 9, &mdev->rs_sect_ev);
2106
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002107submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002108 inc_unacked(mdev);
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002109 spin_lock_irq(&mdev->req_lock);
2110 list_add_tail(&e->w.list, &mdev->read_ee);
2111 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002112
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002113 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002114 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002115
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002116 /* don't care for the reason here */
2117 dev_err(DEV, "submit failed, triggering re-connect\n");
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002118 spin_lock_irq(&mdev->req_lock);
2119 list_del(&e->w.list);
2120 spin_unlock_irq(&mdev->req_lock);
2121 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2122
Philipp Reisnerb411b362009-09-25 16:07:19 -07002123out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002124 put_ldev(mdev);
2125 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002126 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002127}
2128
2129static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2130{
2131 int self, peer, rv = -100;
2132 unsigned long ch_self, ch_peer;
2133
2134 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2135 peer = mdev->p_uuid[UI_BITMAP] & 1;
2136
2137 ch_peer = mdev->p_uuid[UI_SIZE];
2138 ch_self = mdev->comm_bm_set;
2139
2140 switch (mdev->net_conf->after_sb_0p) {
2141 case ASB_CONSENSUS:
2142 case ASB_DISCARD_SECONDARY:
2143 case ASB_CALL_HELPER:
2144 dev_err(DEV, "Configuration error.\n");
2145 break;
2146 case ASB_DISCONNECT:
2147 break;
2148 case ASB_DISCARD_YOUNGER_PRI:
2149 if (self == 0 && peer == 1) {
2150 rv = -1;
2151 break;
2152 }
2153 if (self == 1 && peer == 0) {
2154 rv = 1;
2155 break;
2156 }
2157 /* Else fall through to one of the other strategies... */
2158 case ASB_DISCARD_OLDER_PRI:
2159 if (self == 0 && peer == 1) {
2160 rv = 1;
2161 break;
2162 }
2163 if (self == 1 && peer == 0) {
2164 rv = -1;
2165 break;
2166 }
2167 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002168 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002169 "Using discard-least-changes instead\n");
2170 case ASB_DISCARD_ZERO_CHG:
2171 if (ch_peer == 0 && ch_self == 0) {
2172 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2173 ? -1 : 1;
2174 break;
2175 } else {
2176 if (ch_peer == 0) { rv = 1; break; }
2177 if (ch_self == 0) { rv = -1; break; }
2178 }
2179 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2180 break;
2181 case ASB_DISCARD_LEAST_CHG:
2182 if (ch_self < ch_peer)
2183 rv = -1;
2184 else if (ch_self > ch_peer)
2185 rv = 1;
2186 else /* ( ch_self == ch_peer ) */
2187 /* Well, then use something else. */
2188 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2189 ? -1 : 1;
2190 break;
2191 case ASB_DISCARD_LOCAL:
2192 rv = -1;
2193 break;
2194 case ASB_DISCARD_REMOTE:
2195 rv = 1;
2196 }
2197
2198 return rv;
2199}
2200
2201static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2202{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002203 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002204
2205 switch (mdev->net_conf->after_sb_1p) {
2206 case ASB_DISCARD_YOUNGER_PRI:
2207 case ASB_DISCARD_OLDER_PRI:
2208 case ASB_DISCARD_LEAST_CHG:
2209 case ASB_DISCARD_LOCAL:
2210 case ASB_DISCARD_REMOTE:
2211 dev_err(DEV, "Configuration error.\n");
2212 break;
2213 case ASB_DISCONNECT:
2214 break;
2215 case ASB_CONSENSUS:
2216 hg = drbd_asb_recover_0p(mdev);
2217 if (hg == -1 && mdev->state.role == R_SECONDARY)
2218 rv = hg;
2219 if (hg == 1 && mdev->state.role == R_PRIMARY)
2220 rv = hg;
2221 break;
2222 case ASB_VIOLENTLY:
2223 rv = drbd_asb_recover_0p(mdev);
2224 break;
2225 case ASB_DISCARD_SECONDARY:
2226 return mdev->state.role == R_PRIMARY ? 1 : -1;
2227 case ASB_CALL_HELPER:
2228 hg = drbd_asb_recover_0p(mdev);
2229 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002230 enum drbd_state_rv rv2;
2231
2232 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002233 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2234 * we might be here in C_WF_REPORT_PARAMS which is transient.
2235 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002236 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2237 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002238 drbd_khelper(mdev, "pri-lost-after-sb");
2239 } else {
2240 dev_warn(DEV, "Successfully gave up primary role.\n");
2241 rv = hg;
2242 }
2243 } else
2244 rv = hg;
2245 }
2246
2247 return rv;
2248}
2249
2250static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2251{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002252 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002253
2254 switch (mdev->net_conf->after_sb_2p) {
2255 case ASB_DISCARD_YOUNGER_PRI:
2256 case ASB_DISCARD_OLDER_PRI:
2257 case ASB_DISCARD_LEAST_CHG:
2258 case ASB_DISCARD_LOCAL:
2259 case ASB_DISCARD_REMOTE:
2260 case ASB_CONSENSUS:
2261 case ASB_DISCARD_SECONDARY:
2262 dev_err(DEV, "Configuration error.\n");
2263 break;
2264 case ASB_VIOLENTLY:
2265 rv = drbd_asb_recover_0p(mdev);
2266 break;
2267 case ASB_DISCONNECT:
2268 break;
2269 case ASB_CALL_HELPER:
2270 hg = drbd_asb_recover_0p(mdev);
2271 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002272 enum drbd_state_rv rv2;
2273
Philipp Reisnerb411b362009-09-25 16:07:19 -07002274 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2275 * we might be here in C_WF_REPORT_PARAMS which is transient.
2276 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002277 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2278 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002279 drbd_khelper(mdev, "pri-lost-after-sb");
2280 } else {
2281 dev_warn(DEV, "Successfully gave up primary role.\n");
2282 rv = hg;
2283 }
2284 } else
2285 rv = hg;
2286 }
2287
2288 return rv;
2289}
2290
2291static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2292 u64 bits, u64 flags)
2293{
2294 if (!uuid) {
2295 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2296 return;
2297 }
2298 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2299 text,
2300 (unsigned long long)uuid[UI_CURRENT],
2301 (unsigned long long)uuid[UI_BITMAP],
2302 (unsigned long long)uuid[UI_HISTORY_START],
2303 (unsigned long long)uuid[UI_HISTORY_END],
2304 (unsigned long long)bits,
2305 (unsigned long long)flags);
2306}
2307
2308/*
2309 100 after split brain try auto recover
2310 2 C_SYNC_SOURCE set BitMap
2311 1 C_SYNC_SOURCE use BitMap
2312 0 no Sync
2313 -1 C_SYNC_TARGET use BitMap
2314 -2 C_SYNC_TARGET set BitMap
2315 -100 after split brain, disconnect
2316-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002317-1091 requires proto 91
2318-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002319 */
2320static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2321{
2322 u64 self, peer;
2323 int i, j;
2324
2325 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2326 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2327
2328 *rule_nr = 10;
2329 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2330 return 0;
2331
2332 *rule_nr = 20;
2333 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2334 peer != UUID_JUST_CREATED)
2335 return -2;
2336
2337 *rule_nr = 30;
2338 if (self != UUID_JUST_CREATED &&
2339 (peer == UUID_JUST_CREATED || peer == (u64)0))
2340 return 2;
2341
2342 if (self == peer) {
2343 int rct, dc; /* roles at crash time */
2344
2345 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2346
2347 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002348 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002349
2350 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2351 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2352 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2353 drbd_uuid_set_bm(mdev, 0UL);
2354
2355 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2356 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2357 *rule_nr = 34;
2358 } else {
2359 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2360 *rule_nr = 36;
2361 }
2362
2363 return 1;
2364 }
2365
2366 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2367
2368 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002369 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370
2371 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2372 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2373 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2374
2375 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2376 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2377 mdev->p_uuid[UI_BITMAP] = 0UL;
2378
2379 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2380 *rule_nr = 35;
2381 } else {
2382 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2383 *rule_nr = 37;
2384 }
2385
2386 return -1;
2387 }
2388
2389 /* Common power [off|failure] */
2390 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2391 (mdev->p_uuid[UI_FLAGS] & 2);
2392 /* lowest bit is set when we were primary,
2393 * next bit (weight 2) is set when peer was primary */
2394 *rule_nr = 40;
2395
2396 switch (rct) {
2397 case 0: /* !self_pri && !peer_pri */ return 0;
2398 case 1: /* self_pri && !peer_pri */ return 1;
2399 case 2: /* !self_pri && peer_pri */ return -1;
2400 case 3: /* self_pri && peer_pri */
2401 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2402 return dc ? -1 : 1;
2403 }
2404 }
2405
2406 *rule_nr = 50;
2407 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2408 if (self == peer)
2409 return -1;
2410
2411 *rule_nr = 51;
2412 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2413 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002414 if (mdev->agreed_pro_version < 96 ?
2415 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2416 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2417 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002418 /* The last P_SYNC_UUID did not get through. Undo the modifications
2419 the peer made to its UUIDs when it last started a resync as sync source. */
2420
2421 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002422 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002423
2424 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2425 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002426
2427 dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
2428 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2429
Philipp Reisnerb411b362009-09-25 16:07:19 -07002430 return -1;
2431 }
2432 }
2433
2434 *rule_nr = 60;
2435 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2436 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2437 peer = mdev->p_uuid[i] & ~((u64)1);
2438 if (self == peer)
2439 return -2;
2440 }
2441
2442 *rule_nr = 70;
2443 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2444 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2445 if (self == peer)
2446 return 1;
2447
2448 *rule_nr = 71;
2449 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2450 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002451 if (mdev->agreed_pro_version < 96 ?
2452 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2453 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2454 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002455 /* The last P_SYNC_UUID did not get through. Undo the modifications
2456 we made to our UUIDs when we last started a resync as sync source. */
2457
2458 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002459 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002460
2461 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2462 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2463
Philipp Reisner4a23f262011-01-11 17:42:17 +01002464 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002465 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2466 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2467
2468 return 1;
2469 }
2470 }
2471
2472
2473 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002474 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002475 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2476 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2477 if (self == peer)
2478 return 2;
2479 }
2480
2481 *rule_nr = 90;
2482 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2483 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2484 if (self == peer && self != ((u64)0))
2485 return 100;
2486
2487 *rule_nr = 100;
2488 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2489 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2490 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2491 peer = mdev->p_uuid[j] & ~((u64)1);
2492 if (self == peer)
2493 return -100;
2494 }
2495 }
2496
2497 return -1000;
2498}
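/* Editor's note on the masking idiom used throughout drbd_uuid_compare():
 * UUIDs are compared with the lowest bit stripped,
 *
 *	self = uuid & ~((u64)1);
 *
 * because that bit is used as a flag rather than as part of the random
 * identity (it appears to track the node's primary role when the UUID
 * was written, cf. the "lowest bit is set when we were primary" comment
 * in the rct computation above). Two UUIDs therefore describe the same
 * data generation iff their masked values match.
 */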
2499
2500/* drbd_sync_handshake() returns the new conn state on success, or
2501 CONN_MASK (-1) on failure.
2502 */
2503static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2504 enum drbd_disk_state peer_disk) __must_hold(local)
2505{
2506 int hg, rule_nr;
2507 enum drbd_conns rv = C_MASK;
2508 enum drbd_disk_state mydisk;
2509
2510 mydisk = mdev->state.disk;
2511 if (mydisk == D_NEGOTIATING)
2512 mydisk = mdev->new_state_tmp.disk;
2513
2514 dev_info(DEV, "drbd_sync_handshake:\n");
2515 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2516 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2517 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2518
2519 hg = drbd_uuid_compare(mdev, &rule_nr);
2520
2521 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2522
2523 if (hg == -1000) {
2524 dev_alert(DEV, "Unrelated data, aborting!\n");
2525 return C_MASK;
2526 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002527 if (hg < -1000) {
2528 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002529 return C_MASK;
2530 }
2531
2532 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2533 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2534 int f = (hg == -100) || abs(hg) == 2;
2535 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2536 if (f)
2537 hg = hg*2;
2538 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2539 hg > 0 ? "source" : "target");
2540 }
2541
Adam Gandelman3a11a482010-04-08 16:48:23 -07002542 if (abs(hg) == 100)
2543 drbd_khelper(mdev, "initial-split-brain");
2544
Philipp Reisnerb411b362009-09-25 16:07:19 -07002545 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2546 int pcount = (mdev->state.role == R_PRIMARY)
2547 + (peer_role == R_PRIMARY);
2548 int forced = (hg == -100);
2549
2550 switch (pcount) {
2551 case 0:
2552 hg = drbd_asb_recover_0p(mdev);
2553 break;
2554 case 1:
2555 hg = drbd_asb_recover_1p(mdev);
2556 break;
2557 case 2:
2558 hg = drbd_asb_recover_2p(mdev);
2559 break;
2560 }
2561 if (abs(hg) < 100) {
2562 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2563 "automatically solved. Sync from %s node\n",
2564 pcount, (hg < 0) ? "peer" : "this");
2565 if (forced) {
2566 dev_warn(DEV, "Doing a full sync, since"
2567 " UUIDs where ambiguous.\n");
2568 hg = hg*2;
2569 }
2570 }
2571 }
2572
2573 if (hg == -100) {
2574 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2575 hg = -1;
2576 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2577 hg = 1;
2578
2579 if (abs(hg) < 100)
2580 dev_warn(DEV, "Split-Brain detected, manually solved. "
2581 "Sync from %s node\n",
2582 (hg < 0) ? "peer" : "this");
2583 }
2584
2585 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01002586 /* FIXME this log message is not correct if we end up here
2587 * after an attempted attach on a diskless node.
2588 * We just refuse to attach -- well, we drop the "connection"
2589 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07002590 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002591 drbd_khelper(mdev, "split-brain");
2592 return C_MASK;
2593 }
2594
2595 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2596 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2597 return C_MASK;
2598 }
2599
2600 if (hg < 0 && /* by intention we do not use mydisk here. */
2601 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2602 switch (mdev->net_conf->rr_conflict) {
2603 case ASB_CALL_HELPER:
2604 drbd_khelper(mdev, "pri-lost");
2605 /* fall through */
2606 case ASB_DISCONNECT:
2607 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2608 return C_MASK;
2609 case ASB_VIOLENTLY:
2610 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2611 "assumption\n");
2612 }
2613 }
2614
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002615 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2616 if (hg == 0)
2617 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2618 else
2619 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
2620 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2621 abs(hg) >= 2 ? "full" : "bit-map based");
2622 return C_MASK;
2623 }
2624
Philipp Reisnerb411b362009-09-25 16:07:19 -07002625 if (abs(hg) >= 2) {
2626 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002627 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2628 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002629 return C_MASK;
2630 }
2631
2632 if (hg > 0) { /* become sync source. */
2633 rv = C_WF_BITMAP_S;
2634 } else if (hg < 0) { /* become sync target */
2635 rv = C_WF_BITMAP_T;
2636 } else {
2637 rv = C_CONNECTED;
2638 if (drbd_bm_total_weight(mdev)) {
2639 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2640 drbd_bm_total_weight(mdev));
2641 }
2642 }
2643
2644 return rv;
2645}
2646
2647/* returns 1 if invalid */
2648static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2649{
2650 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2651 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2652 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2653 return 0;
2654
2655 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2656 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2657 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2658 return 1;
2659
2660 /* everything else is valid if they are equal on both sides. */
2661 if (peer == self)
2662 return 0;
2663
2664 /* everything else is invalid. */
2665 return 1;
2666}
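/* Editor's note, spelling out cmp_after_sb() on a few sample inputs:
 *
 *	peer			self			result
 *	ASB_DISCARD_REMOTE	ASB_DISCARD_LOCAL	valid (mirrored pair)
 *	ASB_DISCARD_LOCAL	ASB_DISCARD_LOCAL	invalid
 *	ASB_DISCONNECT		ASB_DISCONNECT		valid (equal)
 *	ASB_DISCONNECT		ASB_CALL_HELPER		invalid
 *
 * The local/remote pair is the one asymmetric setting that is only
 * consistent when the two sides configure it mirror-image.
 */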
2667
Philipp Reisner02918be2010-08-20 14:35:10 +02002668static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002669{
Philipp Reisner02918be2010-08-20 14:35:10 +02002670 struct p_protocol *p = &mdev->data.rbuf.protocol;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002671 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002672 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002673 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2674
Philipp Reisnerb411b362009-09-25 16:07:19 -07002675 p_proto = be32_to_cpu(p->protocol);
2676 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2677 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2678 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002679 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002680 cf = be32_to_cpu(p->conn_flags);
2681 p_want_lose = cf & CF_WANT_LOSE;
2682
2683 clear_bit(CONN_DRY_RUN, &mdev->flags);
2684
2685 if (cf & CF_DRY_RUN)
2686 set_bit(CONN_DRY_RUN, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002687
2688 if (p_proto != mdev->net_conf->wire_protocol) {
2689 dev_err(DEV, "incompatible communication protocols\n");
2690 goto disconnect;
2691 }
2692
2693 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2694 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2695 goto disconnect;
2696 }
2697
2698 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2699 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2700 goto disconnect;
2701 }
2702
2703 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2704 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2705 goto disconnect;
2706 }
2707
2708 if (p_want_lose && mdev->net_conf->want_lose) {
2709 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2710 goto disconnect;
2711 }
2712
2713 if (p_two_primaries != mdev->net_conf->two_primaries) {
2714 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2715 goto disconnect;
2716 }
2717
2718 if (mdev->agreed_pro_version >= 87) {
2719 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2720
2721 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002722 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002723
2724 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2725 if (strcmp(p_integrity_alg, my_alg)) {
2726 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2727 goto disconnect;
2728 }
2729 dev_info(DEV, "data-integrity-alg: %s\n",
2730 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2731 }
2732
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002733 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002734
2735disconnect:
2736 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002737 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002738}
2739
2740/* helper function
2741 * input: alg name, feature name
2742 * return: NULL (alg name was "")
2743 * ERR_PTR(error) if something goes wrong
2744 * or the crypto hash ptr, if it worked out ok. */
2745struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2746 const char *alg, const char *name)
2747{
2748 struct crypto_hash *tfm;
2749
2750 if (!alg[0])
2751 return NULL;
2752
2753 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2754 if (IS_ERR(tfm)) {
2755 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2756 alg, name, PTR_ERR(tfm));
2757 return tfm;
2758 }
2759 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2760 crypto_free_hash(tfm);
2761 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2762 return ERR_PTR(-EINVAL);
2763 }
2764 return tfm;
2765}
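/* Editor's note -- hedged usage sketch. The tri-state return above means
 * a caller has to distinguish three cases, roughly:
 *
 *	struct crypto_hash *tfm;
 *
 *	tfm = drbd_crypto_alloc_digest_safe(mdev, alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;	// alloc or digest check failed
 *	if (tfm == NULL)
 *		return;			// feature disabled: alg name was ""
 *	// valid tfm: caller owns it, crypto_free_hash() when replaced
 *
 * receive_SyncParam() below follows this pattern for both the verify
 * and the csums algorithm.
 */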
2766
Philipp Reisner02918be2010-08-20 14:35:10 +02002767static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002768{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002769 int ok = true;
Philipp Reisner02918be2010-08-20 14:35:10 +02002770 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002771 unsigned int header_size, data_size, exp_max_sz;
2772 struct crypto_hash *verify_tfm = NULL;
2773 struct crypto_hash *csums_tfm = NULL;
2774 const int apv = mdev->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02002775 int *rs_plan_s = NULL;
2776 int fifo_size = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002777
2778 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2779 : apv == 88 ? sizeof(struct p_rs_param)
2780 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002781 : apv <= 94 ? sizeof(struct p_rs_param_89)
2782 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002783
Philipp Reisner02918be2010-08-20 14:35:10 +02002784 if (packet_size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002785 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002786 packet_size, exp_max_sz);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002787 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002788 }
2789
2790 if (apv <= 88) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002791 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2792 data_size = packet_size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002793 } else if (apv <= 94) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002794 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2795 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002796 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002797 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02002798 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2799 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002800 D_ASSERT(data_size == 0);
2801 }
2802
2803 /* initialize verify_alg and csums_alg */
2804 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2805
Philipp Reisner02918be2010-08-20 14:35:10 +02002806 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002807 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002808
2809 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2810
2811 if (apv >= 88) {
2812 if (apv == 88) {
2813 if (data_size > SHARED_SECRET_MAX) {
2814 dev_err(DEV, "verify-alg too long, "
2815 "peer wants %u, accepting only %u byte\n",
2816 data_size, SHARED_SECRET_MAX);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002817 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002818 }
2819
2820 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002821 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002822
2823 /* we expect NUL terminated string */
2824 /* but just in case someone tries to be evil */
2825 D_ASSERT(p->verify_alg[data_size-1] == 0);
2826 p->verify_alg[data_size-1] = 0;
2827
2828 } else /* apv >= 89 */ {
2829 /* we still expect NUL terminated strings */
2830 /* but just in case someone tries to be evil */
2831 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2832 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2833 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2834 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2835 }
2836
2837 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2838 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2839 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2840 mdev->sync_conf.verify_alg, p->verify_alg);
2841 goto disconnect;
2842 }
2843 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2844 p->verify_alg, "verify-alg");
2845 if (IS_ERR(verify_tfm)) {
2846 verify_tfm = NULL;
2847 goto disconnect;
2848 }
2849 }
2850
2851 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2852 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2853 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2854 mdev->sync_conf.csums_alg, p->csums_alg);
2855 goto disconnect;
2856 }
2857 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2858 p->csums_alg, "csums-alg");
2859 if (IS_ERR(csums_tfm)) {
2860 csums_tfm = NULL;
2861 goto disconnect;
2862 }
2863 }
2864
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002865 if (apv > 94) {
2866 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2867 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2868 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2869 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2870 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02002871
2872 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2873 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2874 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2875 if (!rs_plan_s) {
2876 dev_err(DEV, "kzalloc of fifo_buffer failed\n");
2877 goto disconnect;
2878 }
2879 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002880 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002881
2882 spin_lock(&mdev->peer_seq_lock);
2883 /* lock against drbd_nl_syncer_conf() */
2884 if (verify_tfm) {
2885 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2886 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2887 crypto_free_hash(mdev->verify_tfm);
2888 mdev->verify_tfm = verify_tfm;
2889 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2890 }
2891 if (csums_tfm) {
2892 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2893 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2894 crypto_free_hash(mdev->csums_tfm);
2895 mdev->csums_tfm = csums_tfm;
2896 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2897 }
Philipp Reisner778f2712010-07-06 11:14:00 +02002898 if (fifo_size != mdev->rs_plan_s.size) {
2899 kfree(mdev->rs_plan_s.values);
2900 mdev->rs_plan_s.values = rs_plan_s;
2901 mdev->rs_plan_s.size = fifo_size;
2902 mdev->rs_planed = 0;
2903 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002904 spin_unlock(&mdev->peer_seq_lock);
2905 }
2906
2907 return ok;
2908disconnect:
2909 /* just for completeness: actually not needed,
2910 * as this is not reached if csums_tfm was ok. */
2911 crypto_free_hash(csums_tfm);
2912 /* but free the verify_tfm again, if csums_tfm did not work out */
2913 crypto_free_hash(verify_tfm);
2914 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002915 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002916}
2917
Philipp Reisnerb411b362009-09-25 16:07:19 -07002918/* warn if the arguments differ by more than 12.5% */
2919static void warn_if_differ_considerably(struct drbd_conf *mdev,
2920 const char *s, sector_t a, sector_t b)
2921{
2922 sector_t d;
2923 if (a == 0 || b == 0)
2924 return;
2925 d = (a > b) ? (a - b) : (b - a);
2926 if (d > (a>>3) || d > (b>>3))
2927 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2928 (unsigned long long)a, (unsigned long long)b);
2929}
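/* Editor's note, a quick worked example of the 12.5% check above: with
 * a = 1000 and b = 880, d = 120 while a>>3 = 125 and b>>3 = 110; d
 * exceeds b>>3, so the warning fires. Since x>>3 is x/8 = 12.5% of x,
 * the condition triggers once the difference exceeds 12.5% of the
 * smaller argument.
 */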

static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_sizes *p = &mdev->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determine_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return false;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
	drbd_reconsider_max_bio_size(mdev);

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return true;
}

static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_uuids *p = &mdev->data.rbuf.uuids;
	u64 *p_uuid;
	int i, updated_uuids = 0;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) {
		dev_err(DEV, "kmalloc of p_uuid failed\n");
		return false;
	}

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids",
					BM_LOCKED_TEST_ALLOWED);
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
			updated_uuids = 1;
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test the disk state, wait until any potentially ongoing
	   cluster-wide state change has finished.  That is important if we
	   are primary and are detaching from our disk: we need to see the
	   new disk state... */
	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	if (updated_uuids)
		drbd_print_uuids(mdev, "receiver updated UUIDs to");

	return true;
}

/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps: The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S] = C_VERIFY_T,
		[C_MASK] = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}
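
/* Example (illustrative): the peer reports { role=Primary, peer=Secondary,
 * disk=UpToDate, pdsk=Inconsistent, conn=StartingSyncS }.  Converted to our
 * point of view this reads { role=Secondary, peer=Primary, disk=Inconsistent,
 * pdsk=UpToDate, conn=StartingSyncT }: role/peer and disk/pdsk are swapped,
 * and asymmetric connection states map to their counterpart via c_tab. */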

static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_req_state *p = &mdev->data.rbuf.req_state;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return true;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);

	drbd_send_sr_reply(mdev, rv);
	drbd_md_sync(mdev);

	return true;
}

static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_state *p = &mdev->data.rbuf.state;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->req_lock);
 retry:
	os = ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	/* peer says his disk is uptodate, while we think it is inconsistent,
	 * and this happens while we think we have a sync going on. */
	if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return true;
		}
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.conn >= C_STARTING_SYNC_S &&
			peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
					return false;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return false;
			}
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.i != os.i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
		return false;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS) {
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_state(mdev);
		}
	}

	mdev->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return true;
}

static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_print_uuids(mdev, "updated sync uuid");
		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return true;
}

/**
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
		     unsigned long *buffer, struct bm_xfer_ctx *c)
{
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);
	int err;

	if (want != data_size) {
		dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
		return -EIO;
	}
	if (want == 0)
		return 0;
	err = drbd_recv(mdev, buffer, want);
	if (err != want) {
		if (err >= 0)
			err = -EIO;
		return err;
	}

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return 1;
}

/**
 * recv_bm_rle_bits
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
	int toggle = DCBP_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return -EIO;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return -EIO;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return -EIO;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return -EIO;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return -EIO;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s != c->bm_bits);
}

/**
 * decode_bitmap_c
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	if (DCBP_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return -EIO;
}

void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header80) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
		: (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}
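
/* Worked example: with plain == 4096 bytes and total == 1024 bytes actually
 * transferred, r = 1000 * 1024 / 4096 = 250, so the reported compression is
 * 1000 - 250 = 750, printed as "75.0%": the savings relative to a plain
 * transfer, in tenths of a percent. */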

/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit or 64 bit chunks,
   as long as it is little endian. (Understand it as a byte stream,
   beginning with the lowest byte...) If we used big endian,
   we would need to process it from the highest address to the lowest
   in order to be agnostic to the 32 vs 64 bit issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct bm_xfer_ctx c;
	void *buffer;
	int err;
	int ok = false;
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
	/* you are supposed to send additional out-of-sync information
	 * if you actually set bits during this phase */

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	buffer = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	for(;;) {
		if (cmd == P_BITMAP) {
			err = receive_bitmap_plain(mdev, data_size, buffer, &c);
		} else if (cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buff */
			p = buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
				goto out;
			if (data_size <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
				goto out;
			}
			err = decode_bitmap_c(mdev, p, &c);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
			goto out;
		}

		c.packets[cmd == P_BITMAP]++;
		c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;

		if (err <= 0) {
			if (err < 0)
				goto out;
			break;
		}
		if (!drbd_recv_header(mdev, &cmd, &data_size))
			goto out;
	}

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(rv == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}

	ok = true;
 out:
	drbd_bm_unlock(mdev);
	if (ok && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	free_page((unsigned long) buffer);
	return ok;
}

static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
		 cmd, data_size);

	size = data_size;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(mdev, sink, want);
		ERR_IF(r <= 0) break;
		size -= r;
	}
	return size == 0;
}

static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->data.socket);

	return true;
}

static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_block_desc *p = &mdev->data.rbuf.block_desc;

	switch (mdev->state.conn) {
	case C_WF_SYNC_UUID:
	case C_WF_BITMAP_T:
	case C_BEHIND:
		break;
	default:
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
				drbd_conn_str(mdev->state.conn));
	}

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return true;
}

typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	drbd_cmd_handler_f function;
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier },
	[P_BITMAP]	    = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_UNPLUG_REMOTE]   = { 0, sizeof(struct p_header80), receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_PROTOCOL]	    = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]	    = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]	    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]	    = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
	[P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
	/* anything missing from this table is in
	 * the asender_tbl, see get_asender_cmd */
	[P_MAX_CMD]	    = { 0, 0, NULL },
};

/* All handler functions that expect a sub-header get that sub-header in
   mdev->data.rbuf.header.head.payload.

   Usually in mdev->data.rbuf.header.head the callback can find the usual
   p_header, but it may not rely on that, since there is also p_header95.
 */
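
/* Dispatch example (illustrative): for a P_SIZES packet, drbdd() below looks
 * up drbd_cmd_handler[P_SIZES], computes the sub-header size
 * shs = sizeof(struct p_sizes) - sizeof(union p_header), reads exactly that
 * many bytes into the receive buffer, and then calls receive_sizes() with
 * the remaining payload size (zero here); a packet carrying extra payload
 * although expect_payload == 0 is rejected as a protocol error. */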

static void drbdd(struct drbd_conf *mdev)
{
	union p_header *header = &mdev->data.rbuf.header;
	unsigned int packet_size;
	enum drbd_packets cmd;
	size_t shs; /* sub header size */
	int rv;

	while (get_t_state(&mdev->receiver) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (!drbd_recv_header(mdev, &cmd, &packet_size))
			goto err_out;

		if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
			goto err_out;
		}

		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
			goto err_out;
		}

		if (shs) {
			rv = drbd_recv(mdev, &header->h80.payload, shs);
			if (unlikely(rv != shs)) {
				if (!signal_pending(current))
					dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
				goto err_out;
			}
		}

		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);

		if (unlikely(!rv)) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
			    cmdname(cmd), packet_size);
			goto err_out;
		}
	}

	if (0) {
	err_out:
		drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	}
	/* If we leave here, we probably want to update at least the
	 * "Connected" indicator on stable storage. Do so explicitly here. */
	drbd_md_sync(mdev);
}

void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&mdev->data.work, &barr.w);
	wait_for_completion(&barr.done);
}

void drbd_free_tl_hash(struct drbd_conf *mdev)
{
	struct hlist_head *h;

	spin_lock_irq(&mdev->req_lock);

	if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
		spin_unlock_irq(&mdev->req_lock);
		return;
	}
	/* paranoia code */
	for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
				(int)(h - mdev->ee_hash), h->first);
	kfree(mdev->ee_hash);
	mdev->ee_hash = NULL;
	mdev->ee_hash_s = 0;

	/* paranoia code */
	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
				(int)(h - mdev->tl_hash), h->first);
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
	spin_unlock_irq(&mdev->req_lock);
}

static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->asender);
	drbd_free_sock(mdev);

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	del_timer(&mdev->request_timer);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee(). If we do this too early, we might
	 * miss some resync ee and pages.*/
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
		drbd_try_outdate_peer_async(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	if (os.conn == C_DISCONNECTING) {
		wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);

		crypto_free_hash(mdev->cram_hmac_tfm);
		mdev->cram_hmac_tfm = NULL;

		kfree(mdev->net_conf);
		mdev->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred. I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->data.socket == NULL) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
			    (struct p_header80 *)p, sizeof(*p), 0);
	mutex_unlock(&mdev->data.mutex);
	return ok;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	unsigned int length;
	enum drbd_packets cmd;
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		return 0;

	if (cmd != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
		     cmdname(cmd), cmd);
		return -1;
	}

	if (length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
		     expect, length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
	    PRO_VERSION_MIN, PRO_VERSION_MAX,
	    p->protocol_min, p->protocol_max);
	return -1;
}
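
/* Negotiation example (a sketch, with assumed version numbers): if
 * PRO_VERSION_MIN..PRO_VERSION_MAX were 86..96 and the peer announced
 * 88..94, the ranges overlap and we would agree on min(96, 94) == 94.
 * A peer announcing only versions above or below our range fails the
 * overlap check above and we go standalone. */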

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

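/* The CRAM-HMAC exchange below, in order:
 *   1. send our random challenge                    (P_AUTH_CHALLENGE)
 *   2. receive the peer's challenge                 (P_AUTH_CHALLENGE)
 *   3. send HMAC(shared_secret, peer's challenge)   (P_AUTH_RESPONSE)
 *   4. receive HMAC(shared_secret, our challenge)   (P_AUTH_RESPONSE)
 *      and compare it against the locally computed right_response.
 * Both sides must be configured with the same shared secret; the secret
 * itself never crosses the wire. */
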
static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	enum drbd_packets cmd;
	unsigned int length;
	int rv;

	desc.tfm = mdev->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
				(u8 *)mdev->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
		    cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, length);

	if (rv != length) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length != resp_size) {
		dev_err(DEV, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
		     resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif

int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	unsigned int minor = mdev_to_minor(mdev);
	int h;

	sprintf(current->comm, "drbd%d_receiver", minor);

	dev_info(DEV, "receiver (re)started\n");

	do {
		h = drbd_connect(mdev);
		if (h == 0) {
			drbd_disconnect(mdev);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			dev_warn(DEV, "Discarding network configuration.\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	} while (h == 0);

	if (h > 0) {
		if (get_net_conf(mdev)) {
			drbdd(mdev);
			put_net_conf(mdev);
		}
	}

	drbd_disconnect(mdev);

	dev_info(DEV, "receiver terminated\n");
	return 0;
}
4193
4194/* ********* acknowledge sender ******** */
4195
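/* The peer replied to a cluster-wide state change request: remember
 * success or failure in mdev->flags and wake up anyone waiting on
 * state_wait. */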
static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return true;
}

static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return true;
}

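/* Checksum based resync (protocol >= 89): the peer found the block to be
 * identical, so mark it in sync locally without any data transfer. */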
static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return true;
}

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	return NULL;
}

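/* Look up the request an ACK refers to (under req_lock, using the given
 * validator), apply the request state transition 'what', and complete
 * the master bio if that transition finished it. */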
static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector,
	struct drbd_request *(*validator)(struct drbd_conf *, u64, sector_t),
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);

		dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
			(void *)(unsigned long)id, (unsigned long long)sector);
		return false;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

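/* Positive acknowledgement for a write.  Resync ("syncer") blocks are
 * marked in sync directly; application writes are translated into the
 * request event matching the ack type and wire protocol. */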
static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return true;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return false;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}

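/* Negative acknowledgement: the peer could not satisfy the request.
 * Resync requests are recorded as failed I/O; for application writes
 * see the protocol A/B special case below. */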
static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	struct drbd_request *req;
	struct bio_and_error m;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}

	spin_lock_irq(&mdev->req_lock);
	req = _ack_id_to_req(mdev, p->block_id, sector);
	if (!req) {
		spin_unlock_irq(&mdev->req_lock);
		if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
		    mdev->net_conf->wire_protocol == DRBD_PROT_B) {
			/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
			   The master bio might already be completed, therefore the
			   request is no longer in the collision hash.
			   => Do not try to validate block_id as request. */
			/* In Protocol B we might already have got a P_RECV_ACK
			   but then get a P_NEG_ACK afterwards. */
			drbd_set_out_of_sync(mdev, sector, size);
			return true;
		} else {
			dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
				(void *)(unsigned long)p->block_id, (unsigned long long)sector);
			return false;
		}
	}
	__req_mod(req, neg_acked, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

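/* The peer could not serve our read request: fail the original
 * application request. */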
static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}

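/* A resync read on the peer failed (P_NEG_RS_DREPLY) or was cancelled
 * (P_RS_CANCEL).  Either way the pending resync request is done; only a
 * real failure is recorded as failed resync I/O. */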
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (be16_to_cpu(h->command)) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
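			/* fall through */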
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}

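/* The peer has processed a full epoch: release the corresponding
 * requests from the transfer log.  If we are in C_AHEAD with no
 * application I/O in flight, arm start_resync_timer to become sync
 * source again. */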
static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
		mdev->start_resync_timer.expires = jiffies + HZ;
		add_timer(&mdev->start_resync_timer);
	}

	return true;
}

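/* One online-verify reply from the peer: record an out-of-sync block if
 * the checksums differed, update the progress marks, and once ov_left
 * drops to zero queue the work that finishes the verify run. */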
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.\n");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}

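/* Packet is valid but intentionally ignored (e.g. P_DELAY_PROBE). */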
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}

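/* Dispatch table for packets arriving on the meta-data socket: expected
 * packet size (header included) and handler per command. */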
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING] = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD] = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}

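/*
 * Entry point of the asender thread: send pings and process the ACKs
 * queued on done_ee, then receive and dispatch packets on the meta-data
 * socket via get_asender_cmd().  Runs with SCHED_RR priority so it is
 * scheduled ahead of normal tasks.
 */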
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf = h;
	int received = 0;
	int expect = sizeof(struct p_header80);
	int empty;
	int ping_timeout_active = 0;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR        (on meta) we got a signal
		 * -EAGAIN       (on meta) rcvtimeo expired
		 * -ECONNRESET   other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0       other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0      : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->last_received,
				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != cpu_to_be32(DRBD_MAGIC))) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
					be32_to_cpu(h->magic),
					be16_to_cpu(h->command),
					be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
					be32_to_cpu(h->magic),
					be16_to_cpu(h->command),
					be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			mdev->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf = h;
			received = 0;
			expect = sizeof(struct p_header80);
			cmd = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}