/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
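
/*
 * Illustrative only -- nothing in this file spells it out like this:
 * the three helpers above compose.  A caller unlinks a chain under the
 * pool lock, hands it around privately, finds its tail again outside
 * the lock, and relinks it under the lock:
 *
 *	struct page *chain, *tail;
 *	int n;
 *
 *	spin_lock(&drbd_pp_lock);
 *	chain = page_chain_del(&drbd_pp_pool, 16);	(16 pages, or NULL)
 *	spin_unlock(&drbd_pp_lock);
 *	if (chain) {
 *		tail = page_chain_tail(chain, &n);	(n == 16 here)
 *		spin_lock(&drbd_pp_lock);
 *		page_chain_add(&drbd_pp_pool, chain, tail);
 *		spin_unlock(&drbd_pp_lock);
 *	}
 */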

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}
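
/*
 * Illustrative only: how the receive path below pairs these helpers
 * (a sketch, not a verbatim call site):
 *
 *	struct page *page = drbd_pp_alloc(mdev, nr_pages, true);
 *	if (!page)
 *		return NULL;	(only if !retry, or we were signalled)
 *	...fill the chain via kmap()/drbd_recv()/kunmap()...
 *	drbd_pp_free(mdev, page, 0);	(drops pp_in_use, wakes waiters)
 */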

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->collision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}
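
/*
 * Illustrative only: the usual life cycle of an epoch entry as driven
 * by the receive path (error handling trimmed):
 *
 *	e = drbd_alloc_ee(mdev, id, sector, size, GFP_NOIO);
 *	e->w.cb = e_end_block;		(or e_end_resync_block)
 *	...add e->w.list to an ee list under req_lock...
 *	drbd_submit_ee(mdev, e, WRITE, fault_type);
 *	...bio completion moves it to done_ee; the asender runs
 *	   drbd_process_done_ee(), which calls e->w.cb() and then
 *	   drbd_free_ee()...
 */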
356
Lars Ellenberg435f0742010-09-06 12:30:25 +0200357void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700358{
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +0200359 if (e->flags & EE_HAS_DIGEST)
360 kfree(e->digest);
Lars Ellenberg435f0742010-09-06 12:30:25 +0200361 drbd_pp_free(mdev, e->pages, is_net);
Lars Ellenberg45bb9122010-05-14 17:10:48 +0200362 D_ASSERT(atomic_read(&e->pending_bios) == 0);
Bart Van Assche24c48302011-05-21 18:32:29 +0200363 D_ASSERT(hlist_unhashed(&e->collision));
Philipp Reisnerb411b362009-09-25 16:07:19 -0700364 mempool_free(e, drbd_ee_mempool);
365}
366
367int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
368{
369 LIST_HEAD(work_list);
370 struct drbd_epoch_entry *e, *t;
371 int count = 0;
Lars Ellenberg435f0742010-09-06 12:30:25 +0200372 int is_net = list == &mdev->net_ee;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700373
374 spin_lock_irq(&mdev->req_lock);
375 list_splice_init(list, &work_list);
376 spin_unlock_irq(&mdev->req_lock);
377
378 list_for_each_entry_safe(e, t, &work_list, w.list) {
Lars Ellenberg435f0742010-09-06 12:30:25 +0200379 drbd_free_some_ee(mdev, e, is_net);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700380 count++;
381 }
382 return count;
383}
384
385
386/*
387 * This function is called from _asender only_
388 * but see also comments in _req_mod(,barrier_acked)
389 * and receive_Barrier.
390 *
391 * Move entries from net_ee to done_ee, if ready.
392 * Grab done_ee, call all callbacks, free the entries.
393 * The callbacks typically send out ACKs.
394 */
395static int drbd_process_done_ee(struct drbd_conf *mdev)
396{
397 LIST_HEAD(work_list);
398 LIST_HEAD(reclaimed);
399 struct drbd_epoch_entry *e, *t;
400 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
401
402 spin_lock_irq(&mdev->req_lock);
403 reclaim_net_ee(mdev, &reclaimed);
404 list_splice_init(&mdev->done_ee, &work_list);
405 spin_unlock_irq(&mdev->req_lock);
406
407 list_for_each_entry_safe(e, t, &reclaimed, w.list)
Lars Ellenberg435f0742010-09-06 12:30:25 +0200408 drbd_free_net_ee(mdev, e);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700409
410 /* possible callbacks here:
411 * e_end_block, and e_end_resync_block, e_send_discard_ack.
412 * all ignore the last argument.
413 */
414 list_for_each_entry_safe(e, t, &work_list, w.list) {
Philipp Reisnerb411b362009-09-25 16:07:19 -0700415 /* list_del not necessary, next/prev members not touched */
416 ok = e->w.cb(mdev, &e->w, !ok) && ok;
417 drbd_free_ee(mdev, e);
418 }
419 wake_up(&mdev->ee_wait);
420
421 return ok;
422}
423
424void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
425{
426 DEFINE_WAIT(wait);
427
428 /* avoids spin_lock/unlock
429 * and calling prepare_to_wait in the fast path */
430 while (!list_empty(head)) {
431 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
432 spin_unlock_irq(&mdev->req_lock);
Jens Axboe7eaceac2011-03-10 08:52:07 +0100433 io_schedule();
Philipp Reisnerb411b362009-09-25 16:07:19 -0700434 finish_wait(&mdev->ee_wait, &wait);
435 spin_lock_irq(&mdev->req_lock);
436 }
437}
438
439void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
440{
441 spin_lock_irq(&mdev->req_lock);
442 _drbd_wait_ee_list_empty(mdev, head);
443 spin_unlock_irq(&mdev->req_lock);
444}
445
446/* see also kernel_accept; which is only present since 2.6.18.
447 * also we want to log which part of it failed, exactly */
448static int drbd_accept(struct drbd_conf *mdev, const char **what,
449 struct socket *sock, struct socket **newsock)
450{
451 struct sock *sk = sock->sk;
452 int err = 0;
453
454 *what = "listen";
455 err = sock->ops->listen(sock, 5);
456 if (err < 0)
457 goto out;
458
459 *what = "sock_create_lite";
460 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
461 newsock);
462 if (err < 0)
463 goto out;
464
465 *what = "accept";
466 err = sock->ops->accept(sock, *newsock, 0);
467 if (err < 0) {
468 sock_release(*newsock);
469 *newsock = NULL;
470 goto out;
471 }
472 (*newsock)->ops = sock->ops;
473
474out:
475 return err;
476}
477
478static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
479 void *buf, size_t size, int flags)
480{
481 mm_segment_t oldfs;
482 struct kvec iov = {
483 .iov_base = buf,
484 .iov_len = size,
485 };
486 struct msghdr msg = {
487 .msg_iovlen = 1,
488 .msg_iov = (struct iovec *)&iov,
489 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
490 };
491 int rv;
492
493 oldfs = get_fs();
494 set_fs(KERNEL_DS);
495 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
496 set_fs(oldfs);
497
498 return rv;
499}
500
501static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
502{
503 mm_segment_t oldfs;
504 struct kvec iov = {
505 .iov_base = buf,
506 .iov_len = size,
507 };
508 struct msghdr msg = {
509 .msg_iovlen = 1,
510 .msg_iov = (struct iovec *)&iov,
511 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
512 };
513 int rv;
514
515 oldfs = get_fs();
516 set_fs(KERNEL_DS);
517
518 for (;;) {
519 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
520 if (rv == size)
521 break;
522
523 /* Note:
524 * ECONNRESET other side closed the connection
525 * ERESTARTSYS (on sock) we got a signal
526 */
527
528 if (rv < 0) {
529 if (rv == -ECONNRESET)
530 dev_info(DEV, "sock was reset by peer\n");
531 else if (rv != -ERESTARTSYS)
532 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
533 break;
534 } else if (rv == 0) {
535 dev_info(DEV, "sock was shut down by peer\n");
536 break;
537 } else {
538 /* signal came in, or peer/link went down,
539 * after we read a partial message
540 */
541 /* D_ASSERT(signal_pending(current)); */
542 break;
543 }
544 };
545
546 set_fs(oldfs);
547
548 if (rv != size)
549 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
550
551 return rv;
552}
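
/*
 * Illustrative only: drbd_recv() either returns exactly `size` bytes or
 * has already forced the connection towards C_BROKEN_PIPE, so callers
 * just compare against the expected length:
 *
 *	if (drbd_recv(mdev, h, sizeof(*h)) != sizeof(*h))
 *		return false;	(link is being torn down)
 */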
553
Lars Ellenberg5dbf1672010-05-25 16:18:01 +0200554/* quoting tcp(7):
555 * On individual connections, the socket buffer size must be set prior to the
556 * listen(2) or connect(2) calls in order to have it take effect.
557 * This is our wrapper to do so.
558 */
559static void drbd_setbufsize(struct socket *sock, unsigned int snd,
560 unsigned int rcv)
561{
562 /* open coded SO_SNDBUF, SO_RCVBUF */
563 if (snd) {
564 sock->sk->sk_sndbuf = snd;
565 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
566 }
567 if (rcv) {
568 sock->sk->sk_rcvbuf = rcv;
569 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
570 }
571}
572
Philipp Reisnerb411b362009-09-25 16:07:19 -0700573static struct socket *drbd_try_connect(struct drbd_conf *mdev)
574{
575 const char *what;
576 struct socket *sock;
577 struct sockaddr_in6 src_in6;
578 int err;
579 int disconnect_on_error = 1;
580
581 if (!get_net_conf(mdev))
582 return NULL;
583
584 what = "sock_create_kern";
585 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
586 SOCK_STREAM, IPPROTO_TCP, &sock);
587 if (err < 0) {
588 sock = NULL;
589 goto out;
590 }
591
592 sock->sk->sk_rcvtimeo =
593 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
Lars Ellenberg5dbf1672010-05-25 16:18:01 +0200594 drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
595 mdev->net_conf->rcvbuf_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700596
597 /* explicitly bind to the configured IP as source IP
598 * for the outgoing connections.
599 * This is needed for multihomed hosts and to be
600 * able to use lo: interfaces for drbd.
601 * Make sure to use 0 as port number, so linux selects
602 * a free one dynamically.
603 */
604 memcpy(&src_in6, mdev->net_conf->my_addr,
605 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
606 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
607 src_in6.sin6_port = 0;
608 else
609 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
610
611 what = "bind before connect";
612 err = sock->ops->bind(sock,
613 (struct sockaddr *) &src_in6,
614 mdev->net_conf->my_addr_len);
615 if (err < 0)
616 goto out;
617
618 /* connect may fail, peer not yet available.
619 * stay C_WF_CONNECTION, don't go Disconnecting! */
620 disconnect_on_error = 0;
621 what = "connect";
622 err = sock->ops->connect(sock,
623 (struct sockaddr *)mdev->net_conf->peer_addr,
624 mdev->net_conf->peer_addr_len, 0);
625
626out:
627 if (err < 0) {
628 if (sock) {
629 sock_release(sock);
630 sock = NULL;
631 }
632 switch (-err) {
633 /* timeout, busy, signal pending */
634 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
635 case EINTR: case ERESTARTSYS:
636 /* peer not (yet) available, network problem */
637 case ECONNREFUSED: case ENETUNREACH:
638 case EHOSTDOWN: case EHOSTUNREACH:
639 disconnect_on_error = 0;
640 break;
641 default:
642 dev_err(DEV, "%s failed, err = %d\n", what, err);
643 }
644 if (disconnect_on_error)
645 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
646 }
647 put_net_conf(mdev);
648 return sock;
649}
650
651static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
652{
653 int timeo, err;
654 struct socket *s_estab = NULL, *s_listen;
655 const char *what;
656
657 if (!get_net_conf(mdev))
658 return NULL;
659
660 what = "sock_create_kern";
661 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
662 SOCK_STREAM, IPPROTO_TCP, &s_listen);
663 if (err) {
664 s_listen = NULL;
665 goto out;
666 }
667
668 timeo = mdev->net_conf->try_connect_int * HZ;
669 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
670
671 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
672 s_listen->sk->sk_rcvtimeo = timeo;
673 s_listen->sk->sk_sndtimeo = timeo;
Lars Ellenberg5dbf1672010-05-25 16:18:01 +0200674 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
675 mdev->net_conf->rcvbuf_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700676
677 what = "bind before listen";
678 err = s_listen->ops->bind(s_listen,
679 (struct sockaddr *) mdev->net_conf->my_addr,
680 mdev->net_conf->my_addr_len);
681 if (err < 0)
682 goto out;
683
684 err = drbd_accept(mdev, &what, s_listen, &s_estab);
685
686out:
687 if (s_listen)
688 sock_release(s_listen);
689 if (err < 0) {
690 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
691 dev_err(DEV, "%s failed, err = %d\n", what, err);
692 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
693 }
694 }
695 put_net_conf(mdev);
696
697 return s_estab;
698}
699
700static int drbd_send_fp(struct drbd_conf *mdev,
701 struct socket *sock, enum drbd_packets cmd)
702{
Philipp Reisner02918be2010-08-20 14:35:10 +0200703 struct p_header80 *h = &mdev->data.sbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700704
705 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
706}
707
708static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
709{
Philipp Reisner02918be2010-08-20 14:35:10 +0200710 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700711 int rr;
712
713 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
714
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +0100715 if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
Philipp Reisnerb411b362009-09-25 16:07:19 -0700716 return be16_to_cpu(h->command);
717
718 return 0xffff;
719}
720
721/**
722 * drbd_socket_okay() - Free the socket if its connection is not okay
723 * @mdev: DRBD device.
724 * @sock: pointer to the pointer to the socket.
725 */
726static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
727{
728 int rr;
729 char tb[4];
730
731 if (!*sock)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100732 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700733
734 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
735
736 if (rr > 0 || rr == -EAGAIN) {
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100737 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700738 } else {
739 sock_release(*sock);
740 *sock = NULL;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100741 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700742 }
743}
744
745/*
746 * return values:
747 * 1 yes, we have a valid connection
748 * 0 oops, did not work out, please try again
749 * -1 peer talks different language,
750 * no point in trying again, please go standalone.
751 * -2 We do not have a network config...
752 */
753static int drbd_connect(struct drbd_conf *mdev)
754{
755 struct socket *s, *sock, *msock;
756 int try, h, ok;
757
758 D_ASSERT(!mdev->data.socket);
759
Philipp Reisnerb411b362009-09-25 16:07:19 -0700760 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
761 return -2;
762
763 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
764
765 sock = NULL;
766 msock = NULL;
767
768 do {
769 for (try = 0;;) {
770 /* 3 tries, this should take less than a second! */
771 s = drbd_try_connect(mdev);
772 if (s || ++try >= 3)
773 break;
774 /* give the other side time to call bind() & listen() */
Philipp Reisner20ee6392011-01-18 15:28:59 +0100775 schedule_timeout_interruptible(HZ / 10);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700776 }
777
778 if (s) {
779 if (!sock) {
780 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
781 sock = s;
782 s = NULL;
783 } else if (!msock) {
784 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
785 msock = s;
786 s = NULL;
787 } else {
788 dev_err(DEV, "Logic error in drbd_connect()\n");
789 goto out_release_sockets;
790 }
791 }
792
793 if (sock && msock) {
Philipp Reisnera8e40792011-05-13 12:03:55 +0200794 schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700795 ok = drbd_socket_okay(mdev, &sock);
796 ok = drbd_socket_okay(mdev, &msock) && ok;
797 if (ok)
798 break;
799 }
800
801retry:
802 s = drbd_wait_for_connect(mdev);
803 if (s) {
804 try = drbd_recv_fp(mdev, s);
805 drbd_socket_okay(mdev, &sock);
806 drbd_socket_okay(mdev, &msock);
807 switch (try) {
808 case P_HAND_SHAKE_S:
809 if (sock) {
810 dev_warn(DEV, "initial packet S crossed\n");
811 sock_release(sock);
812 }
813 sock = s;
814 break;
815 case P_HAND_SHAKE_M:
816 if (msock) {
817 dev_warn(DEV, "initial packet M crossed\n");
818 sock_release(msock);
819 }
820 msock = s;
821 set_bit(DISCARD_CONCURRENT, &mdev->flags);
822 break;
823 default:
824 dev_warn(DEV, "Error receiving initial packet\n");
825 sock_release(s);
826 if (random32() & 1)
827 goto retry;
828 }
829 }
830
831 if (mdev->state.conn <= C_DISCONNECTING)
832 goto out_release_sockets;
833 if (signal_pending(current)) {
834 flush_signals(current);
835 smp_rmb();
836 if (get_t_state(&mdev->receiver) == Exiting)
837 goto out_release_sockets;
838 }
839
840 if (sock && msock) {
841 ok = drbd_socket_okay(mdev, &sock);
842 ok = drbd_socket_okay(mdev, &msock) && ok;
843 if (ok)
844 break;
845 }
846 } while (1);
847
848 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
849 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
850
851 sock->sk->sk_allocation = GFP_NOIO;
852 msock->sk->sk_allocation = GFP_NOIO;
853
854 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
855 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
856
Philipp Reisnerb411b362009-09-25 16:07:19 -0700857 /* NOT YET ...
858 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
859 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
860 * first set it to the P_HAND_SHAKE timeout,
861 * which we set to 4x the configured ping_timeout. */
862 sock->sk->sk_sndtimeo =
863 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
864
865 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
866 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
867
868 /* we don't want delays.
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300869 * we use TCP_CORK where appropriate, though */
Philipp Reisnerb411b362009-09-25 16:07:19 -0700870 drbd_tcp_nodelay(sock);
871 drbd_tcp_nodelay(msock);
872
873 mdev->data.socket = sock;
874 mdev->meta.socket = msock;
875 mdev->last_received = jiffies;
876
877 D_ASSERT(mdev->asender.task == NULL);
878
879 h = drbd_do_handshake(mdev);
880 if (h <= 0)
881 return h;
882
883 if (mdev->cram_hmac_tfm) {
884 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
Johannes Thomab10d96c2010-01-07 16:02:50 +0100885 switch (drbd_do_auth(mdev)) {
886 case -1:
Philipp Reisnerb411b362009-09-25 16:07:19 -0700887 dev_err(DEV, "Authentication of peer failed\n");
888 return -1;
Johannes Thomab10d96c2010-01-07 16:02:50 +0100889 case 0:
890 dev_err(DEV, "Authentication of peer failed, trying again.\n");
891 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700892 }
893 }
894
895 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
896 return 0;
897
898 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
899 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
900
901 atomic_set(&mdev->packet_seq, 0);
902 mdev->peer_seq = 0;
903
904 drbd_thread_start(&mdev->asender);
905
Philipp Reisner148efa12011-01-15 00:21:15 +0100906 if (drbd_send_protocol(mdev) == -1)
Philipp Reisner7e2455c2010-04-22 14:50:23 +0200907 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700908 drbd_send_sync_param(mdev, &mdev->sync_conf);
Philipp Reisnere89b5912010-03-24 17:11:33 +0100909 drbd_send_sizes(mdev, 0, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700910 drbd_send_uuids(mdev);
911 drbd_send_state(mdev);
912 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
913 clear_bit(RESIZE_PENDING, &mdev->flags);
Philipp Reisner7fde2be2011-03-01 11:08:28 +0100914 mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
Philipp Reisnerb411b362009-09-25 16:07:19 -0700915
916 return 1;
917
918out_release_sockets:
919 if (sock)
920 sock_release(sock);
921 if (msock)
922 sock_release(msock);
923 return -1;
924}
925
Philipp Reisner02918be2010-08-20 14:35:10 +0200926static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700927{
Philipp Reisner02918be2010-08-20 14:35:10 +0200928 union p_header *h = &mdev->data.rbuf.header;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700929 int r;
930
931 r = drbd_recv(mdev, h, sizeof(*h));
Philipp Reisnerb411b362009-09-25 16:07:19 -0700932 if (unlikely(r != sizeof(*h))) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +0100933 if (!signal_pending(current))
934 dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100935 return false;
Philipp Reisner02918be2010-08-20 14:35:10 +0200936 }
937
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +0100938 if (likely(h->h80.magic == cpu_to_be32(DRBD_MAGIC))) {
Philipp Reisner02918be2010-08-20 14:35:10 +0200939 *cmd = be16_to_cpu(h->h80.command);
940 *packet_size = be16_to_cpu(h->h80.length);
Andreas Gruenbacherca9bc122011-01-11 13:47:24 +0100941 } else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
Philipp Reisner02918be2010-08-20 14:35:10 +0200942 *cmd = be16_to_cpu(h->h95.command);
943 *packet_size = be32_to_cpu(h->h95.length);
944 } else {
Lars Ellenberg004352f2010-10-05 20:13:58 +0200945 dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
946 be32_to_cpu(h->h80.magic),
947 be16_to_cpu(h->h80.command),
948 be16_to_cpu(h->h80.length));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100949 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700950 }
951 mdev->last_received = jiffies;
952
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100953 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700954}
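
/*
 * Illustrative only (see drbd_int.h for the authoritative definitions):
 * the two header layouts decoded above differ in their magic and in the
 * width of the length field:
 *
 *	h80: be32 magic (DRBD_MAGIC),     be16 command, be16 length
 *	h95: be16 magic (DRBD_MAGIC_BIG), be16 command, be32 length
 */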

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}
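
/*
 * In short: an epoch may finish once its size is non-zero, none of its
 * writes are still active, and its barrier number has arrived.  The loop
 * above then either frees it (an older, drained epoch) or recycles the
 * current one in place, and keeps walking, since finishing one epoch may
 * allow its successor to finish as well.
 */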

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}
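
/*
 * Illustrative only: for a 32 KiB epoch entry, the above first tries a
 * single bio with eight page vectors; only if bio_add_page() refuses a
 * page (local queue limits differ from the peer's) does it start another
 * bio at the then-current sector and continue filling that one.
 */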

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}
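
/*
 * Illustrative only: with integrity checking enabled, each data payload
 * arrives on the wire as <digest: dgs bytes><data: data_size - dgs bytes>,
 * which is why read_in_block() above and recv_dless_read() below first
 * pull the digest and shrink data_size before reading the data proper.
 */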

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block(),
	 * or in _drbd_clear_done_ee, respectively */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static struct drbd_request *
find_request(struct drbd_conf *mdev,
	     struct hlist_head *(*hash_slot)(struct drbd_conf *, sector_t),
	     u64 id, sector_t sector, bool missing_ok, const char *func)
{
	struct hlist_head *slot = hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req != (unsigned long)id)
			continue;
		if (req->i.sector != sector) {
			dev_err(DEV, "%s: found request %lu but it has "
				"wrong sector (%llus versus %llus)\n",
				func, (unsigned long)req,
				(unsigned long long)req->i.sector,
				(unsigned long long)sector);
			return NULL;
		}
		return req;
	}
	if (!missing_ok) {
		dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
	}
	return NULL;
}
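
/*
 * In short: the wire block_id of an application read/write is simply the
 * kernel pointer of the originating drbd_request, so find_request()
 * compares it directly against the hashed requests and uses the sector
 * only as a sanity check.
 */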
1500
Philipp Reisner02918be2010-08-20 14:35:10 +02001501static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001502{
1503 struct drbd_request *req;
1504 sector_t sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001505 int ok;
Philipp Reisner02918be2010-08-20 14:35:10 +02001506 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001507
1508 sector = be64_to_cpu(p->sector);
1509
1510 spin_lock_irq(&mdev->req_lock);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001511 req = find_request(mdev, ar_hash_slot, p->block_id, sector, false, __func__);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512 spin_unlock_irq(&mdev->req_lock);
Andreas Gruenbacherc3afd8f2011-01-20 22:25:40 +01001513 if (unlikely(!req))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001514 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515
Bart Van Assche24c48302011-05-21 18:32:29 +02001516 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
Philipp Reisnerb411b362009-09-25 16:07:19 -07001517 * special casing it there for the various failure cases.
1518 * still no race with drbd_fail_pending_reads */
1519 ok = recv_dless_read(mdev, req, sector, data_size);
1520
1521 if (ok)
1522 req_mod(req, data_received);
1523 /* else: nothing. handled from drbd_disconnect...
1524 * I don't think we may complete this just yet
1525 * in case we are "on-disconnect: freeze" */
1526
1527 return ok;
1528}
1529
Philipp Reisner02918be2010-08-20 14:35:10 +02001530static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001531{
1532 sector_t sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001533 int ok;
Philipp Reisner02918be2010-08-20 14:35:10 +02001534 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001535
1536 sector = be64_to_cpu(p->sector);
1537 D_ASSERT(p->block_id == ID_SYNCER);
1538
1539 if (get_ldev(mdev)) {
1540 /* data is submitted to disk within recv_resync_read.
1541 * corresponding put_ldev done below on error,
Andreas Gruenbacher9c508422011-01-14 21:19:36 +01001542 * or in drbd_endio_sec. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001543 ok = recv_resync_read(mdev, sector, data_size);
1544 } else {
1545 if (__ratelimit(&drbd_ratelimit_state))
 1546			dev_err(DEV, "Cannot write resync data to local disk.\n");
1547
1548 ok = drbd_drain_block(mdev, data_size);
1549
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001550 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001551 }
1552
Philipp Reisner778f2712010-07-06 11:14:00 +02001553 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1554
Philipp Reisnerb411b362009-09-25 16:07:19 -07001555 return ok;
1556}
1557
1558/* e_end_block() is called via drbd_process_done_ee().
1559 * this means this function only runs in the asender thread
1560 */
1561static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1562{
1563 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1564 sector_t sector = e->sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001565 int ok = 1, pcmd;
1566
Philipp Reisnerb411b362009-09-25 16:07:19 -07001567 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001568 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001569 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1570 mdev->state.conn <= C_PAUSED_SYNC_T &&
1571 e->flags & EE_MAY_SET_IN_SYNC) ?
1572 P_RS_WRITE_ACK : P_WRITE_ACK;
1573 ok &= drbd_send_ack(mdev, pcmd, e);
1574 if (pcmd == P_RS_WRITE_ACK)
1575 drbd_set_in_sync(mdev, sector, e->size);
1576 } else {
1577 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1578 /* we expect it to be marked out of sync anyways...
1579 * maybe assert this? */
1580 }
1581 dec_unacked(mdev);
1582 }
1583 /* we delete from the conflict detection hash _after_ we sent out the
1584 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1585 if (mdev->net_conf->two_primaries) {
1586 spin_lock_irq(&mdev->req_lock);
Bart Van Assche24c48302011-05-21 18:32:29 +02001587 D_ASSERT(!hlist_unhashed(&e->collision));
1588 hlist_del_init(&e->collision);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001589 spin_unlock_irq(&mdev->req_lock);
1590 } else {
Bart Van Assche24c48302011-05-21 18:32:29 +02001591 D_ASSERT(hlist_unhashed(&e->collision));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001592 }
1593
1594 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1595
1596 return ok;
1597}
1598
1599static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1600{
1601 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1602 int ok = 1;
1603
1604 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1605 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1606
1607 spin_lock_irq(&mdev->req_lock);
Bart Van Assche24c48302011-05-21 18:32:29 +02001608 D_ASSERT(!hlist_unhashed(&e->collision));
1609 hlist_del_init(&e->collision);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001610 spin_unlock_irq(&mdev->req_lock);
1611
1612 dec_unacked(mdev);
1613
1614 return ok;
1615}
1616
1617/* Called from receive_Data.
1618 * Synchronize packets on sock with packets on msock.
1619 *
1620 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1621 * packet traveling on msock, they are still processed in the order they have
1622 * been sent.
1623 *
1624 * Note: we don't care for Ack packets overtaking P_DATA packets.
1625 *
 1626 * In case packet_seq is larger than mdev->peer_seq, there are
1627 * outstanding packets on the msock. We wait for them to arrive.
1628 * In case we are the logically next packet, we update mdev->peer_seq
1629 * ourselves. Correctly handles 32bit wrap around.
1630 *
1631 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1632 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1633 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 1634 * 1<<11 == 2048 seconds aka ages for the 32bit wraparound...
1635 *
1636 * returns 0 if we may process the packet,
1637 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1638static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1639{
1640 DEFINE_WAIT(wait);
1641 unsigned int p_seq;
1642 long timeout;
1643 int ret = 0;
1644 spin_lock(&mdev->peer_seq_lock);
1645 for (;;) {
1646 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1647 if (seq_le(packet_seq, mdev->peer_seq+1))
1648 break;
1649 if (signal_pending(current)) {
1650 ret = -ERESTARTSYS;
1651 break;
1652 }
1653 p_seq = mdev->peer_seq;
1654 spin_unlock(&mdev->peer_seq_lock);
1655 timeout = schedule_timeout(30*HZ);
1656 spin_lock(&mdev->peer_seq_lock);
1657 if (timeout == 0 && p_seq == mdev->peer_seq) {
1658 ret = -ETIMEDOUT;
1659 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1660 break;
1661 }
1662 }
1663 finish_wait(&mdev->seq_wait, &wait);
1664 if (mdev->peer_seq+1 == packet_seq)
1665 mdev->peer_seq++;
1666 spin_unlock(&mdev->peer_seq_lock);
1667 return ret;
1668}
1669
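/* Illustration only -- seq_le() used in drbd_wait_peer_seq() above is
 * defined elsewhere in drbd; a wraparound-safe "a <= b" for 32bit
 * sequence numbers can be built on the sign of the wrapping difference
 * (a sketch, assuming the usual kernel u32/s32 types): */
static inline int seq_le_sketch(u32 a, u32 b)
{
	/* e.g. a = 0xffffffff, b = 0x00000001: (s32)(a - b) == -2 <= 0,
	 * so a still counts as "less or equal" across the 32bit wrap */
	return (s32)(a - b) <= 0;
}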
Lars Ellenberg688593c2010-11-17 22:25:03 +01001670/* see also bio_flags_to_wire()
 1671 * We map the DP_* wire flags to bio REQ_* flags semantically (and back),
 1672 * not bit for bit, since the peer may run a different kernel version. */
1673static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001674{
Lars Ellenberg688593c2010-11-17 22:25:03 +01001675 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1676 (dpf & DP_FUA ? REQ_FUA : 0) |
1677 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1678 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001679}
1680
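/* Illustration only -- the inverse mapping, bio_flags_to_wire(), lives on
 * the sending side and is not part of this file. With the same DP_ and
 * REQ_ flag names as above, a sketch would look like this (the real
 * function would also have to consider the agreed protocol version): */
static u32 bio_flags_to_wire_sketch(unsigned long bi_rw)
{
	return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
	       (bi_rw & REQ_FUA ? DP_FUA : 0) |
	       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
	       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}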
Philipp Reisnerb411b362009-09-25 16:07:19 -07001681/* mirrored write */
Philipp Reisner02918be2010-08-20 14:35:10 +02001682static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001683{
1684 sector_t sector;
1685 struct drbd_epoch_entry *e;
Philipp Reisner02918be2010-08-20 14:35:10 +02001686 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001687 int rw = WRITE;
1688 u32 dp_flags;
1689
Philipp Reisnerb411b362009-09-25 16:07:19 -07001690 if (!get_ldev(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001691 spin_lock(&mdev->peer_seq_lock);
1692 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1693 mdev->peer_seq++;
1694 spin_unlock(&mdev->peer_seq_lock);
1695
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001696 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001697 atomic_inc(&mdev->current_epoch->epoch_size);
1698 return drbd_drain_block(mdev, data_size);
1699 }
1700
1701 /* get_ldev(mdev) successful.
1702 * Corresponding put_ldev done either below (on various errors),
Andreas Gruenbacher9c508422011-01-14 21:19:36 +01001703 * or in drbd_endio_sec, if we successfully submit the data at
Philipp Reisnerb411b362009-09-25 16:07:19 -07001704 * the end of this function. */
1705
1706 sector = be64_to_cpu(p->sector);
1707 e = read_in_block(mdev, p->block_id, sector, data_size);
1708 if (!e) {
1709 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001710 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001711 }
1712
Philipp Reisnerb411b362009-09-25 16:07:19 -07001713 e->w.cb = e_end_block;
1714
Lars Ellenberg688593c2010-11-17 22:25:03 +01001715 dp_flags = be32_to_cpu(p->dp_flags);
1716 rw |= wire_flags_to_bio(mdev, dp_flags);
1717
1718 if (dp_flags & DP_MAY_SET_IN_SYNC)
1719 e->flags |= EE_MAY_SET_IN_SYNC;
1720
Philipp Reisnerb411b362009-09-25 16:07:19 -07001721 spin_lock(&mdev->epoch_lock);
1722 e->epoch = mdev->current_epoch;
1723 atomic_inc(&e->epoch->epoch_size);
1724 atomic_inc(&e->epoch->active);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001725 spin_unlock(&mdev->epoch_lock);
1726
Philipp Reisnerb411b362009-09-25 16:07:19 -07001727 /* I'm the receiver, I do hold a net_cnt reference. */
1728 if (!mdev->net_conf->two_primaries) {
1729 spin_lock_irq(&mdev->req_lock);
1730 } else {
1731 /* don't get the req_lock yet,
1732 * we may sleep in drbd_wait_peer_seq */
1733 const int size = e->size;
1734 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1735 DEFINE_WAIT(wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001736 int first;
1737
1738 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1739 BUG_ON(mdev->ee_hash == NULL);
1740 BUG_ON(mdev->tl_hash == NULL);
1741
1742 /* conflict detection and handling:
1743 * 1. wait on the sequence number,
1744 * in case this data packet overtook ACK packets.
1745 * 2. check our hash tables for conflicting requests.
 1746	 * we only need to walk the tl_hash, since an ee cannot
 1747	 * have a conflict with another ee: on the submitting
1748 * node, the corresponding req had already been conflicting,
1749 * and a conflicting req is never sent.
1750 *
1751 * Note: for two_primaries, we are protocol C,
1752 * so there cannot be any request that is DONE
1753 * but still on the transfer log.
1754 *
1755 * unconditionally add to the ee_hash.
1756 *
1757 * if no conflicting request is found:
1758 * submit.
1759 *
1760 * if any conflicting request is found
1761 * that has not yet been acked,
1762 * AND I have the "discard concurrent writes" flag:
1763 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1764 *
1765 * if any conflicting request is found:
1766 * block the receiver, waiting on misc_wait
1767 * until no more conflicting requests are there,
1768 * or we get interrupted (disconnect).
1769 *
1770 * we do not just write after local io completion of those
1771 * requests, but only after req is done completely, i.e.
1772 * we wait for the P_DISCARD_ACK to arrive!
1773 *
1774 * then proceed normally, i.e. submit.
1775 */
1776 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1777 goto out_interrupted;
1778
1779 spin_lock_irq(&mdev->req_lock);
1780
Bart Van Assche24c48302011-05-21 18:32:29 +02001781 hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001782
Philipp Reisnerb411b362009-09-25 16:07:19 -07001783 first = 1;
1784 for (;;) {
Andreas Gruenbacherde696712011-01-20 15:00:24 +01001785 struct drbd_interval *i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001786 int have_unacked = 0;
1787 int have_conflict = 0;
1788 prepare_to_wait(&mdev->misc_wait, &wait,
1789 TASK_INTERRUPTIBLE);
Andreas Gruenbacherde696712011-01-20 15:00:24 +01001790
1791 i = drbd_find_overlap(&mdev->write_requests, sector, size);
1792 if (i) {
1793 struct drbd_request *req2 =
1794 container_of(i, struct drbd_request, i);
1795
1796 /* only ALERT on first iteration,
1797 * we may be woken up early... */
1798 if (first)
1799 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1800 " new: %llus +%u; pending: %llus +%u\n",
1801 current->comm, current->pid,
1802 (unsigned long long)sector, size,
1803 (unsigned long long)req2->i.sector, req2->i.size);
1804 if (req2->rq_state & RQ_NET_PENDING)
1805 ++have_unacked;
1806 ++have_conflict;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001807 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001808 if (!have_conflict)
1809 break;
1810
1811 /* Discard Ack only for the _first_ iteration */
1812 if (first && discard && have_unacked) {
1813 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1814 (unsigned long long)sector);
1815 inc_unacked(mdev);
1816 e->w.cb = e_send_discard_ack;
1817 list_add_tail(&e->w.list, &mdev->done_ee);
1818
1819 spin_unlock_irq(&mdev->req_lock);
1820
1821 /* we could probably send that P_DISCARD_ACK ourselves,
1822 * but I don't like the receiver using the msock */
1823
1824 put_ldev(mdev);
1825 wake_asender(mdev);
1826 finish_wait(&mdev->misc_wait, &wait);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001827 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001828 }
1829
1830 if (signal_pending(current)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001831 hlist_del_init(&e->collision);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001832
1833 spin_unlock_irq(&mdev->req_lock);
1834
1835 finish_wait(&mdev->misc_wait, &wait);
1836 goto out_interrupted;
1837 }
1838
1839 spin_unlock_irq(&mdev->req_lock);
1840 if (first) {
1841 first = 0;
1842 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1843 "sec=%llus\n", (unsigned long long)sector);
1844 } else if (discard) {
1845 /* we had none on the first iteration.
1846 * there must be none now. */
1847 D_ASSERT(have_unacked == 0);
1848 }
1849 schedule();
1850 spin_lock_irq(&mdev->req_lock);
1851 }
1852 finish_wait(&mdev->misc_wait, &wait);
1853 }
1854
1855 list_add(&e->w.list, &mdev->active_ee);
1856 spin_unlock_irq(&mdev->req_lock);
1857
1858 switch (mdev->net_conf->wire_protocol) {
1859 case DRBD_PROT_C:
1860 inc_unacked(mdev);
1861 /* corresponding dec_unacked() in e_end_block()
1862 * respective _drbd_clear_done_ee */
1863 break;
1864 case DRBD_PROT_B:
1865 /* I really don't like it that the receiver thread
1866 * sends on the msock, but anyways */
1867 drbd_send_ack(mdev, P_RECV_ACK, e);
1868 break;
1869 case DRBD_PROT_A:
1870 /* nothing to do */
1871 break;
1872 }
1873
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001874 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001875 /* In case we have the only disk of the cluster, */
1876 drbd_set_out_of_sync(mdev, e->sector, e->size);
1877 e->flags |= EE_CALL_AL_COMPLETE_IO;
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001878 e->flags &= ~EE_MAY_SET_IN_SYNC;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001879 drbd_al_begin_io(mdev, e->sector);
1880 }
1881
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001882 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001883 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001884
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001885	/* we don't care about the reason here */
1886 dev_err(DEV, "submit failed, triggering re-connect\n");
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001887 spin_lock_irq(&mdev->req_lock);
1888 list_del(&e->w.list);
Bart Van Assche24c48302011-05-21 18:32:29 +02001889 hlist_del_init(&e->collision);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001890 spin_unlock_irq(&mdev->req_lock);
1891 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1892 drbd_al_complete_io(mdev, e->sector);
1893
Philipp Reisnerb411b362009-09-25 16:07:19 -07001894out_interrupted:
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001895 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001896 put_ldev(mdev);
1897 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001898 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001899}
1900
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001901/* We may throttle resync, if the lower device seems to be busy,
1902 * and current sync rate is above c_min_rate.
1903 *
1904 * To decide whether or not the lower device is busy, we use a scheme similar
 1905 * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
1906 * (more than 64 sectors) of activity we cannot account for with our own resync
1907 * activity, it obviously is "busy".
1908 *
1909 * The current sync rate used here uses only the most recent two step marks,
1910 * to have a short time average so we can react faster.
1911 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01001912int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001913{
1914 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1915 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01001916 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001917 int curr_events;
1918 int throttle = 0;
1919
1920 /* feature disabled? */
1921 if (mdev->sync_conf.c_min_rate == 0)
1922 return 0;
1923
Philipp Reisnere3555d82010-11-07 15:56:29 +01001924 spin_lock_irq(&mdev->al_lock);
1925 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1926 if (tmp) {
1927 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1928 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1929 spin_unlock_irq(&mdev->al_lock);
1930 return 0;
1931 }
1932 /* Do not slow down if app IO is already waiting for this extent */
1933 }
1934 spin_unlock_irq(&mdev->al_lock);
1935
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001936 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1937 (int)part_stat_read(&disk->part0, sectors[1]) -
1938 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01001939
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001940 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1941 unsigned long rs_left;
1942 int i;
1943
1944 mdev->rs_last_events = curr_events;
1945
1946 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1947 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01001948 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
1949
1950 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
1951 rs_left = mdev->ov_left;
1952 else
1953 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001954
1955 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
1956 if (!dt)
1957 dt++;
1958 db = mdev->rs_mark_left[i] - rs_left;
1959 dbdt = Bit2KB(db/dt);
1960
1961 if (dbdt > mdev->sync_conf.c_min_rate)
1962 throttle = 1;
1963 }
1964 return throttle;
1965}
1966
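/* Worked example for the short-time average above (sketch figures only,
 * assuming the usual 4 KiB of data per bitmap bit): with the two most
 * recent step marks about 6 seconds apart, dt == 6; if 6144 bits were
 * cleared since then, db == 6144 and dbdt == Bit2KB(6144/6) == 4096 KB/sec.
 * Resync gets throttled iff that rate exceeds c_min_rate. */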
1967
Philipp Reisner02918be2010-08-20 14:35:10 +02001968static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001969{
1970 sector_t sector;
1971 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1972 struct drbd_epoch_entry *e;
1973 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001974 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001975 unsigned int fault_type;
Philipp Reisner02918be2010-08-20 14:35:10 +02001976 struct p_block_req *p = &mdev->data.rbuf.block_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001977
1978 sector = be64_to_cpu(p->sector);
1979 size = be32_to_cpu(p->blksize);
1980
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01001981 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001982 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1983 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001984 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001985 }
1986 if (sector + (size>>9) > capacity) {
1987 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1988 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001989 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001990 }
1991
1992 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001993 verb = 1;
1994 switch (cmd) {
1995 case P_DATA_REQUEST:
1996 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
1997 break;
1998 case P_RS_DATA_REQUEST:
1999 case P_CSUM_RS_REQUEST:
2000 case P_OV_REQUEST:
2001 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2002 break;
2003 case P_OV_REPLY:
2004 verb = 0;
2005 dec_rs_pending(mdev);
2006 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2007 break;
2008 default:
2009 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2010 cmdname(cmd));
2011 }
2012 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002013		dev_err(DEV, "Cannot satisfy peer's read request, "
2014 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002015
Lars Ellenberga821cc42010-09-06 12:31:37 +02002016	/* drain possible payload */
2017 return drbd_drain_block(mdev, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002018 }
2019
2020 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2021 * "criss-cross" setup, that might cause write-out on some other DRBD,
2022 * which in turn might block on the other node at this very place. */
2023 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2024 if (!e) {
2025 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002026 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002027 }
2028
Philipp Reisner02918be2010-08-20 14:35:10 +02002029 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002030 case P_DATA_REQUEST:
2031 e->w.cb = w_e_end_data_req;
2032 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002033 /* application IO, don't drbd_rs_begin_io */
2034 goto submit;
2035
Philipp Reisnerb411b362009-09-25 16:07:19 -07002036 case P_RS_DATA_REQUEST:
2037 e->w.cb = w_e_end_rsdata_req;
2038 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002039 /* used in the sector offset progress display */
2040 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002041 break;
2042
2043 case P_OV_REPLY:
2044 case P_CSUM_RS_REQUEST:
2045 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002046 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2047 if (!di)
2048 goto out_free_e;
2049
2050 di->digest_size = digest_size;
2051 di->digest = (((char *)di)+sizeof(struct digest_info));
2052
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002053 e->digest = di;
2054 e->flags |= EE_HAS_DIGEST;
2055
Philipp Reisnerb411b362009-09-25 16:07:19 -07002056 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2057 goto out_free_e;
2058
Philipp Reisner02918be2010-08-20 14:35:10 +02002059 if (cmd == P_CSUM_RS_REQUEST) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002060 D_ASSERT(mdev->agreed_pro_version >= 89);
2061 e->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002062 /* used in the sector offset progress display */
2063 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisner02918be2010-08-20 14:35:10 +02002064 } else if (cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002065 /* track progress, we may need to throttle */
2066 atomic_add(size >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002067 e->w.cb = w_e_end_ov_reply;
2068 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002069 /* drbd_rs_begin_io done when we sent this request,
2070 * but accounting still needs to be done. */
2071 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002072 }
2073 break;
2074
2075 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002076 if (mdev->ov_start_sector == ~(sector_t)0 &&
2077 mdev->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002078 unsigned long now = jiffies;
2079 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002080 mdev->ov_start_sector = sector;
2081 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002082 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2083 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002084 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2085 mdev->rs_mark_left[i] = mdev->ov_left;
2086 mdev->rs_mark_time[i] = now;
2087 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002088 dev_info(DEV, "Online Verify start sector: %llu\n",
2089 (unsigned long long)sector);
2090 }
2091 e->w.cb = w_e_end_ov_req;
2092 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002093 break;
2094
Philipp Reisnerb411b362009-09-25 16:07:19 -07002095 default:
2096 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002097 cmdname(cmd));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002098 fault_type = DRBD_FAULT_MAX;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002099 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002100 }
2101
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002102 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2103 * wrt the receiver, but it is not as straightforward as it may seem.
2104 * Various places in the resync start and stop logic assume resync
2105 * requests are processed in order, requeuing this on the worker thread
2106 * introduces a bunch of new code for synchronization between threads.
2107 *
2108 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2109 * "forever", throttling after drbd_rs_begin_io will lock that extent
2110 * for application writes for the same time. For now, just throttle
2111 * here, where the rest of the code expects the receiver to sleep for
2112 * a while, anyways.
2113 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002114
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002115 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2116 * this defers syncer requests for some time, before letting at least
 2117 * one request through. The resync controller on the receiving side
2118 * will adapt to the incoming rate accordingly.
2119 *
2120 * We cannot throttle here if remote is Primary/SyncTarget:
2121 * we would also throttle its application reads.
2122 * In that case, throttling is done on the SyncTarget only.
2123 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002124 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2125 schedule_timeout_uninterruptible(HZ/10);
2126 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002127 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002128
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002129submit_for_resync:
2130 atomic_add(size >> 9, &mdev->rs_sect_ev);
2131
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002132submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002133 inc_unacked(mdev);
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002134 spin_lock_irq(&mdev->req_lock);
2135 list_add_tail(&e->w.list, &mdev->read_ee);
2136 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002137
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002138 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002139 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002140
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002141	/* we don't care about the reason here */
2142 dev_err(DEV, "submit failed, triggering re-connect\n");
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002143 spin_lock_irq(&mdev->req_lock);
2144 list_del(&e->w.list);
2145 spin_unlock_irq(&mdev->req_lock);
2146 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2147
Philipp Reisnerb411b362009-09-25 16:07:19 -07002148out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002149 put_ldev(mdev);
2150 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002151 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002152}
2153
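/* After-split-brain recovery policy with zero primaries. The sign
 * convention matches drbd_uuid_compare() below: a negative return means
 * this node's data gets discarded (it becomes sync target), a positive
 * one means the peer's data does, and -100 means no automatic decision. */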
2154static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2155{
2156 int self, peer, rv = -100;
2157 unsigned long ch_self, ch_peer;
2158
2159 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2160 peer = mdev->p_uuid[UI_BITMAP] & 1;
2161
2162 ch_peer = mdev->p_uuid[UI_SIZE];
2163 ch_self = mdev->comm_bm_set;
2164
2165 switch (mdev->net_conf->after_sb_0p) {
2166 case ASB_CONSENSUS:
2167 case ASB_DISCARD_SECONDARY:
2168 case ASB_CALL_HELPER:
2169 dev_err(DEV, "Configuration error.\n");
2170 break;
2171 case ASB_DISCONNECT:
2172 break;
2173 case ASB_DISCARD_YOUNGER_PRI:
2174 if (self == 0 && peer == 1) {
2175 rv = -1;
2176 break;
2177 }
2178 if (self == 1 && peer == 0) {
2179 rv = 1;
2180 break;
2181 }
2182 /* Else fall through to one of the other strategies... */
2183 case ASB_DISCARD_OLDER_PRI:
2184 if (self == 0 && peer == 1) {
2185 rv = 1;
2186 break;
2187 }
2188 if (self == 1 && peer == 0) {
2189 rv = -1;
2190 break;
2191 }
2192 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002193 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002194 "Using discard-least-changes instead\n");
2195 case ASB_DISCARD_ZERO_CHG:
2196 if (ch_peer == 0 && ch_self == 0) {
2197 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2198 ? -1 : 1;
2199 break;
2200 } else {
2201 if (ch_peer == 0) { rv = 1; break; }
2202 if (ch_self == 0) { rv = -1; break; }
2203 }
2204 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2205 break;
2206 case ASB_DISCARD_LEAST_CHG:
2207 if (ch_self < ch_peer)
2208 rv = -1;
2209 else if (ch_self > ch_peer)
2210 rv = 1;
2211 else /* ( ch_self == ch_peer ) */
2212 /* Well, then use something else. */
2213 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2214 ? -1 : 1;
2215 break;
2216 case ASB_DISCARD_LOCAL:
2217 rv = -1;
2218 break;
2219 case ASB_DISCARD_REMOTE:
2220 rv = 1;
2221 }
2222
2223 return rv;
2224}
2225
2226static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2227{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002228 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002229
2230 switch (mdev->net_conf->after_sb_1p) {
2231 case ASB_DISCARD_YOUNGER_PRI:
2232 case ASB_DISCARD_OLDER_PRI:
2233 case ASB_DISCARD_LEAST_CHG:
2234 case ASB_DISCARD_LOCAL:
2235 case ASB_DISCARD_REMOTE:
2236 dev_err(DEV, "Configuration error.\n");
2237 break;
2238 case ASB_DISCONNECT:
2239 break;
2240 case ASB_CONSENSUS:
2241 hg = drbd_asb_recover_0p(mdev);
2242 if (hg == -1 && mdev->state.role == R_SECONDARY)
2243 rv = hg;
2244 if (hg == 1 && mdev->state.role == R_PRIMARY)
2245 rv = hg;
2246 break;
2247 case ASB_VIOLENTLY:
2248 rv = drbd_asb_recover_0p(mdev);
2249 break;
2250 case ASB_DISCARD_SECONDARY:
2251 return mdev->state.role == R_PRIMARY ? 1 : -1;
2252 case ASB_CALL_HELPER:
2253 hg = drbd_asb_recover_0p(mdev);
2254 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002255 enum drbd_state_rv rv2;
2256
2257 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002258 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2259 * we might be here in C_WF_REPORT_PARAMS which is transient.
2260 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002261 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2262 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002263 drbd_khelper(mdev, "pri-lost-after-sb");
2264 } else {
2265 dev_warn(DEV, "Successfully gave up primary role.\n");
2266 rv = hg;
2267 }
2268 } else
2269 rv = hg;
2270 }
2271
2272 return rv;
2273}
2274
2275static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2276{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002277 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002278
2279 switch (mdev->net_conf->after_sb_2p) {
2280 case ASB_DISCARD_YOUNGER_PRI:
2281 case ASB_DISCARD_OLDER_PRI:
2282 case ASB_DISCARD_LEAST_CHG:
2283 case ASB_DISCARD_LOCAL:
2284 case ASB_DISCARD_REMOTE:
2285 case ASB_CONSENSUS:
2286 case ASB_DISCARD_SECONDARY:
2287 dev_err(DEV, "Configuration error.\n");
2288 break;
2289 case ASB_VIOLENTLY:
2290 rv = drbd_asb_recover_0p(mdev);
2291 break;
2292 case ASB_DISCONNECT:
2293 break;
2294 case ASB_CALL_HELPER:
2295 hg = drbd_asb_recover_0p(mdev);
2296 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002297 enum drbd_state_rv rv2;
2298
Philipp Reisnerb411b362009-09-25 16:07:19 -07002299 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2300 * we might be here in C_WF_REPORT_PARAMS which is transient.
2301 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002302 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2303 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002304 drbd_khelper(mdev, "pri-lost-after-sb");
2305 } else {
2306 dev_warn(DEV, "Successfully gave up primary role.\n");
2307 rv = hg;
2308 }
2309 } else
2310 rv = hg;
2311 }
2312
2313 return rv;
2314}
2315
2316static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2317 u64 bits, u64 flags)
2318{
2319 if (!uuid) {
2320 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2321 return;
2322 }
2323 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2324 text,
2325 (unsigned long long)uuid[UI_CURRENT],
2326 (unsigned long long)uuid[UI_BITMAP],
2327 (unsigned long long)uuid[UI_HISTORY_START],
2328 (unsigned long long)uuid[UI_HISTORY_END],
2329 (unsigned long long)bits,
2330 (unsigned long long)flags);
2331}
2332
2333/*
2334 100 after split brain try auto recover
2335 2 C_SYNC_SOURCE set BitMap
2336 1 C_SYNC_SOURCE use BitMap
2337 0 no Sync
2338 -1 C_SYNC_TARGET use BitMap
2339 -2 C_SYNC_TARGET set BitMap
2340 -100 after split brain, disconnect
2341-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002342-1091 requires proto 91
2343-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002344 */
2345static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2346{
2347 u64 self, peer;
2348 int i, j;
2349
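	/* the lowest bit of each UUID value carries role information (cf.
	 * the "& 1" tests in drbd_asb_recover_0p() above), so mask it out
	 * before comparing the UUID values themselves */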
2350 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2351 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2352
2353 *rule_nr = 10;
2354 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2355 return 0;
2356
2357 *rule_nr = 20;
2358 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2359 peer != UUID_JUST_CREATED)
2360 return -2;
2361
2362 *rule_nr = 30;
2363 if (self != UUID_JUST_CREATED &&
2364 (peer == UUID_JUST_CREATED || peer == (u64)0))
2365 return 2;
2366
2367 if (self == peer) {
2368 int rct, dc; /* roles at crash time */
2369
2370 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2371
2372 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002373 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002374
2375 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2376 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2377 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2378 drbd_uuid_set_bm(mdev, 0UL);
2379
2380 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2381 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2382 *rule_nr = 34;
2383 } else {
2384 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2385 *rule_nr = 36;
2386 }
2387
2388 return 1;
2389 }
2390
2391 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2392
2393 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002394 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002395
2396 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2397 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2398 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2399
2400 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2401 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2402 mdev->p_uuid[UI_BITMAP] = 0UL;
2403
2404 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2405 *rule_nr = 35;
2406 } else {
2407 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2408 *rule_nr = 37;
2409 }
2410
2411 return -1;
2412 }
2413
2414 /* Common power [off|failure] */
2415 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2416 (mdev->p_uuid[UI_FLAGS] & 2);
2417 /* lowest bit is set when we were primary,
2418 * next bit (weight 2) is set when peer was primary */
2419 *rule_nr = 40;
2420
2421 switch (rct) {
2422 case 0: /* !self_pri && !peer_pri */ return 0;
2423 case 1: /* self_pri && !peer_pri */ return 1;
2424 case 2: /* !self_pri && peer_pri */ return -1;
2425 case 3: /* self_pri && peer_pri */
2426 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2427 return dc ? -1 : 1;
2428 }
2429 }
2430
2431 *rule_nr = 50;
2432 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2433 if (self == peer)
2434 return -1;
2435
2436 *rule_nr = 51;
2437 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2438 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002439 if (mdev->agreed_pro_version < 96 ?
2440 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2441 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2442 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002443			/* The last P_SYNC_UUID did not get through. Undo the modifications
 2444			   the peer made to its UUIDs when it last started a resync as sync source. */
2445
2446 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002447 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002448
2449 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2450 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002451
 2452			dev_info(DEV, "Did not get the last syncUUID packet, corrected:\n");
2453 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2454
Philipp Reisnerb411b362009-09-25 16:07:19 -07002455 return -1;
2456 }
2457 }
2458
2459 *rule_nr = 60;
2460 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2461 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2462 peer = mdev->p_uuid[i] & ~((u64)1);
2463 if (self == peer)
2464 return -2;
2465 }
2466
2467 *rule_nr = 70;
2468 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2469 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2470 if (self == peer)
2471 return 1;
2472
2473 *rule_nr = 71;
2474 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2475 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002476 if (mdev->agreed_pro_version < 96 ?
2477 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2478 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2479 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002480			/* The last P_SYNC_UUID did not get through. Undo the modifications
 2481			   we made to our UUIDs when we last started a resync as sync source. */
2482
2483 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002484 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002485
2486 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2487 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2488
Philipp Reisner4a23f262011-01-11 17:42:17 +01002489 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002490 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2491 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2492
2493 return 1;
2494 }
2495 }
2496
2497
2498 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002499 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002500 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2501 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2502 if (self == peer)
2503 return 2;
2504 }
2505
2506 *rule_nr = 90;
2507 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2508 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2509 if (self == peer && self != ((u64)0))
2510 return 100;
2511
2512 *rule_nr = 100;
2513 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2514 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2515 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2516 peer = mdev->p_uuid[j] & ~((u64)1);
2517 if (self == peer)
2518 return -100;
2519 }
2520 }
2521
2522 return -1000;
2523}
2524
2525/* drbd_sync_handshake() returns the new conn state on success, or
2526 CONN_MASK (-1) on failure.
2527 */
2528static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2529 enum drbd_disk_state peer_disk) __must_hold(local)
2530{
2531 int hg, rule_nr;
2532 enum drbd_conns rv = C_MASK;
2533 enum drbd_disk_state mydisk;
2534
2535 mydisk = mdev->state.disk;
2536 if (mydisk == D_NEGOTIATING)
2537 mydisk = mdev->new_state_tmp.disk;
2538
2539 dev_info(DEV, "drbd_sync_handshake:\n");
2540 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2541 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2542 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2543
2544 hg = drbd_uuid_compare(mdev, &rule_nr);
2545
2546 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2547
2548 if (hg == -1000) {
2549 dev_alert(DEV, "Unrelated data, aborting!\n");
2550 return C_MASK;
2551 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002552 if (hg < -1000) {
2553 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002554 return C_MASK;
2555 }
2556
2557 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2558 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2559 int f = (hg == -100) || abs(hg) == 2;
2560 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2561 if (f)
2562 hg = hg*2;
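		/* doubling turns "use bitmap" (+/-1) into "set bitmap" (+/-2),
		 * i.e. a full sync -- cf. the rule table above
		 * drbd_uuid_compare() and the abs(hg) >= 2 check below */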
2563 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2564 hg > 0 ? "source" : "target");
2565 }
2566
Adam Gandelman3a11a482010-04-08 16:48:23 -07002567 if (abs(hg) == 100)
2568 drbd_khelper(mdev, "initial-split-brain");
2569
Philipp Reisnerb411b362009-09-25 16:07:19 -07002570 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2571 int pcount = (mdev->state.role == R_PRIMARY)
2572 + (peer_role == R_PRIMARY);
2573 int forced = (hg == -100);
2574
2575 switch (pcount) {
2576 case 0:
2577 hg = drbd_asb_recover_0p(mdev);
2578 break;
2579 case 1:
2580 hg = drbd_asb_recover_1p(mdev);
2581 break;
2582 case 2:
2583 hg = drbd_asb_recover_2p(mdev);
2584 break;
2585 }
2586 if (abs(hg) < 100) {
2587 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2588 "automatically solved. Sync from %s node\n",
2589 pcount, (hg < 0) ? "peer" : "this");
2590 if (forced) {
2591 dev_warn(DEV, "Doing a full sync, since"
2592 " UUIDs where ambiguous.\n");
2593 hg = hg*2;
2594 }
2595 }
2596 }
2597
2598 if (hg == -100) {
2599 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2600 hg = -1;
2601 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2602 hg = 1;
2603
2604 if (abs(hg) < 100)
2605 dev_warn(DEV, "Split-Brain detected, manually solved. "
2606 "Sync from %s node\n",
2607 (hg < 0) ? "peer" : "this");
2608 }
2609
2610 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01002611 /* FIXME this log message is not correct if we end up here
2612 * after an attempted attach on a diskless node.
2613 * We just refuse to attach -- well, we drop the "connection"
2614 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07002615 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002616 drbd_khelper(mdev, "split-brain");
2617 return C_MASK;
2618 }
2619
2620 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2621 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2622 return C_MASK;
2623 }
2624
2625 if (hg < 0 && /* by intention we do not use mydisk here. */
2626 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2627 switch (mdev->net_conf->rr_conflict) {
2628 case ASB_CALL_HELPER:
2629 drbd_khelper(mdev, "pri-lost");
2630 /* fall through */
2631 case ASB_DISCONNECT:
2632 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2633 return C_MASK;
2634 case ASB_VIOLENTLY:
2635 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2636 "assumption\n");
2637 }
2638 }
2639
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002640 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2641 if (hg == 0)
2642 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2643 else
2644 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2645 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2646 abs(hg) >= 2 ? "full" : "bit-map based");
2647 return C_MASK;
2648 }
2649
Philipp Reisnerb411b362009-09-25 16:07:19 -07002650 if (abs(hg) >= 2) {
2651 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002652 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2653 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002654 return C_MASK;
2655 }
2656
2657 if (hg > 0) { /* become sync source. */
2658 rv = C_WF_BITMAP_S;
2659 } else if (hg < 0) { /* become sync target */
2660 rv = C_WF_BITMAP_T;
2661 } else {
2662 rv = C_CONNECTED;
2663 if (drbd_bm_total_weight(mdev)) {
2664 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2665 drbd_bm_total_weight(mdev));
2666 }
2667 }
2668
2669 return rv;
2670}
2671
2672/* returns 1 if invalid */
2673static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2674{
2675 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2676 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2677 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2678 return 0;
2679
2680 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2681 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2682 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2683 return 1;
2684
2685 /* everything else is valid if they are equal on both sides. */
2686 if (peer == self)
2687 return 0;
2688
 2689	/* everything else is invalid. */
2690 return 1;
2691}
2692
Philipp Reisner02918be2010-08-20 14:35:10 +02002693static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002694{
Philipp Reisner02918be2010-08-20 14:35:10 +02002695 struct p_protocol *p = &mdev->data.rbuf.protocol;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002696 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002697 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002698 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2699
Philipp Reisnerb411b362009-09-25 16:07:19 -07002700 p_proto = be32_to_cpu(p->protocol);
2701 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2702 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2703 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002704 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002705 cf = be32_to_cpu(p->conn_flags);
2706 p_want_lose = cf & CF_WANT_LOSE;
2707
2708 clear_bit(CONN_DRY_RUN, &mdev->flags);
2709
2710 if (cf & CF_DRY_RUN)
2711 set_bit(CONN_DRY_RUN, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002712
2713 if (p_proto != mdev->net_conf->wire_protocol) {
2714 dev_err(DEV, "incompatible communication protocols\n");
2715 goto disconnect;
2716 }
2717
2718 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2719 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2720 goto disconnect;
2721 }
2722
2723 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2724 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2725 goto disconnect;
2726 }
2727
2728 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2729 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2730 goto disconnect;
2731 }
2732
2733 if (p_want_lose && mdev->net_conf->want_lose) {
2734 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2735 goto disconnect;
2736 }
2737
2738 if (p_two_primaries != mdev->net_conf->two_primaries) {
2739 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2740 goto disconnect;
2741 }
2742
2743 if (mdev->agreed_pro_version >= 87) {
2744 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2745
2746 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002747 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002748
2749 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2750 if (strcmp(p_integrity_alg, my_alg)) {
2751 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2752 goto disconnect;
2753 }
2754 dev_info(DEV, "data-integrity-alg: %s\n",
2755 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2756 }
2757
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002758 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002759
2760disconnect:
2761 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002762 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002763}
2764
2765/* helper function
2766 * input: alg name, feature name
2767 * return: NULL (alg name was "")
2768 * ERR_PTR(error) if something goes wrong
2769 * or the crypto hash ptr, if it worked out ok. */
2770struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2771 const char *alg, const char *name)
2772{
2773 struct crypto_hash *tfm;
2774
2775 if (!alg[0])
2776 return NULL;
2777
2778 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2779 if (IS_ERR(tfm)) {
 2780		dev_err(DEV, "Cannot allocate \"%s\" as %s (reason: %ld)\n",
2781 alg, name, PTR_ERR(tfm));
2782 return tfm;
2783 }
2784 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2785 crypto_free_hash(tfm);
2786 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2787 return ERR_PTR(-EINVAL);
2788 }
2789 return tfm;
2790}
2791
Philipp Reisner02918be2010-08-20 14:35:10 +02002792static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002793{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002794 int ok = true;
Philipp Reisner02918be2010-08-20 14:35:10 +02002795 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002796 unsigned int header_size, data_size, exp_max_sz;
2797 struct crypto_hash *verify_tfm = NULL;
2798 struct crypto_hash *csums_tfm = NULL;
2799 const int apv = mdev->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02002800 int *rs_plan_s = NULL;
2801 int fifo_size = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002802
2803 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2804 : apv == 88 ? sizeof(struct p_rs_param)
2805 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002806 : apv <= 94 ? sizeof(struct p_rs_param_89)
2807 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002808
Philipp Reisner02918be2010-08-20 14:35:10 +02002809 if (packet_size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002810 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002811 packet_size, exp_max_sz);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002812 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002813 }
2814
2815 if (apv <= 88) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002816 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2817 data_size = packet_size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002818 } else if (apv <= 94) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002819 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2820 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002821 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002822 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02002823 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2824 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002825 D_ASSERT(data_size == 0);
2826 }
2827
2828 /* initialize verify_alg and csums_alg */
2829 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2830
Philipp Reisner02918be2010-08-20 14:35:10 +02002831 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002832 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002833
2834 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2835
2836 if (apv >= 88) {
2837 if (apv == 88) {
2838 if (data_size > SHARED_SECRET_MAX) {
2839 dev_err(DEV, "verify-alg too long, "
2840 "peer wants %u, accepting only %u byte\n",
2841 data_size, SHARED_SECRET_MAX);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002842 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002843 }
2844
2845 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002846 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002847
2848 /* we expect NUL terminated string */
2849 /* but just in case someone tries to be evil */
2850 D_ASSERT(p->verify_alg[data_size-1] == 0);
2851 p->verify_alg[data_size-1] = 0;
2852
2853 } else /* apv >= 89 */ {
2854 /* we still expect NUL terminated strings */
2855 /* but just in case someone tries to be evil */
2856 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2857 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2858 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2859 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2860 }
2861
2862 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2863 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2864 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2865 mdev->sync_conf.verify_alg, p->verify_alg);
2866 goto disconnect;
2867 }
2868 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2869 p->verify_alg, "verify-alg");
2870 if (IS_ERR(verify_tfm)) {
2871 verify_tfm = NULL;
2872 goto disconnect;
2873 }
2874 }
2875
2876 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2877 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2878 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2879 mdev->sync_conf.csums_alg, p->csums_alg);
2880 goto disconnect;
2881 }
2882 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2883 p->csums_alg, "csums-alg");
2884 if (IS_ERR(csums_tfm)) {
2885 csums_tfm = NULL;
2886 goto disconnect;
2887 }
2888 }
2889
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002890 if (apv > 94) {
2891 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2892 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2893 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2894 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2895 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02002896
2897 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
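			/* assuming SLEEP_TIME == HZ/10 (drbd_int.h) and
			 * c_plan_ahead in 0.1s units, this works out to one
			 * fifo slot per resync-controller step across the
			 * plan-ahead window */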
2898 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2899 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2900 if (!rs_plan_s) {
 2901				dev_err(DEV, "kzalloc of fifo_buffer failed\n");
2902 goto disconnect;
2903 }
2904 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002905 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002906
2907 spin_lock(&mdev->peer_seq_lock);
2908 /* lock against drbd_nl_syncer_conf() */
2909 if (verify_tfm) {
2910 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2911 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2912 crypto_free_hash(mdev->verify_tfm);
2913 mdev->verify_tfm = verify_tfm;
2914 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2915 }
2916 if (csums_tfm) {
2917 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2918 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2919 crypto_free_hash(mdev->csums_tfm);
2920 mdev->csums_tfm = csums_tfm;
2921 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2922 }
Philipp Reisner778f2712010-07-06 11:14:00 +02002923 if (fifo_size != mdev->rs_plan_s.size) {
2924 kfree(mdev->rs_plan_s.values);
2925 mdev->rs_plan_s.values = rs_plan_s;
2926 mdev->rs_plan_s.size = fifo_size;
2927 mdev->rs_planed = 0;
2928 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002929 spin_unlock(&mdev->peer_seq_lock);
2930 }
2931
2932 return ok;
2933disconnect:
2934 /* just for completeness: actually not needed,
2935 * as this is not reached if csums_tfm was ok. */
2936 crypto_free_hash(csums_tfm);
2937 /* but free the verify_tfm again, if csums_tfm did not work out */
2938 crypto_free_hash(verify_tfm);
2939 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002940 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002941}
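
/*
 * Illustrative sketch (not drbd API, names hypothetical): how the
 * plan-ahead fifo size computed in receive_SyncParam() relates to the
 * configuration.  c_plan_ahead is configured in tenths of a second and
 * the resync controller runs once per SLEEP_TIME jiffies (HZ/10 in
 * this driver), so the fifo ends up with one slot per controller step.
 */
static unsigned int plan_fifo_slots(unsigned int c_plan_ahead_tenths,
				    unsigned int sleep_time, unsigned int hz)
{
	/* e.g. c_plan_ahead = 20 (2.0s), sleep_time = hz/10:
	 * (20 * 10 * (hz/10)) / hz = 20 slots, one per 100ms step */
	return (c_plan_ahead_tenths * 10 * sleep_time) / hz;
}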
2942
Philipp Reisnerb411b362009-09-25 16:07:19 -07002943/* warn if the arguments differ by more than 12.5% */
2944static void warn_if_differ_considerably(struct drbd_conf *mdev,
2945 const char *s, sector_t a, sector_t b)
2946{
2947 sector_t d;
2948 if (a == 0 || b == 0)
2949 return;
2950 d = (a > b) ? (a - b) : (b - a);
2951 if (d > (a>>3) || d > (b>>3))
2952 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2953 (unsigned long long)a, (unsigned long long)b);
2954}
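
/*
 * Worked example for the shift-based threshold above (illustrative
 * helper, not drbd API): a>>3 is a/8, i.e. 12.5%.  With a = 1000 and
 * b = 1200 the difference d = 200 exceeds 1000>>3 = 125, so the
 * warning fires; with b = 1100, d = 100 stays below both 125 and
 * 1100>>3 = 137, and nothing is printed.
 */
static int differs_considerably(sector_t a, sector_t b)
{
	sector_t d = (a > b) ? (a - b) : (b - a);

	return a && b && (d > (a >> 3) || d > (b >> 3));
}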
2955
Philipp Reisner02918be2010-08-20 14:35:10 +02002956static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002957{
Philipp Reisner02918be2010-08-20 14:35:10 +02002958 struct p_sizes *p = &mdev->data.rbuf.sizes;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002959 enum determine_dev_size dd = unchanged;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002960 sector_t p_size, p_usize, my_usize;
2961 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01002962 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002963
Philipp Reisnerb411b362009-09-25 16:07:19 -07002964 p_size = be64_to_cpu(p->d_size);
2965 p_usize = be64_to_cpu(p->u_size);
2966
2967 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2968 dev_err(DEV, "some backing storage is needed\n");
2969 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002970 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002971 }
2972
2973 /* just store the peer's disk size for now.
2974 * we still need to figure out whether we accept that. */
2975 mdev->p_size = p_size;
2976
Philipp Reisnerb411b362009-09-25 16:07:19 -07002977 if (get_ldev(mdev)) {
2978 warn_if_differ_considerably(mdev, "lower level device sizes",
2979 p_size, drbd_get_max_capacity(mdev->ldev));
2980 warn_if_differ_considerably(mdev, "user requested size",
2981 p_usize, mdev->ldev->dc.disk_size);
2982
2983 /* if this is the first connect, or an otherwise expected
2984 * param exchange, choose the minimum */
2985 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2986 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2987 p_usize);
2988
2989 my_usize = mdev->ldev->dc.disk_size;
2990
2991 if (mdev->ldev->dc.disk_size != p_usize) {
2992 mdev->ldev->dc.disk_size = p_usize;
2993 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2994 (unsigned long)mdev->ldev->dc.disk_size);
2995 }
2996
2997 /* Never shrink a device with usable data during connect.
2998 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01002999 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07003000 drbd_get_capacity(mdev->this_bdev) &&
3001 mdev->state.disk >= D_OUTDATED &&
3002 mdev->state.conn < C_CONNECTED) {
3003 dev_err(DEV, "The peer's disk size is too small!\n");
3004 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3005 mdev->ldev->dc.disk_size = my_usize;
3006 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003007 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003008 }
3009 put_ldev(mdev);
3010 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003011
Philipp Reisnere89b5912010-03-24 17:11:33 +01003012 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003013 if (get_ldev(mdev)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02003014 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003015 put_ldev(mdev);
3016 if (dd == dev_size_error)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003017 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003018 drbd_md_sync(mdev);
3019 } else {
3020 /* I am diskless, need to accept the peer's size. */
3021 drbd_set_my_capacity(mdev, p_size);
3022 }
3023
Philipp Reisner99432fc2011-05-20 16:39:13 +02003024 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3025 drbd_reconsider_max_bio_size(mdev);
3026
Philipp Reisnerb411b362009-09-25 16:07:19 -07003027 if (get_ldev(mdev)) {
3028 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3029 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3030 ldsc = 1;
3031 }
3032
Philipp Reisnerb411b362009-09-25 16:07:19 -07003033 put_ldev(mdev);
3034 }
3035
3036 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3037 if (be64_to_cpu(p->c_size) !=
3038 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3039 /* we have different sizes, probably peer
3040 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003041 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003042 }
3043 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3044 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3045 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003046 mdev->state.disk >= D_INCONSISTENT) {
3047 if (ddsf & DDSF_NO_RESYNC)
3048 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3049 else
3050 resync_after_online_grow(mdev);
3051 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003052 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3053 }
3054 }
3055
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003056 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003057}
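
/*
 * Sketch of the size negotiation above (illustrative, not drbd API):
 * on the first parameter exchange both nodes converge on the smaller
 * of the two non-zero user-requested sizes, matching the
 * min_not_zero() call in receive_sizes(), so an asymmetric
 * configuration cannot grow the device beyond what either side allows.
 */
static sector_t negotiated_usize(sector_t mine, sector_t peers)
{
	if (mine == 0)
		return peers;
	if (peers == 0)
		return mine;
	return mine < peers ? mine : peers;
}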
3058
Philipp Reisner02918be2010-08-20 14:35:10 +02003059static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003060{
Philipp Reisner02918be2010-08-20 14:35:10 +02003061 struct p_uuids *p = &mdev->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003062 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003063 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003064
Philipp Reisnerb411b362009-09-25 16:07:19 -07003065	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) /* treat allocation failure like a protocol error */
		return false;
3066
3067 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3068 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3069
3070 kfree(mdev->p_uuid);
3071 mdev->p_uuid = p_uuid;
3072
3073 if (mdev->state.conn < C_CONNECTED &&
3074 mdev->state.disk < D_INCONSISTENT &&
3075 mdev->state.role == R_PRIMARY &&
3076 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3077 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3078 (unsigned long long)mdev->ed_uuid);
3079 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003080 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003081 }
3082
3083 if (get_ldev(mdev)) {
3084 int skip_initial_sync =
3085 mdev->state.conn == C_CONNECTED &&
3086 mdev->agreed_pro_version >= 90 &&
3087 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3088 (p_uuid[UI_FLAGS] & 8);
3089 if (skip_initial_sync) {
3090 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3091 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003092 "clear_n_write from receive_uuids",
3093 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003094 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3095 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3096 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3097 CS_VERBOSE, NULL);
3098 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003099 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003100 }
3101 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003102 } else if (mdev->state.disk < D_INCONSISTENT &&
3103 mdev->state.role == R_PRIMARY) {
3104 /* I am a diskless primary, the peer just created a new current UUID
3105 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003106 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003107 }
3108
3109	/* Before we test for the disk state, we should wait until any
3110	   ongoing cluster wide state change has finished. That is important if
3111	   we are primary and detaching from our disk: we need to see the
3112	   new disk state... */
3113 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3114 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003115 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3116
3117 if (updated_uuids)
3118 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003119
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003120 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003121}
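
/*
 * Illustrative helper (not drbd API): the lowest bit of a data
 * generation UUID is used as a flag, which is why the comparison in
 * receive_uuids() masks it off on both sides before deciding whether
 * two UUIDs name the same data generation.
 */
static int same_data_generation(u64 a, u64 b)
{
	return (a & ~(u64)1) == (b & ~(u64)1);
}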
3122
3123/**
3124 * convert_state() - Converts the peer's view of the cluster state to our point of view
3125 * @ps: The state as seen by the peer.
3126 */
3127static union drbd_state convert_state(union drbd_state ps)
3128{
3129 union drbd_state ms;
3130
3131 static enum drbd_conns c_tab[] = {
3132 [C_CONNECTED] = C_CONNECTED,
3133
3134 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3135 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3136 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3137 [C_VERIFY_S] = C_VERIFY_T,
3138 [C_MASK] = C_MASK,
3139 };
3140
3141 ms.i = ps.i;
3142
3143 ms.conn = c_tab[ps.conn];
3144 ms.peer = ps.role;
3145 ms.role = ps.peer;
3146 ms.pdsk = ps.disk;
3147 ms.disk = ps.pdsk;
3148 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3149
3150 return ms;
3151}
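
/*
 * Usage sketch for convert_state() (illustrative only): the peer's
 * "role" becomes our "peer", its "disk" becomes our "pdsk", and vice
 * versa, while the connection state maps through c_tab.
 */
static void convert_state_example(void)
{
	union drbd_state ps, ms;

	ps.i = 0;
	ps.role = R_PRIMARY;		/* peer calls itself Primary */
	ps.peer = R_SECONDARY;		/* ... and calls us Secondary */
	ps.conn = C_STARTING_SYNC_S;

	ms = convert_state(ps);
	/* now: ms.peer == R_PRIMARY, ms.role == R_SECONDARY,
	 * ms.conn == C_STARTING_SYNC_T */
}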
3152
Philipp Reisner02918be2010-08-20 14:35:10 +02003153static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003154{
Philipp Reisner02918be2010-08-20 14:35:10 +02003155 struct p_req_state *p = &mdev->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003156 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003157 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003158
Philipp Reisnerb411b362009-09-25 16:07:19 -07003159 mask.i = be32_to_cpu(p->mask);
3160 val.i = be32_to_cpu(p->val);
3161
3162 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3163 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3164 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003165 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003166 }
3167
3168 mask = convert_state(mask);
3169 val = convert_state(val);
3170
3171 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3172
3173 drbd_send_sr_reply(mdev, rv);
3174 drbd_md_sync(mdev);
3175
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003176 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003177}
3178
Philipp Reisner02918be2010-08-20 14:35:10 +02003179static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003180{
Philipp Reisner02918be2010-08-20 14:35:10 +02003181 struct p_state *p = &mdev->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003182 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003183 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003184 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003185 int rv;
3186
Philipp Reisnerb411b362009-09-25 16:07:19 -07003187 peer_state.i = be32_to_cpu(p->state);
3188
3189 real_peer_disk = peer_state.disk;
3190 if (peer_state.disk == D_NEGOTIATING) {
3191 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3192 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3193 }
3194
3195 spin_lock_irq(&mdev->req_lock);
3196 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003197 os = ns = mdev->state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003198 spin_unlock_irq(&mdev->req_lock);
3199
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003200 /* peer says his disk is uptodate, while we think it is inconsistent,
3201 * and this happens while we think we have a sync going on. */
3202 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3203 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3204 /* If we are (becoming) SyncSource, but peer is still in sync
3205 * preparation, ignore its uptodate-ness to avoid flapping, it
3206 * will change to inconsistent once the peer reaches active
3207 * syncing states.
3208 * It may have changed syncer-paused flags, however, so we
3209 * cannot ignore this completely. */
3210 if (peer_state.conn > C_CONNECTED &&
3211 peer_state.conn < C_SYNC_SOURCE)
3212 real_peer_disk = D_INCONSISTENT;
3213
3214 /* if peer_state changes to connected at the same time,
3215 * it explicitly notifies us that it finished resync.
3216 * Maybe we should finish it up, too? */
3217 else if (os.conn >= C_SYNC_SOURCE &&
3218 peer_state.conn == C_CONNECTED) {
3219 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3220 drbd_resync_finished(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003221 return true;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003222 }
3223 }
3224
3225 /* peer says his disk is inconsistent, while we think it is uptodate,
3226 * and this happens while the peer still thinks we have a sync going on,
3227 * but we think we are already done with the sync.
3228 * We ignore this to avoid flapping pdsk.
3229	 * This should not happen if the peer is a recent version of drbd. */
3230 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3231 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3232 real_peer_disk = D_UP_TO_DATE;
3233
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003234 if (ns.conn == C_WF_REPORT_PARAMS)
3235 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003236
Philipp Reisner67531712010-10-27 12:21:30 +02003237 if (peer_state.conn == C_AHEAD)
3238 ns.conn = C_BEHIND;
3239
Philipp Reisnerb411b362009-09-25 16:07:19 -07003240 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3241 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3242 int cr; /* consider resync */
3243
3244 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003245 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003246 /* if we had an established connection
3247 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003248 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003249 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003250 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003251 /* if we have both been inconsistent, and the peer has been
3252 * forced to be UpToDate with --overwrite-data */
3253 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3254 /* if we had been plain connected, and the admin requested to
3255 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003256 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003257 (peer_state.conn >= C_STARTING_SYNC_S &&
3258 peer_state.conn <= C_WF_BITMAP_T));
3259
3260 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003261 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003262
3263 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003264 if (ns.conn == C_MASK) {
3265 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003266 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003267 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003268 } else if (peer_state.disk == D_NEGOTIATING) {
3269 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3270 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003271 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003272 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003273 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003274 return false;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003275 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003276 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003277 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003278 }
3279 }
3280 }
3281
3282 spin_lock_irq(&mdev->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003283 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003284 goto retry;
3285 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003286 ns.peer = peer_state.role;
3287 ns.pdsk = real_peer_disk;
3288 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003289 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003290 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003291 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3292 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003293 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3294 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3295		   for temporary network outages! */
3296 spin_unlock_irq(&mdev->req_lock);
3297 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3298 tl_clear(mdev);
3299 drbd_uuid_new_current(mdev);
3300 clear_bit(NEW_CUR_UUID, &mdev->flags);
3301 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003302 return false;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003303 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003304 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003305 ns = mdev->state;
3306 spin_unlock_irq(&mdev->req_lock);
3307
3308 if (rv < SS_SUCCESS) {
3309 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003310 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003311 }
3312
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003313 if (os.conn > C_WF_REPORT_PARAMS) {
3314 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003315 peer_state.disk != D_NEGOTIATING ) {
3316 /* we want resync, peer has not yet decided to sync... */
3317 /* Nowadays only used when forcing a node into primary role and
3318 setting its disk to UpToDate with that */
3319 drbd_send_uuids(mdev);
3320 drbd_send_state(mdev);
3321 }
3322 }
3323
3324 mdev->net_conf->want_lose = 0;
3325
3326 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3327
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003328 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003329}
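
/*
 * Sketch of the sample/recheck pattern used in receive_state() above
 * (illustrative, names hypothetical): the state is sampled under the
 * lock, evaluated without it (the evaluation may sleep), and the
 * result is only committed if the state did not move in the meantime;
 * otherwise the whole evaluation restarts.
 */
static void commit_if_unchanged(spinlock_t *lock, unsigned int *state,
				unsigned int (*compute_ns)(unsigned int os))
{
	unsigned int os, ns;

	spin_lock_irq(lock);
retry:
	os = *state;
	spin_unlock_irq(lock);

	ns = compute_ns(os);		/* unlocked, may block */

	spin_lock_irq(lock);
	if (*state != os)
		goto retry;		/* state moved under us, redo */
	*state = ns;
	spin_unlock_irq(lock);
}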
3330
Philipp Reisner02918be2010-08-20 14:35:10 +02003331static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003332{
Philipp Reisner02918be2010-08-20 14:35:10 +02003333 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003334
3335 wait_event(mdev->misc_wait,
3336 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003337 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003338 mdev->state.conn < C_CONNECTED ||
3339 mdev->state.disk < D_NEGOTIATING);
3340
3341 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3342
Philipp Reisnerb411b362009-09-25 16:07:19 -07003343 /* Here the _drbd_uuid_ functions are right, current should
3344 _not_ be rotated into the history */
3345 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3346 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3347 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3348
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003349 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003350 drbd_start_resync(mdev, C_SYNC_TARGET);
3351
3352 put_ldev(mdev);
3353 } else
3354 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3355
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003356 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003357}
3358
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003359/**
3360 * receive_bitmap_plain() - receive one plain bitmap packet and merge it
3361 *
3362 * Return 0 when done, 1 when another iteration is needed, and a negative error
3363 * code upon failure.
3364 */
3365static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003366receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3367 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003368{
3369 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3370 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003371 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003372
Philipp Reisner02918be2010-08-20 14:35:10 +02003373 if (want != data_size) {
3374 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003375 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003376 }
3377 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003378 return 0;
3379 err = drbd_recv(mdev, buffer, want);
3380 if (err != want) {
3381 if (err >= 0)
3382 err = -EIO;
3383 return err;
3384 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003385
3386 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3387
3388 c->word_offset += num_words;
3389 c->bit_offset = c->word_offset * BITS_PER_LONG;
3390 if (c->bit_offset > c->bm_bits)
3391 c->bit_offset = c->bm_bits;
3392
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003393 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003394}
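
/*
 * Illustrative helper (not drbd API): the plain transfer streams the
 * bitmap as consecutive packets of at most BM_PACKET_WORDS longs; only
 * the last packet may be shorter.  This mirrors the num_words
 * computation at the top of receive_bitmap_plain().
 */
static size_t words_in_next_packet(size_t bm_words, size_t word_offset,
				   size_t packet_words)
{
	size_t left = bm_words - word_offset;

	return left < packet_words ? left : packet_words;
}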
3395
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003396/**
3397 * recv_bm_rle_bits() - decode one compressed (RLE/VLI) bitmap packet
3398 *
3399 * Return 0 when done, 1 when another iteration is needed, and a negative error
3400 * code upon failure.
3401 */
3402static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003403recv_bm_rle_bits(struct drbd_conf *mdev,
3404 struct p_compressed_bm *p,
3405 struct bm_xfer_ctx *c)
3406{
3407 struct bitstream bs;
3408 u64 look_ahead;
3409 u64 rl;
3410 u64 tmp;
3411 unsigned long s = c->bit_offset;
3412 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003413 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003414 int toggle = DCBP_get_start(p);
3415 int have;
3416 int bits;
3417
3418 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3419
3420 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3421 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003422 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003423
3424 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3425 bits = vli_decode_bits(&rl, look_ahead);
3426 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003427 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003428
3429 if (toggle) {
3430 e = s + rl -1;
3431 if (e >= c->bm_bits) {
3432 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003433 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003434 }
3435 _drbd_bm_set_bits(mdev, s, e);
3436 }
3437
3438 if (have < bits) {
3439 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3440 have, bits, look_ahead,
3441 (unsigned int)(bs.cur.b - p->code),
3442 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003443 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003444 }
3445 look_ahead >>= bits;
3446 have -= bits;
3447
3448 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3449 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003450 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003451 look_ahead |= tmp << have;
3452 have += bits;
3453 }
3454
3455 c->bit_offset = s;
3456 bm_xfer_ctx_bit_to_word_offset(c);
3457
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003458 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003459}
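
/*
 * Sketch of the run-length semantics above (illustrative; the VLI
 * bitstream decoding is abstracted away as a plain array of run
 * lengths, and set_bits is a hypothetical callback): runs alternate
 * between cleared and set bits, the first kind given by the start
 * toggle, and only the "set" runs are applied to the bitmap.
 */
static void apply_rle_runs(const unsigned long *rl, int n, int toggle,
			   void (*set_bits)(unsigned long s, unsigned long e))
{
	unsigned long s = 0;
	int i;

	/* e.g. runs {5, 3, 4} with toggle == 0: bits 0..4 stay clear,
	 * bits 5..7 are set, bits 8..11 stay clear */
	for (i = 0; i < n; i++, toggle = !toggle) {
		if (toggle && rl[i])
			set_bits(s, s + rl[i] - 1);
		s += rl[i];
	}
}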
3460
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003461/**
3462 * decode_bitmap_c() - dispatch a compressed bitmap packet to its decoder
3463 *
3464 * Return 0 when done, 1 when another iteration is needed, and a negative error
3465 * code upon failure.
3466 */
3467static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003468decode_bitmap_c(struct drbd_conf *mdev,
3469 struct p_compressed_bm *p,
3470 struct bm_xfer_ctx *c)
3471{
3472 if (DCBP_get_code(p) == RLE_VLI_Bits)
3473 return recv_bm_rle_bits(mdev, p, c);
3474
3475 /* other variants had been implemented for evaluation,
3476 * but have been dropped as this one turned out to be "best"
3477 * during all our tests. */
3478
3479 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3480 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003481 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003482}
3483
3484void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3485 const char *direction, struct bm_xfer_ctx *c)
3486{
3487 /* what would it take to transfer it "plaintext" */
Philipp Reisner0b70a132010-08-20 13:36:10 +02003488 unsigned plain = sizeof(struct p_header80) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003489 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3490 + c->bm_words * sizeof(long);
3491 unsigned total = c->bytes[0] + c->bytes[1];
3492 unsigned r;
3493
3494 /* total can not be zero. but just in case: */
3495 if (total == 0)
3496 return;
3497
3498 /* don't report if not compressed */
3499 if (total >= plain)
3500 return;
3501
3502 /* total < plain. check for overflow, still */
3503 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3504 : (1000 * total / plain);
3505
3506 if (r > 1000)
3507 r = 1000;
3508
3509 r = 1000 - r;
3510 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3511 "total %u; compression: %u.%u%%\n",
3512 direction,
3513 c->bytes[1], c->packets[1],
3514 c->bytes[0], c->packets[0],
3515 total, r/10, r % 10);
3516}
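
/*
 * Worked example for the ratio math above (illustrative helper, not
 * drbd API): with plain = 4096 and total = 512, r = 1000*512/4096 =
 * 125, so the reported saving is 1000 - 125 = 875, printed as 87.5%.
 */
static unsigned int compression_permille(unsigned int total, unsigned int plain)
{
	unsigned int r;

	if (total == 0 || total >= plain)
		return 0;	/* nothing worth reporting */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);
	if (r > 1000)
		r = 1000;
	return 1000 - r;
}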
3517
3518/* Since we are processing the bitfield from lower addresses to higher,
3519   it does not matter whether we process it in 32 bit chunks or 64 bit
3520   chunks, as long as it is little endian. (Understand it as a byte stream,
3521   beginning with the lowest byte...) If we used big endian
3522   we would need to process it from the highest address to the lowest,
3523   in order to be agnostic to the 32 vs 64 bit issue.
3524
3525   returns 0 on failure, 1 if we successfully received it. */
Philipp Reisner02918be2010-08-20 14:35:10 +02003526static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003527{
3528 struct bm_xfer_ctx c;
3529 void *buffer;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003530 int err;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003531 int ok = false;
Philipp Reisner02918be2010-08-20 14:35:10 +02003532 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003533
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003534 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3535 /* you are supposed to send additional out-of-sync information
3536 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003537
3538 /* maybe we should use some per thread scratch page,
3539 * and allocate that during initial device creation? */
3540 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3541 if (!buffer) {
3542 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3543 goto out;
3544 }
3545
3546 c = (struct bm_xfer_ctx) {
3547 .bm_bits = drbd_bm_bits(mdev),
3548 .bm_words = drbd_bm_words(mdev),
3549 };
3550
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003551 for(;;) {
Philipp Reisner02918be2010-08-20 14:35:10 +02003552 if (cmd == P_BITMAP) {
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003553 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
Philipp Reisner02918be2010-08-20 14:35:10 +02003554 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003555 /* MAYBE: sanity check that we speak proto >= 90,
3556 * and the feature is enabled! */
3557 struct p_compressed_bm *p;
3558
Philipp Reisner02918be2010-08-20 14:35:10 +02003559 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003560 dev_err(DEV, "ReportCBitmap packet too large\n");
3561 goto out;
3562 }
3563 /* use the page buff */
3564 p = buffer;
3565 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003566 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003567 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003568 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3569 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003570 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003571 }
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003572 err = decode_bitmap_c(mdev, p, &c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003573 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003574			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003575 goto out;
3576 }
3577
Philipp Reisner02918be2010-08-20 14:35:10 +02003578 c.packets[cmd == P_BITMAP]++;
3579 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003580
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003581 if (err <= 0) {
3582 if (err < 0)
3583 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003584 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003585 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003586 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003587 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003588 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003589
3590 INFO_bm_xfer_stats(mdev, "receive", &c);
3591
3592 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003593 enum drbd_state_rv rv;
3594
Philipp Reisnerb411b362009-09-25 16:07:19 -07003595 ok = !drbd_send_bitmap(mdev);
3596 if (!ok)
3597 goto out;
3598 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003599 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3600 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003601 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3602 /* admin may have requested C_DISCONNECTING,
3603 * other threads may have noticed network errors */
3604 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3605 drbd_conn_str(mdev->state.conn));
3606 }
3607
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003608 ok = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003609 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003610 drbd_bm_unlock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003611 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3612 drbd_start_resync(mdev, C_SYNC_SOURCE);
3613 free_page((unsigned long) buffer);
3614 return ok;
3615}
3616
Philipp Reisner02918be2010-08-20 14:35:10 +02003617static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003618{
3619 /* TODO zero copy sink :) */
3620 static char sink[128];
3621 int size, want, r;
3622
Philipp Reisner02918be2010-08-20 14:35:10 +02003623 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3624 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003625
Philipp Reisner02918be2010-08-20 14:35:10 +02003626 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003627 while (size > 0) {
3628 want = min_t(int, size, sizeof(sink));
3629 r = drbd_recv(mdev, sink, want);
3630 ERR_IF(r <= 0) break;
3631 size -= r;
3632 }
3633 return size == 0;
3634}
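
/*
 * Sketch of the drain pattern above (illustrative; recv is a
 * hypothetical read callback): an unknown payload is consumed in
 * bounded chunks so the stream stays in sync without allocating a
 * payload-sized buffer.
 */
static int drain_payload(int (*recv)(void *buf, int len), int size)
{
	char sink[128];
	int want, r;

	while (size > 0) {
		want = size < (int)sizeof(sink) ? size : (int)sizeof(sink);
		r = recv(sink, want);
		if (r <= 0)
			break;
		size -= r;
	}
	return size == 0;
}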
3635
Philipp Reisner02918be2010-08-20 14:35:10 +02003636static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003637{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003638 /* Make sure we've acked all the TCP data associated
3639 * with the data requests being unplugged */
3640 drbd_tcp_quickack(mdev->data.socket);
3641
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003642 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003643}
3644
Philipp Reisner73a01a12010-10-27 14:33:00 +02003645static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3646{
3647 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3648
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003649 switch (mdev->state.conn) {
3650 case C_WF_SYNC_UUID:
3651 case C_WF_BITMAP_T:
3652 case C_BEHIND:
3653 break;
3654 default:
3655 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3656 drbd_conn_str(mdev->state.conn));
3657 }
3658
Philipp Reisner73a01a12010-10-27 14:33:00 +02003659 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3660
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003661 return true;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003662}
3663
Philipp Reisner02918be2010-08-20 14:35:10 +02003664typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003665
Philipp Reisner02918be2010-08-20 14:35:10 +02003666struct data_cmd {
3667 int expect_payload;
3668 size_t pkt_size;
3669 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003670};
3671
Philipp Reisner02918be2010-08-20 14:35:10 +02003672static struct data_cmd drbd_cmd_handler[] = {
3673 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3674 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3675 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3676 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3677 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3678 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3679 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3680 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3681 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3682 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3683 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3684 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3685 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3686 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3687 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3688 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3689 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3690 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3691 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3692 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3693 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003694 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003695 /* anything missing from this table is in
3696 * the asender_tbl, see get_asender_cmd */
3697 [P_MAX_CMD] = { 0, 0, NULL },
3698};
3699
3700/* All handler functions that expect a sub-header get that sub-header in
3701   mdev->data.rbuf.header.head.payload.
3702
3703   Usually the callback can find the usual p_header in
3704   mdev->data.rbuf.header.head, but it must not rely on that,
3705   since there is also p_header95. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003706
3707static void drbdd(struct drbd_conf *mdev)
3708{
Philipp Reisner02918be2010-08-20 14:35:10 +02003709 union p_header *header = &mdev->data.rbuf.header;
3710 unsigned int packet_size;
3711 enum drbd_packets cmd;
3712 size_t shs; /* sub header size */
3713 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003714
3715 while (get_t_state(&mdev->receiver) == Running) {
3716 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003717 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3718 goto err_out;
3719
3720 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3721 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3722 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003723 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003724
Philipp Reisner02918be2010-08-20 14:35:10 +02003725 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02003726 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3727 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3728 goto err_out;
3729 }
3730
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003731 if (shs) {
3732 rv = drbd_recv(mdev, &header->h80.payload, shs);
3733 if (unlikely(rv != shs)) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003734 if (!signal_pending(current))
3735 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003736 goto err_out;
3737 }
3738 }
3739
Philipp Reisner02918be2010-08-20 14:35:10 +02003740 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3741
3742 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003743 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003744 cmdname(cmd), packet_size);
3745 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003746 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003747 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003748
Philipp Reisner02918be2010-08-20 14:35:10 +02003749 if (0) {
3750 err_out:
3751 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003752 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003753 /* If we leave here, we probably want to update at least the
3754 * "Connected" indicator on stable storage. Do so explicitly here. */
3755 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003756}
3757
3758void drbd_flush_workqueue(struct drbd_conf *mdev)
3759{
3760 struct drbd_wq_barrier barr;
3761
3762 barr.w.cb = w_prev_work_done;
3763 init_completion(&barr.done);
3764 drbd_queue_work(&mdev->data.work, &barr.w);
3765 wait_for_completion(&barr.done);
3766}
3767
3768static void drbd_disconnect(struct drbd_conf *mdev)
3769{
3770 enum drbd_fencing_p fp;
3771 union drbd_state os, ns;
3772 int rv = SS_UNKNOWN_ERROR;
3773 unsigned int i;
3774
3775 if (mdev->state.conn == C_STANDALONE)
3776 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003777
3778 /* asender does not clean up anything. it must not interfere, either */
3779 drbd_thread_stop(&mdev->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003780 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003781
Philipp Reisner85719572010-07-21 10:20:17 +02003782 /* wait for current activity to cease. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003783 spin_lock_irq(&mdev->req_lock);
3784 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3785 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3786 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3787 spin_unlock_irq(&mdev->req_lock);
3788
3789 /* We do not have data structures that would allow us to
3790 * get the rs_pending_cnt down to 0 again.
3791 * * On C_SYNC_TARGET we do not have any data structures describing
3792	 *  the pending RSDataRequests we have sent.
3793 * * On C_SYNC_SOURCE there is no data structure that tracks
3794 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3795 * And no, it is not the sum of the reference counts in the
3796 * resync_LRU. The resync_LRU tracks the whole operation including
3797 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3798 * on the fly. */
3799 drbd_rs_cancel_all(mdev);
3800 mdev->rs_total = 0;
3801 mdev->rs_failed = 0;
3802 atomic_set(&mdev->rs_pending_cnt, 0);
3803 wake_up(&mdev->misc_wait);
3804
Philipp Reisner7fde2be2011-03-01 11:08:28 +01003805 del_timer(&mdev->request_timer);
3806
Philipp Reisnerb411b362009-09-25 16:07:19 -07003807 /* make sure syncer is stopped and w_resume_next_sg queued */
3808 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003809 resync_timer_fn((unsigned long)mdev);
3810
Philipp Reisnerb411b362009-09-25 16:07:19 -07003811 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3812 * w_make_resync_request etc. which may still be on the worker queue
3813 * to be "canceled" */
3814 drbd_flush_workqueue(mdev);
3815
3816 /* This also does reclaim_net_ee(). If we do this too early, we might
3817	 * miss some resync ee and pages. */
3818 drbd_process_done_ee(mdev);
3819
3820 kfree(mdev->p_uuid);
3821 mdev->p_uuid = NULL;
3822
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003823 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003824 tl_clear(mdev);
3825
Philipp Reisnerb411b362009-09-25 16:07:19 -07003826 dev_info(DEV, "Connection closed\n");
3827
3828 drbd_md_sync(mdev);
3829
3830 fp = FP_DONT_CARE;
3831 if (get_ldev(mdev)) {
3832 fp = mdev->ldev->dc.fencing;
3833 put_ldev(mdev);
3834 }
3835
Philipp Reisner87f7be42010-06-11 13:56:33 +02003836 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3837 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003838
3839 spin_lock_irq(&mdev->req_lock);
3840 os = mdev->state;
3841 if (os.conn >= C_UNCONNECTED) {
3842 /* Do not restart in case we are C_DISCONNECTING */
3843 ns = os;
3844 ns.conn = C_UNCONNECTED;
3845 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3846 }
3847 spin_unlock_irq(&mdev->req_lock);
3848
3849 if (os.conn == C_DISCONNECTING) {
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003850 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003851
Philipp Reisnerb411b362009-09-25 16:07:19 -07003852 crypto_free_hash(mdev->cram_hmac_tfm);
3853 mdev->cram_hmac_tfm = NULL;
3854
3855 kfree(mdev->net_conf);
3856 mdev->net_conf = NULL;
3857 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3858 }
3859
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003860 /* serialize with bitmap writeout triggered by the state change,
3861 * if any. */
3862 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3863
Philipp Reisnerb411b362009-09-25 16:07:19 -07003864 /* tcp_close and release of sendpage pages can be deferred. I don't
3865 * want to use SO_LINGER, because apparently it can be deferred for
3866 * more than 20 seconds (longest time I checked).
3867 *
3868 * Actually we don't care for exactly when the network stack does its
3869 * put_page(), but release our reference on these pages right here.
3870 */
3871 i = drbd_release_ee(mdev, &mdev->net_ee);
3872 if (i)
3873 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003874 i = atomic_read(&mdev->pp_in_use_by_net);
3875 if (i)
3876 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003877 i = atomic_read(&mdev->pp_in_use);
3878 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003879 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003880
3881 D_ASSERT(list_empty(&mdev->read_ee));
3882 D_ASSERT(list_empty(&mdev->active_ee));
3883 D_ASSERT(list_empty(&mdev->sync_ee));
3884 D_ASSERT(list_empty(&mdev->done_ee));
3885
3886 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3887 atomic_set(&mdev->current_epoch->epoch_size, 0);
3888 D_ASSERT(list_empty(&mdev->current_epoch->list));
3889}
3890
3891/*
3892 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3893 * we can agree on is stored in agreed_pro_version.
3894 *
3895 * feature flags and the reserved array should be enough room for future
3896 * enhancements of the handshake protocol, and possible plugins...
3897 *
3898 * for now, they are expected to be zero, but ignored.
3899 */
3900static int drbd_send_handshake(struct drbd_conf *mdev)
3901{
3902 /* ASSERT current == mdev->receiver ... */
3903 struct p_handshake *p = &mdev->data.sbuf.handshake;
3904 int ok;
3905
3906 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3907 dev_err(DEV, "interrupted during initial handshake\n");
3908 return 0; /* interrupted. not ok. */
3909 }
3910
3911 if (mdev->data.socket == NULL) {
3912 mutex_unlock(&mdev->data.mutex);
3913 return 0;
3914 }
3915
3916 memset(p, 0, sizeof(*p));
3917 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3918 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3919 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02003920 (struct p_header80 *)p, sizeof(*p), 0 );
Philipp Reisnerb411b362009-09-25 16:07:19 -07003921 mutex_unlock(&mdev->data.mutex);
3922 return ok;
3923}
3924
3925/*
3926 * return values:
3927 * 1 yes, we have a valid connection
3928 * 0 oops, did not work out, please try again
3929 * -1 peer talks different language,
3930 * no point in trying again, please go standalone.
3931 */
3932static int drbd_do_handshake(struct drbd_conf *mdev)
3933{
3934 /* ASSERT current == mdev->receiver ... */
3935 struct p_handshake *p = &mdev->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02003936 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3937 unsigned int length;
3938 enum drbd_packets cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003939 int rv;
3940
3941 rv = drbd_send_handshake(mdev);
3942 if (!rv)
3943 return 0;
3944
Philipp Reisner02918be2010-08-20 14:35:10 +02003945 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003946 if (!rv)
3947 return 0;
3948
Philipp Reisner02918be2010-08-20 14:35:10 +02003949 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003950 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003951 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003952 return -1;
3953 }
3954
Philipp Reisner02918be2010-08-20 14:35:10 +02003955 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003956 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003957 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003958 return -1;
3959 }
3960
3961 rv = drbd_recv(mdev, &p->head.payload, expect);
3962
3963 if (rv != expect) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003964 if (!signal_pending(current))
3965 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003966 return 0;
3967 }
3968
Philipp Reisnerb411b362009-09-25 16:07:19 -07003969 p->protocol_min = be32_to_cpu(p->protocol_min);
3970 p->protocol_max = be32_to_cpu(p->protocol_max);
3971 if (p->protocol_max == 0)
3972 p->protocol_max = p->protocol_min;
3973
3974 if (PRO_VERSION_MAX < p->protocol_min ||
3975 PRO_VERSION_MIN > p->protocol_max)
3976 goto incompat;
3977
3978 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3979
3980 dev_info(DEV, "Handshake successful: "
3981 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3982
3983 return 1;
3984
3985 incompat:
3986 dev_err(DEV, "incompatible DRBD dialects: "
3987 "I support %d-%d, peer supports %d-%d\n",
3988 PRO_VERSION_MIN, PRO_VERSION_MAX,
3989 p->protocol_min, p->protocol_max);
3990 return -1;
3991}
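
/*
 * Illustrative helper (not drbd API): two protocol version ranges are
 * compatible iff they overlap, and the agreed version is the highest
 * one both sides support -- the min of the two maxima, exactly as
 * computed above.  E.g. local 86-96 vs peer 88-94 agrees on 94;
 * local 86-90 vs peer 91-95 has no overlap and fails.
 */
static int agree_protocol_version(int my_min, int my_max,
				  int peer_min, int peer_max)
{
	if (my_max < peer_min || my_min > peer_max)
		return -1;			/* incompatible */
	return my_max < peer_max ? my_max : peer_max;
}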
3992
3993#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3994static int drbd_do_auth(struct drbd_conf *mdev)
3995{
3996 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
3997 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01003998 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003999}
4000#else
4001#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004002
4003/* Return value:
4004 1 - auth succeeded,
4005 0 - failed, try again (network error),
4006 -1 - auth failed, don't try again.
4007*/
4008
Philipp Reisnerb411b362009-09-25 16:07:19 -07004009static int drbd_do_auth(struct drbd_conf *mdev)
4010{
4011 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4012 struct scatterlist sg;
4013 char *response = NULL;
4014 char *right_response = NULL;
4015 char *peers_ch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004016 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4017 unsigned int resp_size;
4018 struct hash_desc desc;
Philipp Reisner02918be2010-08-20 14:35:10 +02004019 enum drbd_packets cmd;
4020 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004021 int rv;
4022
4023 desc.tfm = mdev->cram_hmac_tfm;
4024 desc.flags = 0;
4025
4026 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4027 (u8 *)mdev->net_conf->shared_secret, key_len);
4028 if (rv) {
4029 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004030 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004031 goto fail;
4032 }
4033
4034 get_random_bytes(my_challenge, CHALLENGE_LEN);
4035
4036 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4037 if (!rv)
4038 goto fail;
4039
Philipp Reisner02918be2010-08-20 14:35:10 +02004040 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004041 if (!rv)
4042 goto fail;
4043
Philipp Reisner02918be2010-08-20 14:35:10 +02004044 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004045 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004046 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004047 rv = 0;
4048 goto fail;
4049 }
4050
Philipp Reisner02918be2010-08-20 14:35:10 +02004051 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004052 dev_err(DEV, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004053 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004054 goto fail;
4055 }
4056
Philipp Reisner02918be2010-08-20 14:35:10 +02004057 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004058 if (peers_ch == NULL) {
4059 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004060 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004061 goto fail;
4062 }
4063
Philipp Reisner02918be2010-08-20 14:35:10 +02004064 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004065
Philipp Reisner02918be2010-08-20 14:35:10 +02004066 if (rv != length) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004067 if (!signal_pending(current))
4068 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004069 rv = 0;
4070 goto fail;
4071 }
4072
4073 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4074 response = kmalloc(resp_size, GFP_NOIO);
4075 if (response == NULL) {
4076 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004077 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004078 goto fail;
4079 }
4080
4081 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004082 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004083
4084 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4085 if (rv) {
4086 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004087 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004088 goto fail;
4089 }
4090
4091 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4092 if (!rv)
4093 goto fail;
4094
Philipp Reisner02918be2010-08-20 14:35:10 +02004095 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004096 if (!rv)
4097 goto fail;
4098
Philipp Reisner02918be2010-08-20 14:35:10 +02004099 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004100 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004101 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004102 rv = 0;
4103 goto fail;
4104 }
4105
Philipp Reisner02918be2010-08-20 14:35:10 +02004106 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004107 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4108 rv = 0;
4109 goto fail;
4110 }
4111
4112 rv = drbd_recv(mdev, response , resp_size);
4113
4114 if (rv != resp_size) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004115 if (!signal_pending(current))
4116 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004117 rv = 0;
4118 goto fail;
4119 }
4120
4121 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004122 if (right_response == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004123 dev_err(DEV, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004124 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004125 goto fail;
4126 }
4127
4128 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4129
4130 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4131 if (rv) {
4132 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004133 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004134 goto fail;
4135 }
4136
4137 rv = !memcmp(response, right_response, resp_size);
4138
4139 if (rv)
4140 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4141 resp_size, mdev->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004142 else
4143 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004144
4145 fail:
4146 kfree(peers_ch);
4147 kfree(response);
4148 kfree(right_response);
4149
4150 return rv;
4151}
4152#endif
4153
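/* The challenge-response exchange above boils down to four packets; a
 * rough sketch of the symmetric flow (both peers run drbd_do_auth()
 * concurrently, names follow the code):
 *
 *	A -> B  P_AUTH_CHALLENGE   my_challenge, CHALLENGE_LEN random bytes
 *	B -> A  P_AUTH_CHALLENGE   peers_ch, B's own random challenge
 *	A -> B  P_AUTH_RESPONSE    HMAC(shared secret, peers_ch)
 *	B -> A  P_AUTH_RESPONSE    HMAC(shared secret, my_challenge)
 *
 * Each side recomputes the HMAC over the challenge it sent
 * (right_response) and memcmp()s it against what the peer returned
 * (response).  Return convention: 1 on success, 0 on a transient
 * failure worth retrying the connect, -1 to discard the network
 * configuration entirely.
 */
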
4154int drbdd_init(struct drbd_thread *thi)
4155{
4156 struct drbd_conf *mdev = thi->mdev;
4157 unsigned int minor = mdev_to_minor(mdev);
4158 int h;
4159
4160 sprintf(current->comm, "drbd%d_receiver", minor);
4161
4162 dev_info(DEV, "receiver (re)started\n");
4163
4164 do {
4165 h = drbd_connect(mdev);
4166 if (h == 0) {
4167 drbd_disconnect(mdev);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004168 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004169 }
4170 if (h == -1) {
4171 dev_warn(DEV, "Discarding network configuration.\n");
4172 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4173 }
4174 } while (h == 0);
4175
4176 if (h > 0) {
4177 if (get_net_conf(mdev)) {
4178 drbdd(mdev);
4179 put_net_conf(mdev);
4180 }
4181 }
4182
4183 drbd_disconnect(mdev);
4184
4185 dev_info(DEV, "receiver terminated\n");
4186 return 0;
4187}
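
/* Return-value contract assumed by the loop above (matching how
 * drbd_connect() is used here, not a spec from elsewhere):
 *
 *	h >  0   handshake done, enter drbdd() and process packets
 *	h == 0   transient failure: tear down, sleep ~1 s, retry
 *	h == -1  fatal (e.g. auth rejected): drop the net config and stop
 */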

/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return true;
}

static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return true;
}
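
/* Keepalive timing sketch: while a P_PING is outstanding, drbd_asender()
 * below shortens the meta socket receive timeout to ping_timeo (which is
 * configured in tenths of a second); got_PingAck() stretches it back to
 * the idle ping_int.  Assuming the common defaults ping_int=10 and
 * ping_timeo=5 (an assumption about the configuration, not this file):
 *
 *	idle:            sk_rcvtimeo = 10 * HZ      (10 s)
 *	ping in flight:  sk_rcvtimeo = 5 * HZ / 10  (0.5 s)
 */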

static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return true;
}
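
/* Unit bookkeeping above, worked through for one P_RS_IS_IN_SYNC ack
 * (illustrative numbers; BM_BLOCK_SHIFT == 12, i.e. 4 KiB bitmap
 * granularity, is an assumption about drbd_int.h):
 *
 *	blksize = 4096 bytes
 *	rs_same_csum += 4096 >> 12 = 1   bitmap block skipped as "same"
 *	rs_sect_in   += 4096 >> 9  = 8   512-byte sectors, feeds the
 *	                                 resync rate controller
 */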

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector,
	struct hlist_head *(*hash_slot)(struct drbd_conf *, sector_t),
	const char *func, enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = find_request(mdev, hash_slot, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);
		return false;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}
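
/* Typical call pattern, as used by the ack handlers below: pick the
 * hash the request was registered in -- tl_hash_slot for writes in the
 * transfer log, ar_hash_slot for application reads -- plus the req_mod
 * event the ack translates to, e.g.:
 *
 *	validate_req_change_req_state(mdev, p->block_id, sector,
 *				      tl_hash_slot, __func__,
 *				      write_acked_by_peer, false);
 *
 * A false return only means "request not found"; callers treat that as
 * fatal unless missing_ok says otherwise.
 */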

static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return true;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return false;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     tl_hash_slot, __func__, what,
					     false);
}
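
/* Cheat sheet for the mapping above -- which ack a peer sends depends
 * on the wire protocol negotiated in net_conf:
 *
 *	P_RECV_ACK	prot B	data received (not yet on stable storage)
 *	P_WRITE_ACK	prot C	data written to disk
 *	P_RS_WRITE_ACK	prot C	resync write done, also marks set-in-sync
 *	P_DISCARD_ACK	prot C	concurrent-write conflict, peer discarded
 */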

static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	bool missing_ok = mdev->net_conf->wire_protocol == DRBD_PROT_A ||
			  mdev->net_conf->wire_protocol == DRBD_PROT_B;
	bool found;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}

	found = validate_req_change_req_state(mdev, p->block_id, sector,
					      tl_hash_slot, __func__,
					      neg_acked, missing_ok);
	if (!found) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
		if (!missing_ok)
			return false;
		drbd_set_out_of_sync(mdev, sector, size);
	}
	return true;
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     ar_hash_slot, __func__, neg_acked,
					     false);
}

static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (be16_to_cpu(h->command)) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through: both replies end the pending request,
			 * only P_NEG_RS_DREPLY also marks the range failed */
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}

static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
		mdev->start_resync_timer.expires = jiffies + HZ;
		add_timer(&mdev->start_resync_timer);
	}

	return true;
}
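
/* Congestion-mode detail: when this node is C_AHEAD (real-time
 * replication was suspended because the link could not keep up), the
 * barrier ack that drains the last in-flight write (ap_in_flight == 0)
 * arms start_resync_timer, so the switch back to resync source happens
 * about a second later from timer context rather than here in the
 * asender thread. */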

static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.\n");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}
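
/* Two details worth noting above: testing bit 9 of ov_left gates
 * drbd_advance_rs_marks() to alternating windows of 0x200 counts, a
 * cheap way of throttling progress updates (the "every other megabyte"
 * wording assumes the unit ov_left is counted in).  And the final
 * w_ov_finished is handed to the worker queue instead of running here,
 * so the asender thread never blocks on resync-finished bookkeeping;
 * the inline drbd_resync_finished() call is only the last-ditch
 * fallback when kmalloc fails. */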

static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	    = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
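
/* Extending the dispatch table is the whole protocol-plumbing story on
 * this side: a new meta-socket packet only needs a P_* slot, an on-wire
 * payload struct, and a handler with the signature above.  A sketch
 * with hypothetical names (P_FOO_ACK, p_foo_ack and got_FooAck do not
 * exist in this protocol):
 *
 *	static int got_FooAck(struct drbd_conf *mdev, struct p_header80 *h)
 *	{
 *		struct p_foo_ack *p = (struct p_foo_ack *)h;
 *		update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 *		return true;
 *	}
 *
 *	[P_FOO_ACK] = { sizeof(struct p_foo_ack), got_FooAck },
 *
 * The pkt_size field doubles as the receive length: drbd_asender()
 * reads exactly pkt_size bytes before calling process(). */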

int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf = h;
	int received = 0;
	int expect = sizeof(struct p_header80);
	int empty;
	int ping_timeout_active = 0;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->last_received,
				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != cpu_to_be32(DRBD_MAGIC))) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
					be32_to_cpu(h->magic),
					be16_to_cpu(h->command),
					be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
					be32_to_cpu(h->magic),
					be16_to_cpu(h->command),
					be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			mdev->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}
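
/* Receive-loop invariant for drbd_asender() above: (buf, received,
 * expect) implement a two-phase read into the single preallocated
 * header buffer.  Phase one collects sizeof(struct p_header80) bytes
 * and looks up cmd; expect then grows to cmd->pkt_size so phase two
 * collects the payload in place; after process() the triple is reset.
 * Roughly:
 *
 *	expect = sizeof(struct p_header80);	read header
 *	expect = cmd->pkt_size;			read rest of packet
 *	buf = h; received = 0; cmd = NULL;	start over
 *
 * This works because every asender packet is small and fixed-size (see
 * asender_tbl), so header and payload share one rbuf. */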