/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

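/* Editor's illustration (a sketch, not part of the driver): with three
 * hypothetical pages already linked a->b->c via page->private, the
 * helpers above compose as
 *
 *	struct page *head = NULL, *part;
 *	page_chain_add(&head, a, c);	 - whole chain pushed in front of head
 *	part = page_chain_del(&head, 2); - returns a->b terminated at b,
 *					   head now points at c
 *
 * page_chain_del() returns NULL and leaves head untouched if fewer than
 * n pages are linked.
 */
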
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

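/* Editor's sketch of the intended alloc/free pairing (illustrative only;
 * compare drbd_drain_block() further down): drbd_pp_free() must credit
 * the same pp_in_use counter that drbd_pp_alloc() debited:
 *
 *	page = drbd_pp_alloc(mdev, nr_pages, true);	- may block/retry
 *	if (page) {
 *		... fill the chain from the socket ...
 *		drbd_pp_free(mdev, page, 0);		- 0: not net_ee pages
 *	}
 */
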
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

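/* Editor's note, spelling out the locking contract above with the two
 * wait variants defined later in this file:
 *
 *	spin_lock_irq(&mdev->req_lock);
 *	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);	- lock held
 *	spin_unlock_irq(&mdev->req_lock);
 *
 * drbd_wait_ee_list_empty() is the wrapper that takes and drops req_lock
 * itself, so it must be called without the lock.
 */
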
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->collision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	drbd_thread_start(&mdev->asender);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

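/* Editor's sketch of how a caller is expected to act on the return codes
 * above (an assumption modeled on the receiver thread's connect loop,
 * which lies outside this excerpt; not verbatim driver code):
 *
 *	do {
 *		h = drbd_connect(mdev);
 *		if (h == 0)	- not fatal: tear down and retry soon
 *			...
 *		if (h == -1)	- incompatible peer: go standalone
 *			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 *	} while (h == 0);
 */
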
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

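/* Editor's note on the two header variants decoded above; field widths
 * are inferred from the byte-order accessors used:
 *
 *	h80: magic (32bit, BE_DRBD_MAGIC)     | command (16bit) | length (16bit)
 *	h95: magic (16bit, BE_DRBD_MAGIC_BIG) | command (16bit) | length (32bit)
 *
 * The 32bit length of h95 presumably exists so payloads larger than a
 * 16bit length can express (64 KiB - 1) can be announced.
 */
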
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_info(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee() - Submit an epoch entry's page chain as one or more bios
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

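/* Editor's sketch of the caller contract for drbd_submit_ee() (see
 * recv_resync_read() below for the real usage): on 0, completion of the
 * submitted bios owns the epoch entry; on error the caller must unlink
 * and free it itself:
 *
 *	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
 *		return true;
 *	spin_lock_irq(&mdev->req_lock);
 *	list_del(&e->w.list);
 *	spin_unlock_irq(&mdev->req_lock);
 *	drbd_free_ee(mdev, e);
 */
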
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

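/* Editor's note on the wire layout consumed by read_in_block() above,
 * as implied by the code (digest first, then payload):
 *
 *	[ digest: dgs bytes ][ payload: data_size - dgs bytes ]
 *
 * dgs is the digest size of integrity_r_tfm, or 0 if no data integrity
 * algorithm is in effect (protocol version < 87, or none configured).
 */
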
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->collision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->collision));
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
1606 * returns 0 if we may process the packet,
1607 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1608static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1609{
1610 DEFINE_WAIT(wait);
1611 unsigned int p_seq;
1612 long timeout;
1613 int ret = 0;
1614 spin_lock(&mdev->peer_seq_lock);
1615 for (;;) {
1616 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1617 if (seq_le(packet_seq, mdev->peer_seq+1))
1618 break;
1619 if (signal_pending(current)) {
1620 ret = -ERESTARTSYS;
1621 break;
1622 }
1623 p_seq = mdev->peer_seq;
1624 spin_unlock(&mdev->peer_seq_lock);
1625 timeout = schedule_timeout(30*HZ);
1626 spin_lock(&mdev->peer_seq_lock);
1627 if (timeout == 0 && p_seq == mdev->peer_seq) {
1628 ret = -ETIMEDOUT;
1629 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1630 break;
1631 }
1632 }
1633 finish_wait(&mdev->seq_wait, &wait);
1634 if (mdev->peer_seq+1 == packet_seq)
1635 mdev->peer_seq++;
1636 spin_unlock(&mdev->peer_seq_lock);
1637 return ret;
1638}
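
/* The seq_le() test above tolerates the 32bit wrap discussed in the
 * comment: sequence numbers are compared by signed difference, not by
 * magnitude.  A minimal self-contained sketch of the idea (hypothetical
 * helper name, assuming two's-complement s32 arithmetic; not the
 * driver's actual definition):
 */
static inline int seq_le_sketch(u32 a, u32 b)
{
	/* e.g. a == 0xffffffff, b == 0x00000001:
	 * (s32)(a - b) == -2, so a still counts as "before" b,
	 * even though a > b numerically. */
	return (s32)(a - b) <= 0;
}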

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
	       (dpf & DP_FUA ? REQ_FUA : 0) |
	       (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
	       (dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
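
/* wire_flags_to_bio() is the decoding half of a symmetric mapping; the
 * encoding half, bio_flags_to_wire(), lives elsewhere in the driver.
 * A minimal sketch of the direction not shown here (illustrative only,
 * hypothetical name, not a copy of the real encoder):
 */
static inline u32 bio_flags_to_wire_sketch(unsigned long bi_rw)
{
	return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
	       (bi_rw & REQ_FUA ? DP_FUA : 0) |
	       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
	       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}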

/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, collision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return true;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->collision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}
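
/* The OVERLAPS test in receive_Data() above reduces to half-open
 * interval intersection on 512-byte sectors, with sizes given in bytes.
 * A self-contained sketch of that predicate (hypothetical helper; the
 * driver's overlaps() is defined in a header and assumed equivalent):
 */
static inline int overlaps_sketch(sector_t s1, int l1, sector_t s2, int l2)
{
	/* [s1, s1 + (l1>>9)) and [s2, s2 + (l2>>9)) intersect iff
	 * neither range ends at or before the start of the other. */
	return !((s1 + (l1 >> 9)) <= s2 || s1 >= (s2 + (l2 >> 9)));
}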

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * activity (more than 64 sectors) that we cannot account for with our own
 * resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
			throttle = 1;
	}
	return throttle;
}
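
/* Worked example for the short-term rate check above (made-up numbers):
 * suppose the chosen sync mark is dt == 6 seconds old and db == 24576
 * bitmap bits were cleared since then.  With 4 KiB covered per bit,
 * Bit2KB(24576 / 6) == Bit2KB(4096) == 16384 KiB/s, so a c_min_rate of
 * 4000 KiB/s would throttle.  A self-contained sketch of just the
 * arithmetic (hypothetical helper, not driver state):
 */
static inline int rs_rate_exceeds_sketch(unsigned long db, unsigned long dt,
					 unsigned long c_min_rate /* KiB/s */)
{
	if (!dt)
		dt++;	/* same division-by-zero guard as above */
	return Bit2KB(db / dt) > c_min_rate;
}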


static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, digest_size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->digest = di;
		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = 1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv = 1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if (ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv = 1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv = 1;
	}

	return rv;
}
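
/* Sign convention used by drbd_asb_recover_0p() above and the 1p/2p
 * helpers below (derived from the ASB_DISCARD_LOCAL/REMOTE cases):
 * rv == -1 discards the local node's data (this node becomes SyncTarget),
 * rv == 1 discards the peer's data, and rv == -100 means no automatic
 * decision could be reached. */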

static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1 && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}
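
/* Throughout the UUID comparison below, values are compared with the
 * lowest bit masked off: bit 0 is used as a flag (see e.g.
 * drbd_asb_recover_0p() above, which inspects it) and must not take
 * part in equality tests.  A sketch of that idiom (hypothetical helper;
 * the code below open-codes the mask):
 */
static inline int uuid_equal_sketch(u64 a, u64 b)
{
	return (a & ~((u64)1)) == (b & ~((u64)1));
}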

/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];

			dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}


	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}
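
/* How a caller maps drbd_uuid_compare() results to actions, condensing
 * the table above into one place.  A sketch for illustration only
 * (hypothetical helper; the real decision logic is drbd_sync_handshake()
 * below):
 */
static inline const char *uuid_compare_result_sketch(int hg)
{
	if (hg == 100 || hg == -100)
		return "split brain: try auto recover / disconnect";
	if (hg <= -1000)
		return "unrelated data, or peer protocol too old";
	if (hg == 2)
		return "become C_SYNC_SOURCE, set bitmap (full sync)";
	if (hg == 1)
		return "become C_SYNC_SOURCE, use bitmap";
	if (hg == 0)
		return "no sync";
	if (hg == -1)
		return "become C_SYNC_TARGET, use bitmap";
	if (hg == -2)
		return "become C_SYNC_TARGET, set bitmap (full sync)";
	return "unknown";
}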

/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
			     "assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}

static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_protocol *p = &mdev->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose	= cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return false;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return true;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
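
/* The helper above deliberately returns one of three things: NULL ("no
 * algorithm configured"), an ERR_PTR ("requested but unusable"), or a
 * valid tfm.  A sketch of the call pattern it expects (hypothetical
 * helper and algorithm name; mirrors how receive_SyncParam() below
 * consumes the result):
 */
static inline int crypto_digest_safe_usage_sketch(struct drbd_conf *mdev)
{
	struct crypto_hash *tfm;

	tfm = drbd_crypto_alloc_digest_safe(mdev, "md5", "verify-alg");
	if (IS_ERR(tfm))
		return -EINVAL;	/* allocation was attempted and failed */
	if (tfm == NULL)
		return 0;	/* feature not requested, nothing to do */
	/* ... use tfm ... */
	crypto_free_hash(tfm);
	return 0;
}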

static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
	int ok = true;
	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    packet_size, exp_max_sz);
		return false;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
		data_size = packet_size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
		data_size = packet_size - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
		data_size = packet_size - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return false;

	mdev->sync_conf.rate = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
				    "peer wants %u, accepting only %u bytes\n",
						data_size, SHARED_SECRET_MAX);
				return false;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return false;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kzalloc of fifo_buffer failed");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;
disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}
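
/* The 12.5% threshold above comes from the shifts: d > (a>>3) is
 * d > a/8.  Worked example (made-up numbers): a = 1000000 sectors and
 * b = 860000 sectors give d = 140000, and a>>3 == 125000, so the
 * warning fires. */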
2927
Philipp Reisner02918be2010-08-20 14:35:10 +02002928static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002929{
Philipp Reisner02918be2010-08-20 14:35:10 +02002930 struct p_sizes *p = &mdev->data.rbuf.sizes;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002931 enum determine_dev_size dd = unchanged;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002932 sector_t p_size, p_usize, my_usize;
2933 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01002934 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002935
Philipp Reisnerb411b362009-09-25 16:07:19 -07002936 p_size = be64_to_cpu(p->d_size);
2937 p_usize = be64_to_cpu(p->u_size);
2938
2939 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2940 dev_err(DEV, "some backing storage is needed\n");
2941 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002942 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002943 }
2944
2945 /* just store the peer's disk size for now.
2946 * we still need to figure out whether we accept that. */
2947 mdev->p_size = p_size;
2948
Philipp Reisnerb411b362009-09-25 16:07:19 -07002949 if (get_ldev(mdev)) {
2950 warn_if_differ_considerably(mdev, "lower level device sizes",
2951 p_size, drbd_get_max_capacity(mdev->ldev));
2952 warn_if_differ_considerably(mdev, "user requested size",
2953 p_usize, mdev->ldev->dc.disk_size);
2954
2955 /* if this is the first connect, or an otherwise expected
2956 * param exchange, choose the minimum */
2957 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2958 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2959 p_usize);
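 /* min_not_zero() treats 0 as "no size requested": if only one side
 * configured a disk_size, that value wins; if both did, the smaller
 * one wins; if neither did, p_usize stays 0. */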
2960
2961 my_usize = mdev->ldev->dc.disk_size;
2962
2963 if (mdev->ldev->dc.disk_size != p_usize) {
2964 mdev->ldev->dc.disk_size = p_usize;
2965 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2966 (unsigned long)mdev->ldev->dc.disk_size);
2967 }
2968
2969 /* Never shrink a device with usable data during connect.
2970 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01002971 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07002972 drbd_get_capacity(mdev->this_bdev) &&
2973 mdev->state.disk >= D_OUTDATED &&
2974 mdev->state.conn < C_CONNECTED) {
2975 dev_err(DEV, "The peer's disk size is too small!\n");
2976 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2977 mdev->ldev->dc.disk_size = my_usize;
2978 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002979 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002980 }
2981 put_ldev(mdev);
2982 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002983
Philipp Reisnere89b5912010-03-24 17:11:33 +01002984 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002985 if (get_ldev(mdev)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02002986 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002987 put_ldev(mdev);
2988 if (dd == dev_size_error)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002989 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002990 drbd_md_sync(mdev);
2991 } else {
2992 /* I am diskless, need to accept the peer's size. */
2993 drbd_set_my_capacity(mdev, p_size);
2994 }
2995
Philipp Reisner99432fc2011-05-20 16:39:13 +02002996 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
2997 drbd_reconsider_max_bio_size(mdev);
2998
Philipp Reisnerb411b362009-09-25 16:07:19 -07002999 if (get_ldev(mdev)) {
3000 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3001 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3002 ldsc = 1;
3003 }
3004
Philipp Reisnerb411b362009-09-25 16:07:19 -07003005 put_ldev(mdev);
3006 }
3007
3008 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3009 if (be64_to_cpu(p->c_size) !=
3010 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3011 /* we have different sizes, probably peer
3012 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003013 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003014 }
3015 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3016 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3017 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003018 mdev->state.disk >= D_INCONSISTENT) {
3019 if (ddsf & DDSF_NO_RESYNC)
3020 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3021 else
3022 resync_after_online_grow(mdev);
3023 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003024 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3025 }
3026 }
3027
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003028 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003029}
3030
Philipp Reisner02918be2010-08-20 14:35:10 +02003031static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003032{
Philipp Reisner02918be2010-08-20 14:35:10 +02003033 struct p_uuids *p = &mdev->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003035 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003036
Philipp Reisnerb411b362009-09-25 16:07:19 -07003037 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
 if (!p_uuid) {
 dev_err(DEV, "kmalloc of p_uuid failed\n");
 return false;
 }
3038
3039 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3040 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3041
3042 kfree(mdev->p_uuid);
3043 mdev->p_uuid = p_uuid;
3044
3045 if (mdev->state.conn < C_CONNECTED &&
3046 mdev->state.disk < D_INCONSISTENT &&
3047 mdev->state.role == R_PRIMARY &&
3048 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3049 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3050 (unsigned long long)mdev->ed_uuid);
3051 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003052 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003053 }
3054
3055 if (get_ldev(mdev)) {
3056 int skip_initial_sync =
3057 mdev->state.conn == C_CONNECTED &&
3058 mdev->agreed_pro_version >= 90 &&
3059 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3060 (p_uuid[UI_FLAGS] & 8);
3061 if (skip_initial_sync) {
3062 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3063 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003064 "clear_n_write from receive_uuids",
3065 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003066 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3067 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3068 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3069 CS_VERBOSE, NULL);
3070 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003071 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003072 }
3073 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003074 } else if (mdev->state.disk < D_INCONSISTENT &&
3075 mdev->state.role == R_PRIMARY) {
3076 /* I am a diskless primary, the peer just created a new current UUID
3077 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003078 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003079 }
3080
3081 /* Before we test for the disk state, we should wait until a possibly
 3082 ongoing cluster-wide state change has finished. That is important if
3083 we are primary and are detaching from our disk. We need to see the
3084 new disk state... */
3085 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3086 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003087 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3088
3089 if (updated_uuids)
3090 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003091
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003092 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003093}
3094
3095/**
3096 * convert_state() - Converts the peer's view of the cluster state to our point of view
3097 * @ps: The state as seen by the peer.
3098 */
3099static union drbd_state convert_state(union drbd_state ps)
3100{
3101 union drbd_state ms;
3102
3103 static enum drbd_conns c_tab[] = {
3104 [C_CONNECTED] = C_CONNECTED,
3105
3106 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3107 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3108 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3109 [C_VERIFY_S] = C_VERIFY_T,
3110 [C_MASK] = C_MASK,
3111 };
3112
3113 ms.i = ps.i;
3114
3115 ms.conn = c_tab[ps.conn];
3116 ms.peer = ps.role;
3117 ms.role = ps.peer;
3118 ms.pdsk = ps.disk;
3119 ms.disk = ps.pdsk;
3120 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3121
3122 return ms;
3123}
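/* For example, if the peer reports
 * { role=Primary, peer=Secondary, disk=UpToDate, pdsk=Inconsistent, conn=VerifyS },
 * the mirrored view produced here is
 * { role=Secondary, peer=Primary, disk=Inconsistent, pdsk=UpToDate, conn=VerifyT }:
 * role/peer and disk/pdsk swap places, and asymmetric connection states
 * map to their counterparts. */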
3124
Philipp Reisner02918be2010-08-20 14:35:10 +02003125static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003126{
Philipp Reisner02918be2010-08-20 14:35:10 +02003127 struct p_req_state *p = &mdev->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003128 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003129 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003130
Philipp Reisnerb411b362009-09-25 16:07:19 -07003131 mask.i = be32_to_cpu(p->mask);
3132 val.i = be32_to_cpu(p->val);
3133
3134 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3135 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3136 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003137 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003138 }
3139
3140 mask = convert_state(mask);
3141 val = convert_state(val);
3142
3143 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3144
3145 drbd_send_sr_reply(mdev, rv);
3146 drbd_md_sync(mdev);
3147
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003148 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003149}
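/* Note on the SS_CONCURRENT_ST_CHG reply above: when both nodes request
 * a cluster-wide state change at the same time, only one of them has
 * DISCARD_CONCURRENT set (decided while the connection was established),
 * and that node rejects the peer's request, so exactly one of the two
 * concurrent changes goes through. */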
3150
Philipp Reisner02918be2010-08-20 14:35:10 +02003151static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003152{
Philipp Reisner02918be2010-08-20 14:35:10 +02003153 struct p_state *p = &mdev->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003154 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003155 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003156 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003157 int rv;
3158
Philipp Reisnerb411b362009-09-25 16:07:19 -07003159 peer_state.i = be32_to_cpu(p->state);
3160
3161 real_peer_disk = peer_state.disk;
3162 if (peer_state.disk == D_NEGOTIATING) {
3163 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3164 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3165 }
3166
3167 spin_lock_irq(&mdev->req_lock);
3168 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003169 os = ns = mdev->state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003170 spin_unlock_irq(&mdev->req_lock);
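 /* Optimistic concurrency: the state is sampled under req_lock, the
 * decisions below are made without the lock, and before committing we
 * re-check under req_lock that mdev->state still equals os; if it
 * changed in between, we jump back to retry. */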
3171
Lars Ellenberg545752d2011-12-05 14:39:25 +01003172 /* If some other part of the code (asender thread, timeout)
3173 * already decided to close the connection again,
3174 * we must not "re-establish" it here. */
3175 if (os.conn <= C_TEAR_DOWN)
3176 return false;
3177
Lars Ellenberg40424e42011-09-26 15:24:56 +02003178 /* If this is the "end of sync" confirmation, usually the peer disk
 3179 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For an empty resync
 3180 * (0 bits set) started in PausedSyncT, or if the timing of pause-/
 3181 * unpause-sync events has been "just right", the peer disk may
3182 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3183 */
3184 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3185 real_peer_disk == D_UP_TO_DATE &&
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003186 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3187 /* If we are (becoming) SyncSource, but peer is still in sync
3188 * preparation, ignore its uptodate-ness to avoid flapping, it
3189 * will change to inconsistent once the peer reaches active
3190 * syncing states.
3191 * It may have changed syncer-paused flags, however, so we
3192 * cannot ignore this completely. */
3193 if (peer_state.conn > C_CONNECTED &&
3194 peer_state.conn < C_SYNC_SOURCE)
3195 real_peer_disk = D_INCONSISTENT;
3196
3197 /* if peer_state changes to connected at the same time,
3198 * it explicitly notifies us that it finished resync.
3199 * Maybe we should finish it up, too? */
3200 else if (os.conn >= C_SYNC_SOURCE &&
3201 peer_state.conn == C_CONNECTED) {
3202 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3203 drbd_resync_finished(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003204 return true;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003205 }
3206 }
3207
3208 /* peer says his disk is inconsistent, while we think it is uptodate,
3209 * and this happens while the peer still thinks we have a sync going on,
3210 * but we think we are already done with the sync.
3211 * We ignore this to avoid flapping pdsk.
3212 * This should not happen, if the peer is a recent version of drbd. */
3213 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3214 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3215 real_peer_disk = D_UP_TO_DATE;
3216
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003217 if (ns.conn == C_WF_REPORT_PARAMS)
3218 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003219
Philipp Reisner67531712010-10-27 12:21:30 +02003220 if (peer_state.conn == C_AHEAD)
3221 ns.conn = C_BEHIND;
3222
Philipp Reisnerb411b362009-09-25 16:07:19 -07003223 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3224 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3225 int cr; /* consider resync */
3226
3227 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003228 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003229 /* if we had an established connection
3230 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003231 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003232 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003233 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003234 /* if we have both been inconsistent, and the peer has been
3235 * forced to be UpToDate with --overwrite-data */
3236 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3237 /* if we had been plain connected, and the admin requested to
3238 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003239 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003240 (peer_state.conn >= C_STARTING_SYNC_S &&
3241 peer_state.conn <= C_WF_BITMAP_T));
3242
3243 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003244 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003245
3246 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003247 if (ns.conn == C_MASK) {
3248 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003249 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003250 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003251 } else if (peer_state.disk == D_NEGOTIATING) {
3252 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3253 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003254 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003255 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003256 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003257 return false;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003258 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003259 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003260 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003261 }
3262 }
3263 }
3264
3265 spin_lock_irq(&mdev->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003266 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003267 goto retry;
3268 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003269 ns.peer = peer_state.role;
3270 ns.pdsk = real_peer_disk;
3271 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003272 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003273 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003274 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3275 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003276 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3277 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
 3278 for temporary network outages! */
3279 spin_unlock_irq(&mdev->req_lock);
3280 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3281 tl_clear(mdev);
3282 drbd_uuid_new_current(mdev);
3283 clear_bit(NEW_CUR_UUID, &mdev->flags);
3284 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003285 return false;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003286 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003287 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003288 ns = mdev->state;
3289 spin_unlock_irq(&mdev->req_lock);
3290
3291 if (rv < SS_SUCCESS) {
3292 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003293 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003294 }
3295
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003296 if (os.conn > C_WF_REPORT_PARAMS) {
3297 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003298 peer_state.disk != D_NEGOTIATING ) {
3299 /* we want resync, peer has not yet decided to sync... */
3300 /* Nowadays only used when forcing a node into primary role and
3301 setting its disk to UpToDate with that */
3302 drbd_send_uuids(mdev);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02003303 drbd_send_current_state(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003304 }
3305 }
3306
3307 mdev->net_conf->want_lose = 0;
3308
3309 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3310
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003311 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003312}
3313
Philipp Reisner02918be2010-08-20 14:35:10 +02003314static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003315{
Philipp Reisner02918be2010-08-20 14:35:10 +02003316 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003317
3318 wait_event(mdev->misc_wait,
3319 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003320 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003321 mdev->state.conn < C_CONNECTED ||
3322 mdev->state.disk < D_NEGOTIATING);
3323
3324 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3325
Philipp Reisnerb411b362009-09-25 16:07:19 -07003326 /* Here the _drbd_uuid_ functions are right, current should
3327 _not_ be rotated into the history */
3328 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3329 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3330 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3331
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003332 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003333 drbd_start_resync(mdev, C_SYNC_TARGET);
3334
3335 put_ldev(mdev);
3336 } else
3337 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3338
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003339 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003340}
3341
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003342/**
3343 * receive_bitmap_plain
3344 *
3345 * Return 0 when done, 1 when another iteration is needed, and a negative error
3346 * code upon failure.
3347 */
3348static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003349receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3350 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003351{
3352 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3353 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003354 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003355
Philipp Reisner02918be2010-08-20 14:35:10 +02003356 if (want != data_size) {
3357 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003358 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003359 }
3360 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003361 return 0;
3362 err = drbd_recv(mdev, buffer, want);
3363 if (err != want) {
3364 if (err >= 0)
3365 err = -EIO;
3366 return err;
3367 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003368
3369 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3370
3371 c->word_offset += num_words;
3372 c->bit_offset = c->word_offset * BITS_PER_LONG;
3373 if (c->bit_offset > c->bm_bits)
3374 c->bit_offset = c->bm_bits;
3375
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003376 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003377}
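/* Each plain bitmap packet carries at most BM_PACKET_WORDS longs; the
 * bm_xfer_ctx cursor advances word by word, so this returns 1 after
 * every chunk and 0 only once no words remain to be received. */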
3378
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003379/**
3380 * recv_bm_rle_bits
3381 *
3382 * Return 0 when done, 1 when another iteration is needed, and a negative error
3383 * code upon failure.
3384 */
3385static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003386recv_bm_rle_bits(struct drbd_conf *mdev,
3387 struct p_compressed_bm *p,
3388 struct bm_xfer_ctx *c)
3389{
3390 struct bitstream bs;
3391 u64 look_ahead;
3392 u64 rl;
3393 u64 tmp;
3394 unsigned long s = c->bit_offset;
3395 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003396 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003397 int toggle = DCBP_get_start(p);
3398 int have;
3399 int bits;
3400
3401 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3402
3403 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3404 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003405 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003406
3407 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3408 bits = vli_decode_bits(&rl, look_ahead);
3409 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003410 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003411
3412 if (toggle) {
3413 e = s + rl -1;
3414 if (e >= c->bm_bits) {
3415 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003416 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003417 }
3418 _drbd_bm_set_bits(mdev, s, e);
3419 }
3420
3421 if (have < bits) {
3422 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3423 have, bits, look_ahead,
3424 (unsigned int)(bs.cur.b - p->code),
3425 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003426 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003427 }
3428 look_ahead >>= bits;
3429 have -= bits;
3430
3431 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3432 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003433 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003434 look_ahead |= tmp << have;
3435 have += bits;
3436 }
3437
3438 c->bit_offset = s;
3439 bm_xfer_ctx_bit_to_word_offset(c);
3440
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003441 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003442}
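/* The VLI-encoded stream is a sequence of run lengths for alternating
 * runs of clear and set bits; DCBP_get_start() gives the polarity of
 * the first run. E.g. with start=0 and decoded run lengths 5, 3, 4,
 * bits 0-4 stay clear, bits 5-7 are set, and bits 8-11 stay clear. */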
3443
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003444/**
3445 * decode_bitmap_c
3446 *
3447 * Return 0 when done, 1 when another iteration is needed, and a negative error
3448 * code upon failure.
3449 */
3450static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003451decode_bitmap_c(struct drbd_conf *mdev,
3452 struct p_compressed_bm *p,
3453 struct bm_xfer_ctx *c)
3454{
3455 if (DCBP_get_code(p) == RLE_VLI_Bits)
3456 return recv_bm_rle_bits(mdev, p, c);
3457
 3458 /* other variants were implemented for evaluation,
 3459 * but were dropped, as this one turned out to be "best"
 3460 * in all our tests. */
3461
3462 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3463 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003464 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003465}
3466
3467void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3468 const char *direction, struct bm_xfer_ctx *c)
3469{
3470 /* what would it take to transfer it "plaintext" */
Philipp Reisner0b70a132010-08-20 13:36:10 +02003471 unsigned plain = sizeof(struct p_header80) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003472 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3473 + c->bm_words * sizeof(long);
3474 unsigned total = c->bytes[0] + c->bytes[1];
3475 unsigned r;
3476
 3477 /* total cannot be zero, but just in case: */
3478 if (total == 0)
3479 return;
3480
3481 /* don't report if not compressed */
3482 if (total >= plain)
3483 return;
3484
3485 /* total < plain. check for overflow, still */
3486 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3487 : (1000 * total / plain);
3488
3489 if (r > 1000)
3490 r = 1000;
3491
3492 r = 1000 - r;
3493 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3494 "total %u; compression: %u.%u%%\n",
3495 direction,
3496 c->bytes[1], c->packets[1],
3497 c->bytes[0], c->packets[0],
3498 total, r/10, r % 10);
3499}
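/* The ratio is computed in per-mille and printed as a percentage with
 * one decimal place: e.g. plain = 1000 bytes, total = 100 bytes gives
 * r = 1000 - 1000*100/1000 = 900, reported as "compression: 90.0%". */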
3500
3501/* Since we process the bitfield from lower addresses to higher, it does
 3502 not matter whether we process it in 32 bit or 64 bit chunks, as long
 3503 as it is little endian. (Understand it as a byte stream, beginning
 3504 with the lowest byte...) If we used big endian, we would need to
 3505 process it from the highest address to the lowest in order to be
 3506 agnostic to the 32 vs 64 bit issue.
 3507
 3508 returns 0 on failure, 1 if we successfully received it. */
Philipp Reisner02918be2010-08-20 14:35:10 +02003509static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003510{
3511 struct bm_xfer_ctx c;
3512 void *buffer;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003513 int err;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003514 int ok = false;
Philipp Reisner02918be2010-08-20 14:35:10 +02003515 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003516
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003517 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3518 /* you are supposed to send additional out-of-sync information
3519 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003520
3521 /* maybe we should use some per thread scratch page,
3522 * and allocate that during initial device creation? */
3523 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3524 if (!buffer) {
3525 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3526 goto out;
3527 }
3528
3529 c = (struct bm_xfer_ctx) {
3530 .bm_bits = drbd_bm_bits(mdev),
3531 .bm_words = drbd_bm_words(mdev),
3532 };
3533
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003534 for (;;) {
Philipp Reisner02918be2010-08-20 14:35:10 +02003535 if (cmd == P_BITMAP) {
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003536 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
Philipp Reisner02918be2010-08-20 14:35:10 +02003537 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003538 /* MAYBE: sanity check that we speak proto >= 90,
3539 * and the feature is enabled! */
3540 struct p_compressed_bm *p;
3541
Philipp Reisner02918be2010-08-20 14:35:10 +02003542 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003543 dev_err(DEV, "ReportCBitmap packet too large\n");
3544 goto out;
3545 }
3546 /* use the page buff */
3547 p = buffer;
3548 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003549 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003550 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003551 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3552 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003553 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003554 }
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003555 err = decode_bitmap_c(mdev, p, &c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003556 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003557 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003558 goto out;
3559 }
3560
Philipp Reisner02918be2010-08-20 14:35:10 +02003561 c.packets[cmd == P_BITMAP]++;
3562 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003563
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003564 if (err <= 0) {
3565 if (err < 0)
3566 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003567 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003568 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003569 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003570 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003571 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003572
3573 INFO_bm_xfer_stats(mdev, "receive", &c);
3574
3575 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003576 enum drbd_state_rv rv;
3577
Philipp Reisnerb411b362009-09-25 16:07:19 -07003578 ok = !drbd_send_bitmap(mdev);
3579 if (!ok)
3580 goto out;
3581 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003582 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3583 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003584 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3585 /* admin may have requested C_DISCONNECTING,
3586 * other threads may have noticed network errors */
3587 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3588 drbd_conn_str(mdev->state.conn));
3589 }
3590
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003591 ok = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003592 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003593 drbd_bm_unlock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003594 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3595 drbd_start_resync(mdev, C_SYNC_SOURCE);
3596 free_page((unsigned long) buffer);
3597 return ok;
3598}
3599
Philipp Reisner02918be2010-08-20 14:35:10 +02003600static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003601{
3602 /* TODO zero copy sink :) */
3603 static char sink[128];
3604 int size, want, r;
3605
Philipp Reisner02918be2010-08-20 14:35:10 +02003606 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3607 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003608
Philipp Reisner02918be2010-08-20 14:35:10 +02003609 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003610 while (size > 0) {
3611 want = min_t(int, size, sizeof(sink));
3612 r = drbd_recv(mdev, sink, want);
3613 ERR_IF(r <= 0) break;
3614 size -= r;
3615 }
3616 return size == 0;
3617}
3618
Philipp Reisner02918be2010-08-20 14:35:10 +02003619static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003620{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003621 /* Make sure we've acked all the TCP data associated
3622 * with the data requests being unplugged */
3623 drbd_tcp_quickack(mdev->data.socket);
3624
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003625 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003626}
3627
Philipp Reisner73a01a12010-10-27 14:33:00 +02003628static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3629{
3630 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3631
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003632 switch (mdev->state.conn) {
3633 case C_WF_SYNC_UUID:
3634 case C_WF_BITMAP_T:
3635 case C_BEHIND:
3636 break;
3637 default:
3638 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3639 drbd_conn_str(mdev->state.conn));
3640 }
3641
Philipp Reisner73a01a12010-10-27 14:33:00 +02003642 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3643
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003644 return true;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003645}
3646
Philipp Reisner02918be2010-08-20 14:35:10 +02003647typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003648
Philipp Reisner02918be2010-08-20 14:35:10 +02003649struct data_cmd {
3650 int expect_payload;
3651 size_t pkt_size;
3652 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003653};
3654
Philipp Reisner02918be2010-08-20 14:35:10 +02003655static struct data_cmd drbd_cmd_handler[] = {
3656 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3657 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3658 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3659 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3660 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3661 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3662 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3663 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3664 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3665 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3666 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3667 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3668 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3669 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3670 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3671 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3672 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3673 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3674 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3675 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3676 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003677 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003678 /* anything missing from this table is in
3679 * the asender_tbl, see get_asender_cmd */
3680 [P_MAX_CMD] = { 0, 0, NULL },
3681};
3682
3683/* All handler functions that expect a sub-header get that sub-header in
 3684 mdev->data.rbuf.header.head.payload.
 3685
 3686 Usually the callback can find the usual p_header in
 3687 mdev->data.rbuf.header.head, but it must not rely on that,
 3688 since there is also p_header95! */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003689
3690static void drbdd(struct drbd_conf *mdev)
3691{
Philipp Reisner02918be2010-08-20 14:35:10 +02003692 union p_header *header = &mdev->data.rbuf.header;
3693 unsigned int packet_size;
3694 enum drbd_packets cmd;
3695 size_t shs; /* sub header size */
3696 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003697
3698 while (get_t_state(&mdev->receiver) == Running) {
3699 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003700 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3701 goto err_out;
3702
3703 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3704 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3705 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003706 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003707
Philipp Reisner02918be2010-08-20 14:35:10 +02003708 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02003709 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3710 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3711 goto err_out;
3712 }
3713
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003714 if (shs) {
3715 rv = drbd_recv(mdev, &header->h80.payload, shs);
3716 if (unlikely(rv != shs)) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003717 if (!signal_pending(current))
3718 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003719 goto err_out;
3720 }
3721 }
3722
Philipp Reisner02918be2010-08-20 14:35:10 +02003723 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3724
3725 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003726 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003727 cmdname(cmd), packet_size);
3728 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003729 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003730 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003731
Philipp Reisner02918be2010-08-20 14:35:10 +02003732 if (0) {
3733 err_out:
3734 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003735 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003736 /* If we leave here, we probably want to update at least the
3737 * "Connected" indicator on stable storage. Do so explicitly here. */
3738 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003739}
3740
3741void drbd_flush_workqueue(struct drbd_conf *mdev)
3742{
3743 struct drbd_wq_barrier barr;
3744
3745 barr.w.cb = w_prev_work_done;
3746 init_completion(&barr.done);
3747 drbd_queue_work(&mdev->data.work, &barr.w);
3748 wait_for_completion(&barr.done);
3749}
3750
Philipp Reisnerf70b35112010-06-24 14:34:40 +02003751void drbd_free_tl_hash(struct drbd_conf *mdev)
3752{
3753 struct hlist_head *h;
3754
3755 spin_lock_irq(&mdev->req_lock);
3756
3757 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3758 spin_unlock_irq(&mdev->req_lock);
3759 return;
3760 }
3761 /* paranoia code */
3762 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3763 if (h->first)
3764 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3765 (int)(h - mdev->ee_hash), h->first);
3766 kfree(mdev->ee_hash);
3767 mdev->ee_hash = NULL;
3768 mdev->ee_hash_s = 0;
3769
3770 /* paranoia code */
3771 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3772 if (h->first)
3773 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3774 (int)(h - mdev->tl_hash), h->first);
3775 kfree(mdev->tl_hash);
3776 mdev->tl_hash = NULL;
3777 mdev->tl_hash_s = 0;
3778 spin_unlock_irq(&mdev->req_lock);
3779}
3780
Philipp Reisnerb411b362009-09-25 16:07:19 -07003781static void drbd_disconnect(struct drbd_conf *mdev)
3782{
3783 enum drbd_fencing_p fp;
3784 union drbd_state os, ns;
3785 int rv = SS_UNKNOWN_ERROR;
3786 unsigned int i;
3787
3788 if (mdev->state.conn == C_STANDALONE)
3789 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003790
Lars Ellenberg545752d2011-12-05 14:39:25 +01003791 /* We are about to start the cleanup after connection loss.
3792 * Make sure drbd_make_request knows about that.
3793 * Usually we should be in some network failure state already,
3794 * but just in case we are not, we fix it up here.
3795 */
3796 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
3797
Philipp Reisnerb411b362009-09-25 16:07:19 -07003798 /* asender does not clean up anything. it must not interfere, either */
3799 drbd_thread_stop(&mdev->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003800 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003801
Philipp Reisner85719572010-07-21 10:20:17 +02003802 /* wait for current activity to cease. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003803 spin_lock_irq(&mdev->req_lock);
3804 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3805 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3806 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3807 spin_unlock_irq(&mdev->req_lock);
3808
3809 /* We do not have data structures that would allow us to
3810 * get the rs_pending_cnt down to 0 again.
3811 * * On C_SYNC_TARGET we do not have any data structures describing
3812 * the pending RSDataRequest's we have sent.
3813 * * On C_SYNC_SOURCE there is no data structure that tracks
3814 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3815 * And no, it is not the sum of the reference counts in the
3816 * resync_LRU. The resync_LRU tracks the whole operation including
3817 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3818 * on the fly. */
3819 drbd_rs_cancel_all(mdev);
3820 mdev->rs_total = 0;
3821 mdev->rs_failed = 0;
3822 atomic_set(&mdev->rs_pending_cnt, 0);
3823 wake_up(&mdev->misc_wait);
3824
3825 /* make sure syncer is stopped and w_resume_next_sg queued */
3826 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003827 resync_timer_fn((unsigned long)mdev);
3828
Philipp Reisnerb411b362009-09-25 16:07:19 -07003829 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3830 * w_make_resync_request etc. which may still be on the worker queue
3831 * to be "canceled" */
3832 drbd_flush_workqueue(mdev);
3833
3834 /* This also does reclaim_net_ee(). If we do this too early, we might
3835 * miss some resync ee and pages.*/
3836 drbd_process_done_ee(mdev);
3837
3838 kfree(mdev->p_uuid);
3839 mdev->p_uuid = NULL;
3840
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003841 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003842 tl_clear(mdev);
3843
Philipp Reisnerb411b362009-09-25 16:07:19 -07003844 dev_info(DEV, "Connection closed\n");
3845
3846 drbd_md_sync(mdev);
3847
3848 fp = FP_DONT_CARE;
3849 if (get_ldev(mdev)) {
3850 fp = mdev->ldev->dc.fencing;
3851 put_ldev(mdev);
3852 }
3853
Philipp Reisner87f7be42010-06-11 13:56:33 +02003854 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3855 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003856
3857 spin_lock_irq(&mdev->req_lock);
3858 os = mdev->state;
3859 if (os.conn >= C_UNCONNECTED) {
3860 /* Do not restart in case we are C_DISCONNECTING */
3861 ns = os;
3862 ns.conn = C_UNCONNECTED;
3863 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3864 }
3865 spin_unlock_irq(&mdev->req_lock);
3866
3867 if (os.conn == C_DISCONNECTING) {
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003868 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003869
Philipp Reisnerb411b362009-09-25 16:07:19 -07003870 crypto_free_hash(mdev->cram_hmac_tfm);
3871 mdev->cram_hmac_tfm = NULL;
3872
3873 kfree(mdev->net_conf);
3874 mdev->net_conf = NULL;
3875 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3876 }
3877
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003878 /* serialize with bitmap writeout triggered by the state change,
3879 * if any. */
3880 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3881
Philipp Reisnerb411b362009-09-25 16:07:19 -07003882 /* tcp_close and release of sendpage pages can be deferred. I don't
3883 * want to use SO_LINGER, because apparently it can be deferred for
3884 * more than 20 seconds (longest time I checked).
3885 *
3886 * Actually we don't care for exactly when the network stack does its
3887 * put_page(), but release our reference on these pages right here.
3888 */
3889 i = drbd_release_ee(mdev, &mdev->net_ee);
3890 if (i)
3891 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003892 i = atomic_read(&mdev->pp_in_use_by_net);
3893 if (i)
3894 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003895 i = atomic_read(&mdev->pp_in_use);
3896 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003897 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003898
3899 D_ASSERT(list_empty(&mdev->read_ee));
3900 D_ASSERT(list_empty(&mdev->active_ee));
3901 D_ASSERT(list_empty(&mdev->sync_ee));
3902 D_ASSERT(list_empty(&mdev->done_ee));
3903
3904 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3905 atomic_set(&mdev->current_epoch->epoch_size, 0);
3906 D_ASSERT(list_empty(&mdev->current_epoch->list));
3907}
3908
3909/*
3910 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3911 * we can agree on is stored in agreed_pro_version.
3912 *
 3913 * feature flags and the reserved array should leave enough room for future
3914 * enhancements of the handshake protocol, and possible plugins...
3915 *
3916 * for now, they are expected to be zero, but ignored.
3917 */
3918static int drbd_send_handshake(struct drbd_conf *mdev)
3919{
3920 /* ASSERT current == mdev->receiver ... */
3921 struct p_handshake *p = &mdev->data.sbuf.handshake;
3922 int ok;
3923
3924 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3925 dev_err(DEV, "interrupted during initial handshake\n");
3926 return 0; /* interrupted. not ok. */
3927 }
3928
3929 if (mdev->data.socket == NULL) {
3930 mutex_unlock(&mdev->data.mutex);
3931 return 0;
3932 }
3933
3934 memset(p, 0, sizeof(*p));
3935 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3936 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3937 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02003938 (struct p_header80 *)p, sizeof(*p), 0 );
Philipp Reisnerb411b362009-09-25 16:07:19 -07003939 mutex_unlock(&mdev->data.mutex);
3940 return ok;
3941}
3942
3943/*
3944 * return values:
3945 * 1 yes, we have a valid connection
3946 * 0 oops, did not work out, please try again
3947 * -1 peer talks different language,
3948 * no point in trying again, please go standalone.
3949 */
3950static int drbd_do_handshake(struct drbd_conf *mdev)
3951{
3952 /* ASSERT current == mdev->receiver ... */
3953 struct p_handshake *p = &mdev->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02003954 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3955 unsigned int length;
3956 enum drbd_packets cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003957 int rv;
3958
3959 rv = drbd_send_handshake(mdev);
3960 if (!rv)
3961 return 0;
3962
Philipp Reisner02918be2010-08-20 14:35:10 +02003963 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003964 if (!rv)
3965 return 0;
3966
Philipp Reisner02918be2010-08-20 14:35:10 +02003967 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003968 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003969 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003970 return -1;
3971 }
3972
Philipp Reisner02918be2010-08-20 14:35:10 +02003973 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003974 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003975 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003976 return -1;
3977 }
3978
3979 rv = drbd_recv(mdev, &p->head.payload, expect);
3980
3981 if (rv != expect) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003982 if (!signal_pending(current))
3983 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003984 return 0;
3985 }
3986
Philipp Reisnerb411b362009-09-25 16:07:19 -07003987 p->protocol_min = be32_to_cpu(p->protocol_min);
3988 p->protocol_max = be32_to_cpu(p->protocol_max);
3989 if (p->protocol_max == 0)
3990 p->protocol_max = p->protocol_min;
3991
3992 if (PRO_VERSION_MAX < p->protocol_min ||
3993 PRO_VERSION_MIN > p->protocol_max)
3994 goto incompat;
3995
3996 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3997
3998 dev_info(DEV, "Handshake successful: "
3999 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
4000
4001 return 1;
4002
4003 incompat:
4004 dev_err(DEV, "incompatible DRBD dialects: "
4005 "I support %d-%d, peer supports %d-%d\n",
4006 PRO_VERSION_MIN, PRO_VERSION_MAX,
4007 p->protocol_min, p->protocol_max);
4008 return -1;
4009}
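/* Negotiation example: suppose we support PRO_VERSION_MIN..PRO_VERSION_MAX
 * of 86..96 and the peer advertises 90..100; the ranges overlap, so we
 * agree on min(96, 100) = 96. A peer advertising only 97..100 would hit
 * the incompat: path above instead. */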
4010
4011#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4012static int drbd_do_auth(struct drbd_conf *mdev)
4013{
 4014 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4015 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004016 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004017}
4018#else
4019#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004020
4021/* Return value:
4022 1 - auth succeeded,
4023 0 - failed, try again (network error),
4024 -1 - auth failed, don't try again.
4025*/
4026
Philipp Reisnerb411b362009-09-25 16:07:19 -07004027static int drbd_do_auth(struct drbd_conf *mdev)
4028{
4029 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4030 struct scatterlist sg;
4031 char *response = NULL;
4032 char *right_response = NULL;
4033 char *peers_ch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004034 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4035 unsigned int resp_size;
4036 struct hash_desc desc;
Philipp Reisner02918be2010-08-20 14:35:10 +02004037 enum drbd_packets cmd;
4038 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004039 int rv;
4040
4041 desc.tfm = mdev->cram_hmac_tfm;
4042 desc.flags = 0;
4043
4044 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4045 (u8 *)mdev->net_conf->shared_secret, key_len);
4046 if (rv) {
4047 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004048 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004049 goto fail;
4050 }
4051
4052 get_random_bytes(my_challenge, CHALLENGE_LEN);
4053
4054 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4055 if (!rv)
4056 goto fail;
4057
Philipp Reisner02918be2010-08-20 14:35:10 +02004058 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004059 if (!rv)
4060 goto fail;
4061
Philipp Reisner02918be2010-08-20 14:35:10 +02004062 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004063 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004064 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004065 rv = 0;
4066 goto fail;
4067 }
4068
Philipp Reisner02918be2010-08-20 14:35:10 +02004069 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004070 dev_err(DEV, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004071 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004072 goto fail;
4073 }
4074
Philipp Reisner02918be2010-08-20 14:35:10 +02004075 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004076 if (peers_ch == NULL) {
4077 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004078 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004079 goto fail;
4080 }
4081
Philipp Reisner02918be2010-08-20 14:35:10 +02004082 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004083
Philipp Reisner02918be2010-08-20 14:35:10 +02004084 if (rv != length) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004085 if (!signal_pending(current))
4086 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004087 rv = 0;
4088 goto fail;
4089 }
4090
4091 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4092 response = kmalloc(resp_size, GFP_NOIO);
4093 if (response == NULL) {
4094 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004095 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004096 goto fail;
4097 }
4098
4099 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004100 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004101
4102 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4103 if (rv) {
4104 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004105 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004106 goto fail;
4107 }
4108
4109 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4110 if (!rv)
4111 goto fail;
4112
Philipp Reisner02918be2010-08-20 14:35:10 +02004113 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004114 if (!rv)
4115 goto fail;
4116
Philipp Reisner02918be2010-08-20 14:35:10 +02004117 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004118 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004119 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004120 rv = 0;
4121 goto fail;
4122 }
4123
Philipp Reisner02918be2010-08-20 14:35:10 +02004124 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004125 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4126 rv = 0;
4127 goto fail;
4128 }
4129
	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
		     resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
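
/*
 * A rough sketch of the challenge-response exchange implemented in
 * drbd_do_auth() above (both peers run it symmetrically, so each side
 * proves knowledge of the shared cram-hmac secret without ever putting
 * the secret itself on the wire):
 *
 *	we   -> peer:	P_AUTH_CHALLENGE, CHALLENGE_LEN random bytes
 *	peer -> we:	P_AUTH_RESPONSE,  HMAC(secret, our challenge)
 *	we:		recompute into right_response, memcmp()
 *
 * Return convention of drbd_do_auth(): 1 = peer authenticated,
 * 0 = transient failure (worth a retry), -1 = reject the peer.
 */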

int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	unsigned int minor = mdev_to_minor(mdev);
	int h;

	sprintf(current->comm, "drbd%d_receiver", minor);

	dev_info(DEV, "receiver (re)started\n");

	do {
		/* h > 0: handshake succeeded; h == 0: transient failure,
		 * retry after a second; h == -1: give up and drop the
		 * network configuration. */
		h = drbd_connect(mdev);
		if (h == 0) {
			drbd_disconnect(mdev);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			dev_warn(DEV, "Discarding network configuration.\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	} while (h == 0);

	if (h > 0) {
		if (get_net_conf(mdev)) {
			drbdd(mdev);
			put_net_conf(mdev);
		}
	}

	drbd_disconnect(mdev);

	dev_info(DEV, "receiver terminated\n");
	return 0;
}

/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return true;
}
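
/*
 * For orientation, the state-change handshake that got_RqSReply()
 * concludes looks roughly like this (the sending half is assumed to
 * live with the other drbd_send_* helpers):
 *
 *	we:   send the state change request, sleep on mdev->state_wait
 *	peer: answers with P_STATE_CHG_REPLY carrying an SS_* retcode
 *	we:   set CL_ST_CHG_SUCCESS or CL_ST_CHG_FAIL, wake the waiter
 */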

static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return true;
}
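
/*
 * Ping/PingAck double as the keepalive of the meta socket: when the
 * asender below sends a P_PING it shortens sk_rcvtimeo to ping_timeo,
 * and got_PingAck() relaxes it back to ping_int.  A PingAck that does
 * not arrive in time thus shows up as -EAGAIN in the asender receive
 * loop, which then gives up on the connection.
 */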

static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return true;
}

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	return NULL;
}
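
/*
 * The block_id in these ACK packets is an opaque cookie: the send side
 * stuffs the request pointer itself into the packet (roughly
 * "p.block_id = (unsigned long)req;", see the send path), and the peer
 * echoes it back unchanged.  _ack_id_to_req() only trusts the cookie
 * after finding it in the collision hash *and* cross-checking the
 * sector, since the pointer value may have been reused meanwhile.
 */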

typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);

		dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
			(void *)(unsigned long)id, (unsigned long long)sector);
		return false;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}
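
/*
 * Note the locking pattern above, shared with got_NegAck() below:
 * look up the request and apply __req_mod() under req_lock, but call
 * complete_master_bio() only after dropping the lock, using the
 * bio_and_error snapshot that __req_mod() filled in.
 */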

static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return true;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return false;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__ , what);
}
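
/*
 * The D_ASSERTs above encode the expected mapping of ack packet types
 * to wire protocols:
 *
 *	P_RECV_ACK	protocol B	(data received)
 *	P_WRITE_ACK	protocol C	(write completed on stable storage)
 *	P_RS_WRITE_ACK	protocol C	(resync write, also set in sync)
 *	P_DISCARD_ACK	protocol C	(concurrent write was discarded)
 *
 * Protocol A does not ack ordinary writes at all, which is why
 * got_NegAck() below must tolerate requests it can no longer find.
 */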

static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	struct drbd_request *req;
	struct bio_and_error m;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}

	spin_lock_irq(&mdev->req_lock);
	req = _ack_id_to_req(mdev, p->block_id, sector);
	if (!req) {
		spin_unlock_irq(&mdev->req_lock);
		if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
		    mdev->net_conf->wire_protocol == DRBD_PROT_B) {
			/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
			   The master bio might already be completed, therefore the
			   request is no longer in the collision hash.
			   => Do not try to validate block_id as request. */
			/* In Protocol B we might already have got a P_RECV_ACK
			   but then get a P_NEG_ACK afterwards. */
			drbd_set_out_of_sync(mdev, sector, size);
			return true;
		} else {
			dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
				(void *)(unsigned long)p->block_id, (unsigned long long)sector);
			return false;
		}
	}
	__req_mod(req, neg_acked, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__ , neg_acked);
}

static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (be16_to_cpu(h->command)) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through */
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}

static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
		mdev->start_resync_timer.expires = jiffies + HZ;
		add_timer(&mdev->start_resync_timer);
	}

	return true;
}
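
/*
 * The timer above implements the Ahead -> SyncSource transition: once
 * all application writes are drained (ap_in_flight == 0) while we are
 * C_AHEAD, arm start_resync_timer to fire in a second instead of
 * changing state right here, presumably so the actual transition runs
 * in worker context rather than in the asender.  AHEAD_TO_SYNC_SOURCE
 * keeps the timer from being armed more than once per episode.
 */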

static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}
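
/*
 * ov_left above apparently counts bitmap bits of 4 KiB each, so bit 9
 * of the counter changes once per 512 bits == 2 MiB of verified data,
 * matching the "every other megabyte" comment: the mask test is just a
 * cheap prefilter so that drbd_advance_rs_marks() is not invoked for
 * every single OVResult packet.
 */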

static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply},
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply},
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
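
/*
 * The receive loop in drbd_asender() below runs in two phases driven
 * by "expect": first read sizeof(struct p_header80) bytes and decode
 * the command, then look up its fixed pkt_size in asender_tbl[] and
 * read the remainder of the packet into the same buffer before
 * dispatching cmd->process().  Invariants of the loop, roughly:
 *
 *	cmd == NULL	=> expect == sizeof(struct p_header80)
 *	cmd != NULL	=> expect == cmd->pkt_size
 *	received	== bytes of the current packet read so far
 */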

int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;
	int ping_timeout_active = 0;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->last_received,
				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
					be32_to_cpu(h->magic),
					be16_to_cpu(h->command),
					be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
					be32_to_cpu(h->magic),
					be16_to_cpu(h->command),
					be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			mdev->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}