/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
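/*
 * For illustration: a chain of three pages A -> B -> C is linked as
 *	A->private == (unsigned long)B
 *	B->private == (unsigned long)C
 *	C->private == 0		(end of chain, see set_page_private() below)
 */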

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

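/* put_page() every page in the chain; returns the number of pages freed */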
static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

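/* Prepend the chain [chain_first, chain_last] to *head.
 * Callers are expected to serialize, typically via drbd_pp_lock;
 * the #if 1 block double checks that chain_last really is the tail. */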
static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

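/* Try to grab @number pages from the preallocated drbd_pp_pool first;
 * if the pool cannot satisfy the request, fall back to alloc_page().
 * On partial allocation, give the pages back to the pool and return
 * NULL; drbd_pp_alloc() will retry "soon". */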
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

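/* Take the req_lock only long enough to collect finished net_ee
 * entries, then free them without holding the lock. */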
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

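/* Allocate an epoch entry (EE) plus the page chain backing its payload.
 * The EE itself comes from drbd_ee_mempool; the data pages come from
 * drbd_pp_alloc(), which may only block if gfp_mask allows waiting. */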
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->collision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

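/* Counterpart to drbd_alloc_ee(): free digest, page chain, and the EE
 * itself. Per the assertions below, all bios must have completed and
 * the entry must no longer be hashed for conflict detection. */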
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	mempool_free(e, drbd_ee_mempool);
}

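/* Splice all entries off @list while holding the req_lock, then free
 * them without the lock; returns the number of entries freed. */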
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		       struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops  = sock->ops;
	__module_get((*newsock)->ops->owner);

out:
	return err;
}

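/* One recvmsg() under KERNEL_DS; unless the caller passes its own flags,
 * this waits for the full @size (MSG_WAITALL). */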
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
			   void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

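/* Like drbd_recv_short(), but always reads from the data socket, and
 * keeps going until either the full @size arrived, the peer closed the
 * connection, or an error/signal occurred. Anything short of @size
 * forces the connection into C_BROKEN_PIPE below. */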
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

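/* Active connect to the configured peer address, with the socket bound
 * to the configured local address. Returns the connected socket or NULL;
 * "expected" failures (timeout, peer not yet reachable) do not trigger
 * the transition to C_DISCONNECTING. */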
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

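/* Passive side of connection setup: bind and listen on the configured
 * local address (with some random jitter on the timeout, see below) and
 * accept a single incoming connection from the peer. */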
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

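/* The very first packet on a fresh socket is a bare header carrying
 * P_HAND_SHAKE_S or P_HAND_SHAKE_M, which lets both nodes agree on
 * which connection becomes the data socket and which the meta socket
 * (see drbd_connect() below). */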
static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;
	enum drbd_state_rv rv;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	set_bit(STATE_SENT, &mdev->flags);
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	spin_lock_irq(&mdev->req_lock);
	rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
	if (mdev->state.conn != C_WF_REPORT_PARAMS)
		clear_bit(STATE_SENT, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		return 0;

	drbd_thread_start(&mdev->asender);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

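/* Read and decode one packet header from the data socket. Both header
 * layouts are supported: h80 with a 16 bit length field, and h95 with
 * a 32 bit length field, told apart by their magic. */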
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

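/* Flush the backing device, if the current write ordering policy calls
 * for it. A failed flush permanently demotes the policy to WO_drain_io. */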
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_info(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

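/* Handle an incoming P_BARRIER packet: record the barrier number on the
 * current epoch and, depending on the write ordering policy, either
 * start a new epoch right away or first flush/drain pending writes. */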
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
		/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
				rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

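/* Receive a data block straight into the bio of a pending application
 * read request (a "diskless" read), verifying the digest if integrity
 * checking is enabled. */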
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
		sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

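/* Read one resync block off the wire and submit it as a local write;
 * the ACK to the peer is sent later from e_end_resync_block(). */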
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

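/* Resync data from the peer: write it to local disk if we can get a
 * reference on it, otherwise just drain the payload off the socket and
 * send a negative ACK. */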
Philipp Reisner02918be2010-08-20 14:35:10 +02001509static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001510{
1511 sector_t sector;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512 int ok;
Philipp Reisner02918be2010-08-20 14:35:10 +02001513 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001514
1515 sector = be64_to_cpu(p->sector);
1516 D_ASSERT(p->block_id == ID_SYNCER);
1517
1518 if (get_ldev(mdev)) {
1519 /* data is submitted to disk within recv_resync_read.
1520 * corresponding put_ldev done below on error,
1521 * or in drbd_endio_write_sec. */
1522 ok = recv_resync_read(mdev, sector, data_size);
1523 } else {
1524 if (__ratelimit(&drbd_ratelimit_state))
1525 dev_err(DEV, "Can not write resync data to local disk.\n");
1526
1527 ok = drbd_drain_block(mdev, data_size);
1528
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001529 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001530 }
1531
Philipp Reisner778f2712010-07-06 11:14:00 +02001532 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1533
Philipp Reisnerb411b362009-09-25 16:07:19 -07001534 return ok;
1535}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->collision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->collision));
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
{
	struct drbd_epoch_entry *rs_e;
	bool rv = false;

	spin_lock_irq(&mdev->req_lock);
	list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
		if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
			rv = true;
			break;
		}
	}
	spin_unlock_irq(&mdev->req_lock);

	return rv;
}
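
#if 0	/* illustrative sketch, compiled out -- not part of the driver */
/* overlaps() is provided by drbd_int.h.  Assuming sizes are in bytes
 * and sectors are 512-byte units (as in the capacity check in
 * receive_DataRequest() below), an interval-overlap test equivalent to
 * how it is used above could look like this: two half-open intervals
 * [s, s + (l >> 9)) overlap iff each starts before the other ends. */
static inline int overlaps_example(sector_t s1, int l1, sector_t s2, int l2)
{
	return s1 + (l1 >> 9) > s2 && s2 + (l2 >> 9) > s1;
}
#endif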

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around
 * (a compiled-out sketch of such a wrap-safe comparison follows this
 * function).
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;
	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);
	return ret;
}
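
#if 0	/* illustrative sketch, compiled out -- not part of the driver */
/* seq_le() comes from drbd_int.h; a wraparound-safe "a <= b" on u32
 * sequence counters can be written via the signed difference modulo
 * 2^32.  0xffffffff then compares as "one less than" 0x00000000, which
 * is exactly what the wait loop above relies on. */
static inline int seq_le_example(u32 a, u32 b)
{
	return (s32)(a - b) <= 0;
}
#endif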

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
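
#if 0	/* illustrative sketch, compiled out -- not part of the driver */
/* Sketch of the inverse direction, under the assumption that the real
 * counterpart, bio_flags_to_wire() (referenced by the comment above,
 * and living outside this file), packs the same four bits: REQ_* flags
 * go back into DP_* wire flags so that wire_flags_to_bio() undoes it. */
static u32 bio_flags_to_wire_example(unsigned long bi_rw)
{
	return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
		(bi_rw & REQ_FUA ? DP_FUA : 0) |
		(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
		(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}
#endif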

/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 * (a condensed, compiled-out sketch of this decision follows
		 * after this function)
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, collision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return true;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->collision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (mdev->state.conn == C_SYNC_TARGET)
		wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));

	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}
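
#if 0	/* illustrative sketch, compiled out -- not part of the driver */
/* Condensed sketch of the conflict decision that receive_Data()
 * implements above, with the locking, hashing and waiting stripped
 * away: scan the conflicting requests once, then pick one of three
 * outcomes. */
enum conflict_outcome { SUBMIT, DISCARD_ACK, WAIT_AND_RETRY };

static enum conflict_outcome conflict_decision(int first_pass,
					       int have_conflict,
					       int have_unacked,
					       int discard_concurrent)
{
	if (!have_conflict)
		return SUBMIT;		/* no overlap: just write */
	if (first_pass && discard_concurrent && have_unacked)
		return DISCARD_ACK;	/* drop this write, queue the ack */
	return WAIT_AND_RETRY;		/* sleep on misc_wait, rescan */
}
#endif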

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID's is_mddev_idle(): if the partition stats reveal a "significant"
 * amount (more than 64 sectors) of activity we cannot account for with our
 * own resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
			throttle = 1;
	}
	return throttle;
}
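
#if 0	/* illustrative sketch, compiled out -- not part of the driver */
/* Worked example of the rate check above, assuming the usual 4 KiB
 * bitmap granularity, i.e. Bit2KB(x) == x << 2: with the two most
 * recent sync marks 6 seconds apart and 3000 bitmap bits synced in
 * between, the short-time average is (3000 / 6) * 4 == 2000 KiB/s. */
static int throttle_example(void)
{
	unsigned long db = 3000;		/* bitmap bits synced since mark i */
	unsigned long dt = 6;			/* seconds between the two marks */
	unsigned long c_min_rate = 1000;	/* KiB/s, from sync_conf */
	unsigned long dbdt = (db / dt) << 2;	/* 500 * 4 = 2000 KiB/s */

	return dbdt > c_min_rate;	/* 1: resync is above the floor, throttle */
}
#endif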

static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain the possibly remaining payload */
		return drbd_drain_block(mdev, digest_size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->digest = di;
		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}

static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}

/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];

			dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}

	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}
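
#if 0	/* illustrative sketch, compiled out -- not part of the driver */
/* Worked example for rule 40 above: "roles at crash time" are encoded
 * as a two-bit value, lowest bit = we were primary when we crashed,
 * weight 2 = peer was primary (taken from p_uuid[UI_FLAGS]).  E.g. if
 * only the peer was primary, rct == 2 and we return -1: the peer
 * becomes C_SYNC_SOURCE, we use the bitmap as C_SYNC_TARGET. */
static int rct_example(int self_was_primary, int peer_was_primary,
		       int discard_concurrent)
{
	int rct = (self_was_primary ? 1 : 0) + (peer_was_primary ? 2 : 0);

	switch (rct) {
	case 0: return 0;	/* neither was primary: no sync */
	case 1: return 1;	/* only we were: we become source */
	case 2: return -1;	/* only peer was: peer becomes source */
	default:		/* both were primary: flag breaks the tie */
		return discard_concurrent ? -1 : 1;
	}
}
#endif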

/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
			     "assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}

static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_protocol *p = &mdev->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose	= cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return false;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return true;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
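
#if 0	/* illustrative sketch, compiled out -- not part of the driver */
/* Usage sketch for the helper above, mirroring how receive_SyncParam()
 * below consumes it: the three-way return value needs a three-way
 * check.  NULL means "no algorithm configured", an ERR_PTR means
 * allocation or validation failed (already logged), anything else is a
 * usable tfm that the caller owns and must eventually free. */
static long digest_safe_usage(struct drbd_conf *mdev, const char *alg)
{
	struct crypto_hash *tfm;

	tfm = drbd_crypto_alloc_digest_safe(mdev, alg, "verify-alg");
	if (tfm == NULL)
		return 0;		/* feature not in use */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* error already logged */
	crypto_free_hash(tfm);		/* would normally be kept in mdev */
	return 0;
}
#endif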

static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
	int ok = true;
	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    packet_size, exp_max_sz);
		return false;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
		data_size   = packet_size - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return false;

	mdev->sync_conf.rate = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX || data_size == 0) {
				dev_err(DEV, "verify-alg of wrong size, "
					"peer wants %u, accepting only up to %u byte\n",
					data_size, SHARED_SECRET_MAX);
				return false;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return false;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kzalloc of fifo_buffer failed");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size   = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;
disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}
2944
/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}
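
/* Illustration of the one-eighth (12.5%) check above: with a = 1000 and
 * b = 1200 sectors, d = 200 exceeds a>>3 = 125, so we warn; with b = 1100
 * (d = 100) we stay quiet. */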

static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_sizes *p = &mdev->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determine_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return false;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
	drbd_reconsider_max_bio_size(mdev);

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return true;
}

static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_uuids *p = &mdev->data.rbuf.uuids;
	u64 *p_uuid;
	int i, updated_uuids = 0;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) {
		dev_err(DEV, "kmalloc of p_uuid failed\n");
		return false;
	}

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids",
					BM_LOCKED_TEST_ALLOWED);
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
			updated_uuids = 1;
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test the disk state we should wait until any ongoing
	   cluster wide state change has finished.  That is important if we
	   are primary and are detaching from our disk: we need to see the
	   new disk state... */
	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	if (updated_uuids)
		drbd_print_uuids(mdev, "receiver updated UUIDs to");

	return true;
}

/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S] = C_VERIFY_T,
		[C_MASK] = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}
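
/* Example: if the peer reports "I am Primary with an UpToDate disk, you
 * are Secondary and Inconsistent, StartingSyncS", the converted view
 * reads: role = Secondary, peer = Primary, disk = Inconsistent,
 * pdsk = UpToDate, conn = C_STARTING_SYNC_T. */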

static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_req_state *p = &mdev->data.rbuf.req_state;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

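	/* mask selects which state fields the peer asks us to change, val
	 * carries the requested new values -- both still in the peer's view,
	 * hence the convert_state() calls below. */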
Philipp Reisnerb411b362009-09-25 16:07:19 -07003161 mask.i = be32_to_cpu(p->mask);
3162 val.i = be32_to_cpu(p->val);
3163
3164 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3165 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3166 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003167 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003168 }
3169
3170 mask = convert_state(mask);
3171 val = convert_state(val);
3172
3173 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3174
3175 drbd_send_sr_reply(mdev, rv);
3176 drbd_md_sync(mdev);
3177
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003178 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003179}
3180
static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_state *p = &mdev->data.rbuf.state;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->req_lock);
 retry:
	os = ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);
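
	/* We operate on a private snapshot (os/ns) from here on.  Before the
	 * final _drbd_set_state() below, req_lock is re-taken and the state
	 * re-checked against os; if it changed in between, we jump back to
	 * retry with a fresh snapshot (optimistic concurrency). */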

	/* If some other part of the code (asender thread, timeout)
	 * already decided to close the connection again,
	 * we must not "re-establish" it here. */
	if (os.conn <= C_TEAR_DOWN)
		return false;

	/* If this is the "end of sync" confirmation, usually the peer disk
	 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
	 * set) resync started in PausedSyncT, or if the timing of pause-/
	 * unpause-sync events has been "just right", the peer disk may
	 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
	 */
	if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
	    real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return true;
		}
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen if the peer runs a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.conn >= C_STARTING_SYNC_S &&
			peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
					return false;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return false;
			}
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.i != os.i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
		return false;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS) {
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_current_state(mdev);
		}
	}

	mdev->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return true;
}

static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_print_uuids(mdev, "updated sync uuid");
		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return true;
}

/**
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
		     unsigned long *buffer, struct bm_xfer_ctx *c)
{
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);
	int err;

	if (want != data_size) {
		dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
		return -EIO;
	}
	if (want == 0)
		return 0;
	err = drbd_recv(mdev, buffer, want);
	if (err != want) {
		if (err >= 0)
			err = -EIO;
		return err;
	}

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return 1;
}
3408
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003409/**
3410 * recv_bm_rle_bits
3411 *
3412 * Return 0 when done, 1 when another iteration is needed, and a negative error
3413 * code upon failure.
3414 */
3415static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003416recv_bm_rle_bits(struct drbd_conf *mdev,
3417 struct p_compressed_bm *p,
3418 struct bm_xfer_ctx *c)
3419{
3420 struct bitstream bs;
3421 u64 look_ahead;
3422 u64 rl;
3423 u64 tmp;
3424 unsigned long s = c->bit_offset;
3425 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003426 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003427 int toggle = DCBP_get_start(p);
3428 int have;
3429 int bits;
3430
3431 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3432
3433 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3434 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003435 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003436
3437 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3438 bits = vli_decode_bits(&rl, look_ahead);
3439 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003440 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003441
3442 if (toggle) {
3443 e = s + rl -1;
3444 if (e >= c->bm_bits) {
3445 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003446 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003447 }
3448 _drbd_bm_set_bits(mdev, s, e);
3449 }
3450
3451 if (have < bits) {
3452 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3453 have, bits, look_ahead,
3454 (unsigned int)(bs.cur.b - p->code),
3455 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003456 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003457 }
3458 look_ahead >>= bits;
3459 have -= bits;
3460
3461 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3462 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003463 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003464 look_ahead |= tmp << have;
3465 have += bits;
3466 }
3467
3468 c->bit_offset = s;
3469 bm_xfer_ctx_bit_to_word_offset(c);
3470
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003471 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003472}
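
/* Sketch of the encoding consumed above: the payload is a sequence of
 * VLI-encoded run lengths, alternating between runs of clear and set
 * bits; DCBP_get_start() tells which kind comes first.  With made-up
 * runs 5,3,8 and a "clear" start, bits 5..7 of this chunk end up set,
 * while bits 0..4 and 8..15 stay clear. */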

/**
 * decode_bitmap_c
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	if (DCBP_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return -EIO;
}

void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header80) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
		: (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}
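
/* Worked example of the per-mille math above (illustrative numbers):
 * plain = 100000 bytes, total = 12345 bytes
 *   -> r = 1000 * 12345 / 100000 = 123; r = 1000 - 123 = 877,
 * reported as "compression: 87.7%". */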

/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we used big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct bm_xfer_ctx c;
	void *buffer;
	int err;
	int ok = false;
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
	/* you are supposed to send additional out-of-sync information
	 * if you actually set bits during this phase */

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	buffer = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	for (;;) {
		if (cmd == P_BITMAP) {
			err = receive_bitmap_plain(mdev, data_size, buffer, &c);
		} else if (cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buffer */
			p = buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
				goto out;
			if (data_size <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
				goto out;
			}
			err = decode_bitmap_c(mdev, p, &c);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
			goto out;
		}

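		/* stats: index 1 counts plain P_BITMAP chunks, index 0 the
		 * RLE-compressed ones; INFO_bm_xfer_stats() below prints
		 * them in exactly that order. */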
		c.packets[cmd == P_BITMAP]++;
		c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;

		if (err <= 0) {
			if (err < 0)
				goto out;
			break;
		}
		if (!drbd_recv_header(mdev, &cmd, &data_size))
			goto out;
	}

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(rv == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}

	ok = true;
 out:
	drbd_bm_unlock(mdev);
	if (ok && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	free_page((unsigned long) buffer);
	return ok;
}

static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
		 cmd, data_size);

	size = data_size;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(mdev, sink, want);
		ERR_IF(r <= 0) break;
		size -= r;
	}
	return size == 0;
}

static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->data.socket);

	return true;
}

static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_block_desc *p = &mdev->data.rbuf.block_desc;

	switch (mdev->state.conn) {
	case C_WF_SYNC_UUID:
	case C_WF_BITMAP_T:
	case C_BEHIND:
		break;
	default:
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
			drbd_conn_str(mdev->state.conn));
	}

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return true;
}

typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	drbd_cmd_handler_f function;
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	      = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	      = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]     = { 1, sizeof(struct p_data), receive_RSDataReply },
	[P_BARRIER]	      = { 0, sizeof(struct p_barrier), receive_Barrier },
	[P_BITMAP]	      = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_UNPLUG_REMOTE]     = { 0, sizeof(struct p_header80), receive_UnplugRemote },
	[P_DATA_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST]   = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	      = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_SYNC_PARAM89]      = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_PROTOCOL]	      = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	      = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	      = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	      = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]     = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]	      = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]	      = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]	      = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST]   = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]	      = { 0, sizeof(struct p_delay_probe93), receive_skip },
	[P_OUT_OF_SYNC]	      = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
	/* anything missing from this table is in
	 * the asender_tbl, see get_asender_cmd */
	[P_MAX_CMD]	      = { 0, 0, NULL },
};

/* All handler functions that expect a sub-header get that sub-header in
   mdev->data.rbuf.header.head.payload.

   Usually the callback can find the usual p_header in
   mdev->data.rbuf.header.head, but it may not rely on that,
   since there is also p_header95! */

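/* Dispatch sketch (illustrative): when e.g. a P_SIZES packet arrives,
 * drbd_recv_header() has already consumed the 8 byte header; drbdd()
 * then reads sizeof(struct p_sizes) minus that header as "sub header"
 * and calls receive_sizes() with whatever payload length remains
 * (0 in this case, as P_SIZES carries no payload beyond the struct). */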
static void drbdd(struct drbd_conf *mdev)
{
	union p_header *header = &mdev->data.rbuf.header;
	unsigned int packet_size;
	enum drbd_packets cmd;
	size_t shs; /* sub header size */
	int rv;

	while (get_t_state(&mdev->receiver) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (!drbd_recv_header(mdev, &cmd, &packet_size))
			goto err_out;

		if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
			goto err_out;
		}

		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
			goto err_out;
		}

		if (shs) {
			rv = drbd_recv(mdev, &header->h80.payload, shs);
			if (unlikely(rv != shs)) {
				if (!signal_pending(current))
					dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
				goto err_out;
			}
		}

		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);

		if (unlikely(!rv)) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
			    cmdname(cmd), packet_size);
			goto err_out;
		}
	}

	if (0) {
	err_out:
		drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	}
	/* If we leave here, we probably want to update at least the
	 * "Connected" indicator on stable storage. Do so explicitly here. */
	drbd_md_sync(mdev);
}

void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&mdev->data.work, &barr.w);
	wait_for_completion(&barr.done);
}

void drbd_free_tl_hash(struct drbd_conf *mdev)
{
	struct hlist_head *h;

	spin_lock_irq(&mdev->req_lock);

	if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
		spin_unlock_irq(&mdev->req_lock);
		return;
	}
	/* paranoia code */
	for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
				(int)(h - mdev->ee_hash), h->first);
	kfree(mdev->ee_hash);
	mdev->ee_hash = NULL;
	mdev->ee_hash_s = 0;

	/* paranoia code */
	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
				(int)(h - mdev->tl_hash), h->first);
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
	spin_unlock_irq(&mdev->req_lock);
}

static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;

	/* We are about to start the cleanup after connection loss.
	 * Make sure drbd_make_request knows about that.
	 * Usually we should be in some network failure state already,
	 * but just in case we are not, we fix it up here.
	 */
	drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->asender);
	drbd_free_sock(mdev);

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee().  If we do this too early, we might
	 * miss some resync ee and pages.*/
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
		drbd_try_outdate_peer_async(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	if (os.conn == C_DISCONNECTING) {
		wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);

		crypto_free_hash(mdev->cram_hmac_tfm);
		mdev->cram_hmac_tfm = NULL;

		kfree(mdev->net_conf);
		mdev->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->data.socket == NULL) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
			    (struct p_header80 *)p, sizeof(*p), 0);
	mutex_unlock(&mdev->data.mutex);
	return ok;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	unsigned int length;
	enum drbd_packets cmd;
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		return 0;

	if (cmd != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
		     cmdname(cmd), cmd);
		return -1;
	}

	if (length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
		     expect, length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
	    PRO_VERSION_MIN, PRO_VERSION_MAX,
	    p->protocol_min, p->protocol_max);
	return -1;
}
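
/* Negotiation example (hypothetical numbers): if we support protocols
 * 86..96 and the peer announces 90..100, the ranges overlap and we agree
 * on min(96, 100) = 96.  A peer announcing 97..100 would take the
 * "incompatible dialects" path above instead. */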

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

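/* Challenge-response flow implemented below (both sides act the same):
 *   1. send CHALLENGE_LEN random bytes (P_AUTH_CHALLENGE)
 *   2. receive the peer's challenge
 *   3. reply with HMAC(shared_secret, peer's challenge) (P_AUTH_RESPONSE)
 *   4. receive the peer's response and compare it against our own
 *      HMAC(shared_secret, my_challenge)
 * The shared secret itself never crosses the wire. */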
static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	enum drbd_packets cmd;
	unsigned int length;
	int rv;

	desc.tfm = mdev->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
				(u8 *)mdev->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
		    cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, length);

	if (rv != length) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length != resp_size) {
		dev_err(DEV, "AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
		     resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
4200#endif
4201
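/*
 * Entry point of the receiver thread: (re)establish the connection,
 * run the main receive loop in drbdd(), and clean up once the
 * connection is lost or the network configuration is discarded.
 */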
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	unsigned int minor = mdev_to_minor(mdev);
	int h;

	sprintf(current->comm, "drbd%d_receiver", minor);

	dev_info(DEV, "receiver (re)started\n");

	do {
		h = drbd_connect(mdev);
		if (h == 0) {
			drbd_disconnect(mdev);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			dev_warn(DEV, "Discarding network configuration.\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	} while (h == 0);

	if (h > 0) {
		if (get_net_conf(mdev)) {
			drbdd(mdev);
			put_net_conf(mdev);
		}
	}

	drbd_disconnect(mdev);

	dev_info(DEV, "receiver terminated\n");
	return 0;
}

/* ********* acknowledge sender ******** */

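/* Peer's reply to one of our state change requests: record success or
 * failure in the device flags and wake up whoever is waiting on state_wait. */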
static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return true;
}

static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return true;
}

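/* The peer found a block to be in sync already (checksum-based resync,
 * protocol 89+): mark it in sync locally and account it in the resync
 * statistics. */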
static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return true;
}

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	return NULL;
}

typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

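/* Look up the request an ACK refers to (under req_lock), apply the state
 * transition 'what' to it, and complete the master bio if that finished
 * the request. */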
static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);

		dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
			(void *)(unsigned long)id, (unsigned long long)sector);
		return false;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

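/* One of several write ACK flavors: for syncer blocks just mark the area
 * in sync; for application writes map the packet type to the matching
 * request event and let the request state machine do the rest. */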
static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return true;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return false;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}

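/* The peer could not satisfy a write: for syncer requests account the
 * failed I/O; for application writes find the request (it may already be
 * gone in protocol A/B) and negatively acknowledge it. */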
static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	struct drbd_request *req;
	struct bio_and_error m;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}

	spin_lock_irq(&mdev->req_lock);
	req = _ack_id_to_req(mdev, p->block_id, sector);
	if (!req) {
		spin_unlock_irq(&mdev->req_lock);
		if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
		    mdev->net_conf->wire_protocol == DRBD_PROT_B) {
			/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
			   The master bio might already be completed, therefore the
			   request is no longer in the collision hash.
			   => Do not try to validate block_id as request. */
			/* In Protocol B we might already have got a P_RECV_ACK
			   but then get a P_NEG_ACK afterwards. */
			drbd_set_out_of_sync(mdev, sector, size);
			return true;
		} else {
			dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
				(void *)(unsigned long)p->block_id, (unsigned long long)sector);
			return false;
		}
	}
	__req_mod(req, neg_acked, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

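/* The peer could not satisfy a data read request: fail the original
 * application request. */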
static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}

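/* The peer could not (P_NEG_RS_DREPLY) or did not want to (P_RS_CANCEL)
 * serve a resync read request. */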
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (be16_to_cpu(h->command)) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
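			/* fall through */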
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}

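/* The peer has processed a whole epoch: release the corresponding
 * requests from the transfer log, and possibly kick off the transition
 * from Ahead to SyncSource. */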
static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
		mdev->start_resync_timer.expires = jiffies + HZ;
		add_timer(&mdev->start_resync_timer);
	}

	return true;
}

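/* Result of an online verify request: record out-of-sync blocks, update
 * the progress marks, and queue the finish work once the last reply
 * came in. */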
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.\n");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}

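/* Nothing to do for this packet; it is consumed by the generic
 * header/payload logic in drbd_asender(). */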
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]            = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]        = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]        = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]       = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]     = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]         = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]      = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]       = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]         = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}

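/*
 * The asender thread: sends pings and the ACKs for completed epoch
 * entries on the meta socket, and processes the small ACK-like packets
 * the peer sends back, so acknowledgements never queue behind bulk data.
 */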
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf = h;
	int received = 0;
	int expect = sizeof(struct p_header80);
	int empty;
	int ping_timeout_active = 0;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyway. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR        (on meta) we got a signal
		 * -EAGAIN       (on meta) rcvtimeo expired
		 * -ECONNRESET   other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0       other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0       : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->last_received,
				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

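		/* A full header has arrived: validate the magic, look up the
		 * handler, and extend 'expect' to the full packet size. */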
		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			mdev->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf      = h;
			received = 0;
			expect   = sizeof(struct p_header80);
			cmd      = NULL;
		}
	}

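	/* The if (0) blocks below are only reachable via the goto labels. */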
	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}