/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

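/* Release every page of the chain back to the page allocator and
 * return the number of pages freed. */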
static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

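/* Prepend the chain chain_first..chain_last to *head.  The caller is
 * responsible for locking *head; chain_last must really be the tail of
 * the chain (sanity checked below). */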
static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

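/* Try to take @number pages off the global drbd_pp_pool; if the pool
 * cannot satisfy the request, fall back to alloc_page(GFP_TRY).
 * Returns NULL if @number pages are not immediately available; any
 * partially allocated chain is given back to the pool. */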
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

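/* Move entries from mdev->net_ee whose pages are no longer busy (see
 * drbd_ee_has_active_page()) over to @to_be_freed; stops at the first
 * entry that is not yet finished.  Caller must hold the req_lock. */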
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

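/* Allocate an epoch entry ("ee") from the mempool plus a page chain
 * large enough for @data_size bytes.  Returns NULL on allocation
 * failure or when fault injection (DRBD_FAULT_AL_EE) triggers. */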
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->collision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

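/* Counterpart to drbd_alloc_ee(): free the optional digest, the page
 * chain and the entry itself.  @is_net selects which pp_in_use counter
 * the pages are accounted against. */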
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	mempool_free(e, drbd_ee_mempool);
}

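/* Splice @list off under the req_lock and free every entry on it.
 * Returns the number of entries freed. */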
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops  = sock->ops;
	__module_get((*newsock)->ops->owner);

out:
	return err;
}

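/* One receive on @sock, run under KERNEL_DS so the kvec may point into
 * kernel memory.  Unless the caller passes explicit flags, MSG_WAITALL
 * is used and the call blocks until @size bytes arrived. */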
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

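/* Receive exactly @size bytes from the data socket.  Any short read,
 * signal or peer shutdown forces the connection into C_BROKEN_PIPE. */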
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on  sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else	{
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

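/* Active (connecting) side of connection setup: create a TCP socket,
 * bind it to the configured local address with port 0, and connect()
 * to the peer.  Returns the socket, or NULL on failure. */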
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

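/* Passive (listening) side of connection setup: bind to the configured
 * local address, listen with a randomly jittered timeout, and accept a
 * single incoming connection.  Returns the established socket or NULL. */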
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

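/* First-packet helpers for connection setup: the first packet sent on
 * each socket tells the data socket (P_HAND_SHAKE_S) and the meta
 * socket (P_HAND_SHAKE_M) apart; drbd_recv_fp() returns 0xffff for
 * anything it does not recognize. */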
static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	drbd_thread_start(&mdev->asender);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

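/* Receive and decode one packet header from the data socket, accepting
 * both the h80 format (16 bit length) and the h95 format (32 bit
 * length).  Returns false on a short read or bad magic. */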
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

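/* Flush the local backing device if the current write ordering method
 * requires it; on failure, degrade the write ordering to WO_drain_io. */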
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_info(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}

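/* Handler for P_BARRIER: close the current write epoch.  Depending on
 * the write ordering method this waits for in-flight writes and flushes
 * the backing device before a fresh epoch is installed (or the old one
 * is recycled). */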
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

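/* Receive the payload of a P_DATA_REPLY directly into the bio of the
 * original (diskless) read request, verifying the data digest if one is
 * configured.  Returns 1 on success, 0 otherwise. */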
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

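/* Read one resync block off the wire into a new epoch entry and submit
 * it as a local write; the ack is sent from e_end_resync_block() once
 * the write has completed. */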
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

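/* Handler for P_DATA_REPLY: look up the read request we sent to the
 * peer (because the local disk was not usable) and complete it with the
 * received data. */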
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->collision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

1570static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1571{
1572 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1573 int ok = 1;
1574
1575 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1576 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1577
1578 spin_lock_irq(&mdev->req_lock);
Bart Van Assche24c48302011-05-21 18:32:29 +02001579 D_ASSERT(!hlist_unhashed(&e->collision));
1580 hlist_del_init(&e->collision);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001581 spin_unlock_irq(&mdev->req_lock);
1582
1583 dec_unacked(mdev);
1584
1585 return ok;
1586}
1587
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001588static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
1589{
1590
1591 struct drbd_epoch_entry *rs_e;
 1592	bool rv = false;
1593
1594 spin_lock_irq(&mdev->req_lock);
1595 list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
1596 if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
 1597			rv = true;
1598 break;
1599 }
1600 }
1601 spin_unlock_irq(&mdev->req_lock);
1602
1603 return rv;
1604}
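
/* Illustrative sketch, not part of the driver: the overlaps() check used
 * above is assumed to take two extents as (start sector, size in bytes)
 * pairs and test them for intersection.  Under that assumption an
 * equivalent helper (the name is made up for illustration) could look like:
 */
static inline bool extents_overlap_sketch(sector_t s1, unsigned int size1,
					  sector_t s2, unsigned int size2)
{
	/* half-open intervals [s, s + (size >> 9)) in 512 byte sectors */
	return s1 < s2 + (size2 >> 9) && s2 < s1 + (size1 >> 9);
}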
1605
Philipp Reisnerb411b362009-09-25 16:07:19 -07001606/* Called from receive_Data.
1607 * Synchronize packets on sock with packets on msock.
1608 *
1609 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1610 * packet traveling on msock, they are still processed in the order they have
1611 * been sent.
1612 *
 1613 * Note: we don't care about Ack packets overtaking P_DATA packets.
1614 *
1615 * In case packet_seq is larger than mdev->peer_seq number, there are
1616 * outstanding packets on the msock. We wait for them to arrive.
1617 * In case we are the logically next packet, we update mdev->peer_seq
1618 * ourselves. Correctly handles 32bit wrap around.
1619 *
1620 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1621 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1622 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 1623 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1624 *
1625 * returns 0 if we may process the packet,
1626 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1627static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1628{
1629 DEFINE_WAIT(wait);
1630 unsigned int p_seq;
1631 long timeout;
1632 int ret = 0;
1633 spin_lock(&mdev->peer_seq_lock);
1634 for (;;) {
1635 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1636 if (seq_le(packet_seq, mdev->peer_seq+1))
1637 break;
1638 if (signal_pending(current)) {
1639 ret = -ERESTARTSYS;
1640 break;
1641 }
1642 p_seq = mdev->peer_seq;
1643 spin_unlock(&mdev->peer_seq_lock);
1644 timeout = schedule_timeout(30*HZ);
1645 spin_lock(&mdev->peer_seq_lock);
1646 if (timeout == 0 && p_seq == mdev->peer_seq) {
1647 ret = -ETIMEDOUT;
1648 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1649 break;
1650 }
1651 }
1652 finish_wait(&mdev->seq_wait, &wait);
1653 if (mdev->peer_seq+1 == packet_seq)
1654 mdev->peer_seq++;
1655 spin_unlock(&mdev->peer_seq_lock);
1656 return ret;
1657}
1658
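/* Illustrative sketch, not the driver's definition: the seq_le() used in
 * drbd_wait_peer_seq() above has to compare 32bit sequence numbers in a
 * wrap-around safe way.  One common way to do that is via the signed
 * difference; the helper name below is made up for illustration only:
 */
static inline int seq_le_sketch(u32 a, u32 b)
{
	/* "a is before or equal to b", valid while the two sequence
	 * numbers are less than 2^31 apart */
	return (s32)(a - b) <= 0;
}
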
Lars Ellenberg688593c2010-11-17 22:25:03 +01001659/* see also bio_flags_to_wire()
1660 * DRBD_REQ_*, because we need to semantically map the flags to data packet
1661 * flags and back. We may replicate to other kernel versions. */
1662static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001663{
Lars Ellenberg688593c2010-11-17 22:25:03 +01001664 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1665 (dpf & DP_FUA ? REQ_FUA : 0) |
1666 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1667 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001668}
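
/* Example, for illustration only: a peer write sent with DP_FLUSH and DP_FUA
 * set in p->dp_flags comes out of wire_flags_to_bio() as REQ_FLUSH | REQ_FUA,
 * which receive_Data() below ORs into the rw flags of the local submit. */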
1669
Philipp Reisnerb411b362009-09-25 16:07:19 -07001670/* mirrored write */
Philipp Reisner02918be2010-08-20 14:35:10 +02001671static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001672{
1673 sector_t sector;
1674 struct drbd_epoch_entry *e;
Philipp Reisner02918be2010-08-20 14:35:10 +02001675 struct p_data *p = &mdev->data.rbuf.data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001676 int rw = WRITE;
1677 u32 dp_flags;
1678
Philipp Reisnerb411b362009-09-25 16:07:19 -07001679 if (!get_ldev(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001680 spin_lock(&mdev->peer_seq_lock);
1681 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1682 mdev->peer_seq++;
1683 spin_unlock(&mdev->peer_seq_lock);
1684
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001685 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001686 atomic_inc(&mdev->current_epoch->epoch_size);
1687 return drbd_drain_block(mdev, data_size);
1688 }
1689
1690 /* get_ldev(mdev) successful.
1691 * Corresponding put_ldev done either below (on various errors),
1692 * or in drbd_endio_write_sec, if we successfully submit the data at
1693 * the end of this function. */
1694
1695 sector = be64_to_cpu(p->sector);
1696 e = read_in_block(mdev, p->block_id, sector, data_size);
1697 if (!e) {
1698 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001699 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001700 }
1701
Philipp Reisnerb411b362009-09-25 16:07:19 -07001702 e->w.cb = e_end_block;
1703
Lars Ellenberg688593c2010-11-17 22:25:03 +01001704 dp_flags = be32_to_cpu(p->dp_flags);
1705 rw |= wire_flags_to_bio(mdev, dp_flags);
1706
1707 if (dp_flags & DP_MAY_SET_IN_SYNC)
1708 e->flags |= EE_MAY_SET_IN_SYNC;
1709
Philipp Reisnerb411b362009-09-25 16:07:19 -07001710 spin_lock(&mdev->epoch_lock);
1711 e->epoch = mdev->current_epoch;
1712 atomic_inc(&e->epoch->epoch_size);
1713 atomic_inc(&e->epoch->active);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001714 spin_unlock(&mdev->epoch_lock);
1715
Philipp Reisnerb411b362009-09-25 16:07:19 -07001716 /* I'm the receiver, I do hold a net_cnt reference. */
1717 if (!mdev->net_conf->two_primaries) {
1718 spin_lock_irq(&mdev->req_lock);
1719 } else {
1720 /* don't get the req_lock yet,
1721 * we may sleep in drbd_wait_peer_seq */
1722 const int size = e->size;
1723 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1724 DEFINE_WAIT(wait);
1725 struct drbd_request *i;
1726 struct hlist_node *n;
1727 struct hlist_head *slot;
1728 int first;
1729
1730 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1731 BUG_ON(mdev->ee_hash == NULL);
1732 BUG_ON(mdev->tl_hash == NULL);
1733
1734 /* conflict detection and handling:
1735 * 1. wait on the sequence number,
1736 * in case this data packet overtook ACK packets.
1737 * 2. check our hash tables for conflicting requests.
 1738	 * we only need to walk the tl_hash, since an ee cannot
 1739	 * have a conflict with another ee: on the submitting
1740 * node, the corresponding req had already been conflicting,
1741 * and a conflicting req is never sent.
1742 *
1743 * Note: for two_primaries, we are protocol C,
1744 * so there cannot be any request that is DONE
1745 * but still on the transfer log.
1746 *
1747 * unconditionally add to the ee_hash.
1748 *
1749 * if no conflicting request is found:
1750 * submit.
1751 *
1752 * if any conflicting request is found
1753 * that has not yet been acked,
1754 * AND I have the "discard concurrent writes" flag:
1755 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1756 *
1757 * if any conflicting request is found:
1758 * block the receiver, waiting on misc_wait
1759 * until no more conflicting requests are there,
1760 * or we get interrupted (disconnect).
1761 *
1762 * we do not just write after local io completion of those
1763 * requests, but only after req is done completely, i.e.
1764 * we wait for the P_DISCARD_ACK to arrive!
1765 *
1766 * then proceed normally, i.e. submit.
1767 */
1768 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1769 goto out_interrupted;
1770
1771 spin_lock_irq(&mdev->req_lock);
1772
Bart Van Assche24c48302011-05-21 18:32:29 +02001773 hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001774
1775#define OVERLAPS overlaps(i->sector, i->size, sector, size)
1776 slot = tl_hash_slot(mdev, sector);
1777 first = 1;
1778 for (;;) {
1779 int have_unacked = 0;
1780 int have_conflict = 0;
1781 prepare_to_wait(&mdev->misc_wait, &wait,
1782 TASK_INTERRUPTIBLE);
Bart Van Assche24c48302011-05-21 18:32:29 +02001783 hlist_for_each_entry(i, n, slot, collision) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001784 if (OVERLAPS) {
1785 /* only ALERT on first iteration,
1786 * we may be woken up early... */
1787 if (first)
1788 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1789 " new: %llus +%u; pending: %llus +%u\n",
1790 current->comm, current->pid,
1791 (unsigned long long)sector, size,
1792 (unsigned long long)i->sector, i->size);
1793 if (i->rq_state & RQ_NET_PENDING)
1794 ++have_unacked;
1795 ++have_conflict;
1796 }
1797 }
1798#undef OVERLAPS
1799 if (!have_conflict)
1800 break;
1801
1802 /* Discard Ack only for the _first_ iteration */
1803 if (first && discard && have_unacked) {
1804 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1805 (unsigned long long)sector);
1806 inc_unacked(mdev);
1807 e->w.cb = e_send_discard_ack;
1808 list_add_tail(&e->w.list, &mdev->done_ee);
1809
1810 spin_unlock_irq(&mdev->req_lock);
1811
1812 /* we could probably send that P_DISCARD_ACK ourselves,
1813 * but I don't like the receiver using the msock */
1814
1815 put_ldev(mdev);
1816 wake_asender(mdev);
1817 finish_wait(&mdev->misc_wait, &wait);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001818 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001819 }
1820
1821 if (signal_pending(current)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001822 hlist_del_init(&e->collision);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001823
1824 spin_unlock_irq(&mdev->req_lock);
1825
1826 finish_wait(&mdev->misc_wait, &wait);
1827 goto out_interrupted;
1828 }
1829
1830 spin_unlock_irq(&mdev->req_lock);
1831 if (first) {
1832 first = 0;
1833 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1834 "sec=%llus\n", (unsigned long long)sector);
1835 } else if (discard) {
1836 /* we had none on the first iteration.
1837 * there must be none now. */
1838 D_ASSERT(have_unacked == 0);
1839 }
1840 schedule();
1841 spin_lock_irq(&mdev->req_lock);
1842 }
1843 finish_wait(&mdev->misc_wait, &wait);
1844 }
1845
1846 list_add(&e->w.list, &mdev->active_ee);
1847 spin_unlock_irq(&mdev->req_lock);
1848
Philipp Reisnerb6a370ba2012-02-19 01:27:53 +01001849 if (mdev->state.conn == C_SYNC_TARGET)
1850 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
1851
Philipp Reisnerb411b362009-09-25 16:07:19 -07001852 switch (mdev->net_conf->wire_protocol) {
1853 case DRBD_PROT_C:
1854 inc_unacked(mdev);
1855 /* corresponding dec_unacked() in e_end_block()
1856 * respective _drbd_clear_done_ee */
1857 break;
1858 case DRBD_PROT_B:
1859 /* I really don't like it that the receiver thread
1860 * sends on the msock, but anyways */
1861 drbd_send_ack(mdev, P_RECV_ACK, e);
1862 break;
1863 case DRBD_PROT_A:
1864 /* nothing to do */
1865 break;
1866 }
1867
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001868 if (mdev->state.pdsk < D_INCONSISTENT) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001869 /* In case we have the only disk of the cluster, */
1870 drbd_set_out_of_sync(mdev, e->sector, e->size);
1871 e->flags |= EE_CALL_AL_COMPLETE_IO;
Lars Ellenberg6719fb02010-10-18 23:04:07 +02001872 e->flags &= ~EE_MAY_SET_IN_SYNC;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001873 drbd_al_begin_io(mdev, e->sector);
1874 }
1875
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001876 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001877 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001878
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001879 /* don't care for the reason here */
1880 dev_err(DEV, "submit failed, triggering re-connect\n");
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001881 spin_lock_irq(&mdev->req_lock);
1882 list_del(&e->w.list);
Bart Van Assche24c48302011-05-21 18:32:29 +02001883 hlist_del_init(&e->collision);
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02001884 spin_unlock_irq(&mdev->req_lock);
1885 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1886 drbd_al_complete_io(mdev, e->sector);
1887
Philipp Reisnerb411b362009-09-25 16:07:19 -07001888out_interrupted:
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01001889 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001890 put_ldev(mdev);
1891 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001892 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001893}
1894
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001895/* We may throttle resync, if the lower device seems to be busy,
1896 * and current sync rate is above c_min_rate.
1897 *
1898 * To decide whether or not the lower device is busy, we use a scheme similar
 1899 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 1900 * amount (more than 64 sectors) of activity we cannot account for with our own resync
1901 * activity, it obviously is "busy".
1902 *
1903 * The current sync rate used here uses only the most recent two step marks,
1904 * to have a short time average so we can react faster.
1905 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01001906int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001907{
1908 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1909 unsigned long db, dt, dbdt;
Philipp Reisnere3555d82010-11-07 15:56:29 +01001910 struct lc_element *tmp;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001911 int curr_events;
1912 int throttle = 0;
1913
1914 /* feature disabled? */
1915 if (mdev->sync_conf.c_min_rate == 0)
1916 return 0;
1917
Philipp Reisnere3555d82010-11-07 15:56:29 +01001918 spin_lock_irq(&mdev->al_lock);
1919 tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1920 if (tmp) {
1921 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1922 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1923 spin_unlock_irq(&mdev->al_lock);
1924 return 0;
1925 }
1926 /* Do not slow down if app IO is already waiting for this extent */
1927 }
1928 spin_unlock_irq(&mdev->al_lock);
1929
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001930 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1931 (int)part_stat_read(&disk->part0, sectors[1]) -
1932 atomic_read(&mdev->rs_sect_ev);
Philipp Reisnere3555d82010-11-07 15:56:29 +01001933
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001934 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1935 unsigned long rs_left;
1936 int i;
1937
1938 mdev->rs_last_events = curr_events;
1939
1940 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1941 * approx. */
Lars Ellenberg2649f082010-11-05 10:05:47 +01001942 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
1943
1944 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
1945 rs_left = mdev->ov_left;
1946 else
1947 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001948
1949 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
1950 if (!dt)
1951 dt++;
1952 db = mdev->rs_mark_left[i] - rs_left;
1953 dbdt = Bit2KB(db/dt);
1954
1955 if (dbdt > mdev->sync_conf.c_min_rate)
1956 throttle = 1;
1957 }
1958 return throttle;
1959}
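
/* Worked example, with made-up numbers: assuming sync marks are advanced
 * every 3 seconds, the two most recent steps above span roughly dt = 6
 * seconds.  If db = 1500 bitmap bits (4KiB granularity assumed) were cleared
 * in that window, dbdt = Bit2KB(1500 / 6) = Bit2KB(250) = 1000 KiB/s.  The
 * rate is only computed at all when the curr_events check above sees more
 * than 64 sectors of activity we cannot attribute to our own resync, and we
 * only return throttle = 1 if dbdt exceeds the configured c_min_rate. */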
1960
1961
Philipp Reisner02918be2010-08-20 14:35:10 +02001962static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001963{
1964 sector_t sector;
1965 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1966 struct drbd_epoch_entry *e;
1967 struct digest_info *di = NULL;
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001968 int size, verb;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001969 unsigned int fault_type;
Philipp Reisner02918be2010-08-20 14:35:10 +02001970 struct p_block_req *p = &mdev->data.rbuf.block_req;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001971
1972 sector = be64_to_cpu(p->sector);
1973 size = be32_to_cpu(p->blksize);
1974
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01001975 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001976 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1977 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001978 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001979 }
1980 if (sector + (size>>9) > capacity) {
1981 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1982 (unsigned long long)sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001983 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001984 }
1985
1986 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02001987 verb = 1;
1988 switch (cmd) {
1989 case P_DATA_REQUEST:
1990 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
1991 break;
1992 case P_RS_DATA_REQUEST:
1993 case P_CSUM_RS_REQUEST:
1994 case P_OV_REQUEST:
1995 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
1996 break;
1997 case P_OV_REPLY:
1998 verb = 0;
1999 dec_rs_pending(mdev);
2000 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2001 break;
2002 default:
2003 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2004 cmdname(cmd));
2005 }
2006 if (verb && __ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002007 dev_err(DEV, "Can not satisfy peer's read request, "
2008 "no local data.\n");
Philipp Reisnerb18b37b2010-10-13 15:32:44 +02002009
Lars Ellenberga821cc42010-09-06 12:31:37 +02002010 	/* drain possible payload */
2011 return drbd_drain_block(mdev, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002012 }
2013
2014 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2015 * "criss-cross" setup, that might cause write-out on some other DRBD,
2016 * which in turn might block on the other node at this very place. */
2017 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2018 if (!e) {
2019 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002020 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002021 }
2022
Philipp Reisner02918be2010-08-20 14:35:10 +02002023 switch (cmd) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002024 case P_DATA_REQUEST:
2025 e->w.cb = w_e_end_data_req;
2026 fault_type = DRBD_FAULT_DT_RD;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002027 /* application IO, don't drbd_rs_begin_io */
2028 goto submit;
2029
Philipp Reisnerb411b362009-09-25 16:07:19 -07002030 case P_RS_DATA_REQUEST:
2031 e->w.cb = w_e_end_rsdata_req;
2032 fault_type = DRBD_FAULT_RS_RD;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002033 /* used in the sector offset progress display */
2034 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002035 break;
2036
2037 case P_OV_REPLY:
2038 case P_CSUM_RS_REQUEST:
2039 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002040 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2041 if (!di)
2042 goto out_free_e;
2043
2044 di->digest_size = digest_size;
2045 di->digest = (((char *)di)+sizeof(struct digest_info));
2046
Lars Ellenbergc36c3ce2010-08-11 20:42:55 +02002047 e->digest = di;
2048 e->flags |= EE_HAS_DIGEST;
2049
Philipp Reisnerb411b362009-09-25 16:07:19 -07002050 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2051 goto out_free_e;
2052
Philipp Reisner02918be2010-08-20 14:35:10 +02002053 if (cmd == P_CSUM_RS_REQUEST) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002054 D_ASSERT(mdev->agreed_pro_version >= 89);
2055 e->w.cb = w_e_end_csum_rs_req;
Lars Ellenberg5f9915b2010-11-09 14:15:24 +01002056 /* used in the sector offset progress display */
2057 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
Philipp Reisner02918be2010-08-20 14:35:10 +02002058 } else if (cmd == P_OV_REPLY) {
Lars Ellenberg2649f082010-11-05 10:05:47 +01002059 /* track progress, we may need to throttle */
2060 atomic_add(size >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002061 e->w.cb = w_e_end_ov_reply;
2062 dec_rs_pending(mdev);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002063 /* drbd_rs_begin_io done when we sent this request,
2064 * but accounting still needs to be done. */
2065 goto submit_for_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002066 }
2067 break;
2068
2069 case P_OV_REQUEST:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002070 if (mdev->ov_start_sector == ~(sector_t)0 &&
2071 mdev->agreed_pro_version >= 90) {
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002072 unsigned long now = jiffies;
2073 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002074 mdev->ov_start_sector = sector;
2075 mdev->ov_position = sector;
Lars Ellenberg30b743a2010-11-05 09:39:06 +01002076 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2077 mdev->rs_total = mdev->ov_left;
Lars Ellenbergde228bb2010-11-05 09:43:15 +01002078 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2079 mdev->rs_mark_left[i] = mdev->ov_left;
2080 mdev->rs_mark_time[i] = now;
2081 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002082 dev_info(DEV, "Online Verify start sector: %llu\n",
2083 (unsigned long long)sector);
2084 }
2085 e->w.cb = w_e_end_ov_req;
2086 fault_type = DRBD_FAULT_RS_RD;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002087 break;
2088
Philipp Reisnerb411b362009-09-25 16:07:19 -07002089 default:
2090 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002091 cmdname(cmd));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002092 fault_type = DRBD_FAULT_MAX;
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002093 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002094 }
2095
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002096 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2097 * wrt the receiver, but it is not as straightforward as it may seem.
2098 * Various places in the resync start and stop logic assume resync
2099 * requests are processed in order, requeuing this on the worker thread
2100 * introduces a bunch of new code for synchronization between threads.
2101 *
2102 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2103 * "forever", throttling after drbd_rs_begin_io will lock that extent
2104 * for application writes for the same time. For now, just throttle
2105 * here, where the rest of the code expects the receiver to sleep for
2106 * a while, anyways.
2107 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002108
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002109 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2110 * this defers syncer requests for some time, before letting at least
 2111 * one request through. The resync controller on the receiving side
2112 * will adapt to the incoming rate accordingly.
2113 *
2114 * We cannot throttle here if remote is Primary/SyncTarget:
2115 * we would also throttle its application reads.
2116 * In that case, throttling is done on the SyncTarget only.
2117 */
Philipp Reisnere3555d82010-11-07 15:56:29 +01002118 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2119 schedule_timeout_uninterruptible(HZ/10);
2120 if (drbd_rs_begin_io(mdev, sector))
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002121 goto out_free_e;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002122
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002123submit_for_resync:
2124 atomic_add(size >> 9, &mdev->rs_sect_ev);
2125
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002126submit:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002127 inc_unacked(mdev);
Lars Ellenberg80a40e42010-08-11 23:28:00 +02002128 spin_lock_irq(&mdev->req_lock);
2129 list_add_tail(&e->w.list, &mdev->read_ee);
2130 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002131
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002132 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002133 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002134
Lars Ellenberg10f6d9922011-01-24 14:47:09 +01002135 /* don't care for the reason here */
2136 dev_err(DEV, "submit failed, triggering re-connect\n");
Lars Ellenberg22cc37a2010-09-14 20:40:41 +02002137 spin_lock_irq(&mdev->req_lock);
2138 list_del(&e->w.list);
2139 spin_unlock_irq(&mdev->req_lock);
2140 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2141
Philipp Reisnerb411b362009-09-25 16:07:19 -07002142out_free_e:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002143 put_ldev(mdev);
2144 drbd_free_ee(mdev, e);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002145 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002146}
2147
2148static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2149{
2150 int self, peer, rv = -100;
2151 unsigned long ch_self, ch_peer;
2152
2153 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2154 peer = mdev->p_uuid[UI_BITMAP] & 1;
2155
2156 ch_peer = mdev->p_uuid[UI_SIZE];
2157 ch_self = mdev->comm_bm_set;
2158
2159 switch (mdev->net_conf->after_sb_0p) {
2160 case ASB_CONSENSUS:
2161 case ASB_DISCARD_SECONDARY:
2162 case ASB_CALL_HELPER:
2163 dev_err(DEV, "Configuration error.\n");
2164 break;
2165 case ASB_DISCONNECT:
2166 break;
2167 case ASB_DISCARD_YOUNGER_PRI:
2168 if (self == 0 && peer == 1) {
2169 rv = -1;
2170 break;
2171 }
2172 if (self == 1 && peer == 0) {
2173 rv = 1;
2174 break;
2175 }
2176 /* Else fall through to one of the other strategies... */
2177 case ASB_DISCARD_OLDER_PRI:
2178 if (self == 0 && peer == 1) {
2179 rv = 1;
2180 break;
2181 }
2182 if (self == 1 && peer == 0) {
2183 rv = -1;
2184 break;
2185 }
2186 /* Else fall through to one of the other strategies... */
Lars Ellenbergad19bf62009-10-14 09:36:49 +02002187 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
Philipp Reisnerb411b362009-09-25 16:07:19 -07002188 "Using discard-least-changes instead\n");
2189 case ASB_DISCARD_ZERO_CHG:
2190 if (ch_peer == 0 && ch_self == 0) {
2191 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2192 ? -1 : 1;
2193 break;
2194 } else {
2195 if (ch_peer == 0) { rv = 1; break; }
2196 if (ch_self == 0) { rv = -1; break; }
2197 }
2198 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2199 break;
2200 case ASB_DISCARD_LEAST_CHG:
2201 if (ch_self < ch_peer)
2202 rv = -1;
2203 else if (ch_self > ch_peer)
2204 rv = 1;
2205 else /* ( ch_self == ch_peer ) */
2206 /* Well, then use something else. */
2207 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2208 ? -1 : 1;
2209 break;
2210 case ASB_DISCARD_LOCAL:
2211 rv = -1;
2212 break;
2213 case ASB_DISCARD_REMOTE:
2214 rv = 1;
2215 }
2216
2217 return rv;
2218}
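
/* Worked example, with made-up numbers: with after-sb-0pri set to
 * discard-least-changes, suppose this node set ch_self = 100 bits in its
 * bitmap while disconnected and the peer reports ch_peer = 10 via
 * p_uuid[UI_SIZE].  Then ch_self > ch_peer yields rv = 1, i.e. the peer's
 * smaller set of changes is discarded and it becomes the sync target; with
 * equal counts the DISCARD_CONCURRENT flag breaks the tie. */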
2219
2220static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2221{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002222 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002223
2224 switch (mdev->net_conf->after_sb_1p) {
2225 case ASB_DISCARD_YOUNGER_PRI:
2226 case ASB_DISCARD_OLDER_PRI:
2227 case ASB_DISCARD_LEAST_CHG:
2228 case ASB_DISCARD_LOCAL:
2229 case ASB_DISCARD_REMOTE:
2230 dev_err(DEV, "Configuration error.\n");
2231 break;
2232 case ASB_DISCONNECT:
2233 break;
2234 case ASB_CONSENSUS:
2235 hg = drbd_asb_recover_0p(mdev);
2236 if (hg == -1 && mdev->state.role == R_SECONDARY)
2237 rv = hg;
2238 if (hg == 1 && mdev->state.role == R_PRIMARY)
2239 rv = hg;
2240 break;
2241 case ASB_VIOLENTLY:
2242 rv = drbd_asb_recover_0p(mdev);
2243 break;
2244 case ASB_DISCARD_SECONDARY:
2245 return mdev->state.role == R_PRIMARY ? 1 : -1;
2246 case ASB_CALL_HELPER:
2247 hg = drbd_asb_recover_0p(mdev);
2248 if (hg == -1 && mdev->state.role == R_PRIMARY) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002249 enum drbd_state_rv rv2;
2250
2251 drbd_set_role(mdev, R_SECONDARY, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002252 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2253 * we might be here in C_WF_REPORT_PARAMS which is transient.
2254 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002255 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2256 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002257 drbd_khelper(mdev, "pri-lost-after-sb");
2258 } else {
2259 dev_warn(DEV, "Successfully gave up primary role.\n");
2260 rv = hg;
2261 }
2262 } else
2263 rv = hg;
2264 }
2265
2266 return rv;
2267}
2268
2269static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2270{
Andreas Gruenbacher6184ea22010-12-09 14:23:27 +01002271 int hg, rv = -100;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002272
2273 switch (mdev->net_conf->after_sb_2p) {
2274 case ASB_DISCARD_YOUNGER_PRI:
2275 case ASB_DISCARD_OLDER_PRI:
2276 case ASB_DISCARD_LEAST_CHG:
2277 case ASB_DISCARD_LOCAL:
2278 case ASB_DISCARD_REMOTE:
2279 case ASB_CONSENSUS:
2280 case ASB_DISCARD_SECONDARY:
2281 dev_err(DEV, "Configuration error.\n");
2282 break;
2283 case ASB_VIOLENTLY:
2284 rv = drbd_asb_recover_0p(mdev);
2285 break;
2286 case ASB_DISCONNECT:
2287 break;
2288 case ASB_CALL_HELPER:
2289 hg = drbd_asb_recover_0p(mdev);
2290 if (hg == -1) {
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002291 enum drbd_state_rv rv2;
2292
Philipp Reisnerb411b362009-09-25 16:07:19 -07002293 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2294 * we might be here in C_WF_REPORT_PARAMS which is transient.
2295 * we do not need to wait for the after state change work either. */
Andreas Gruenbacherbb437942010-12-09 14:02:35 +01002296 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2297 if (rv2 != SS_SUCCESS) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002298 drbd_khelper(mdev, "pri-lost-after-sb");
2299 } else {
2300 dev_warn(DEV, "Successfully gave up primary role.\n");
2301 rv = hg;
2302 }
2303 } else
2304 rv = hg;
2305 }
2306
2307 return rv;
2308}
2309
2310static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2311 u64 bits, u64 flags)
2312{
2313 if (!uuid) {
2314 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2315 return;
2316 }
2317 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2318 text,
2319 (unsigned long long)uuid[UI_CURRENT],
2320 (unsigned long long)uuid[UI_BITMAP],
2321 (unsigned long long)uuid[UI_HISTORY_START],
2322 (unsigned long long)uuid[UI_HISTORY_END],
2323 (unsigned long long)bits,
2324 (unsigned long long)flags);
2325}
2326
2327/*
2328 100 after split brain try auto recover
2329 2 C_SYNC_SOURCE set BitMap
2330 1 C_SYNC_SOURCE use BitMap
2331 0 no Sync
2332 -1 C_SYNC_TARGET use BitMap
2333 -2 C_SYNC_TARGET set BitMap
2334 -100 after split brain, disconnect
2335-1000 unrelated data
Philipp Reisner4a23f262011-01-11 17:42:17 +01002336-1091 requires proto 91
2337-1096 requires proto 96
Philipp Reisnerb411b362009-09-25 16:07:19 -07002338 */
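/* Example, for illustration: with equal current UUIDs after a common power
 * failure where only this node had been primary (rule 40, rct == 1), the
 * function returns 1: per the table above we become C_SYNC_SOURCE and
 * resync using the bitmap. */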
2339static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2340{
2341 u64 self, peer;
2342 int i, j;
2343
2344 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2345 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2346
2347 *rule_nr = 10;
2348 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2349 return 0;
2350
2351 *rule_nr = 20;
2352 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2353 peer != UUID_JUST_CREATED)
2354 return -2;
2355
2356 *rule_nr = 30;
2357 if (self != UUID_JUST_CREATED &&
2358 (peer == UUID_JUST_CREATED || peer == (u64)0))
2359 return 2;
2360
2361 if (self == peer) {
2362 int rct, dc; /* roles at crash time */
2363
2364 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2365
2366 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002367 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002368
2369 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2370 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2371 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2372 drbd_uuid_set_bm(mdev, 0UL);
2373
2374 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2375 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2376 *rule_nr = 34;
2377 } else {
2378 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2379 *rule_nr = 36;
2380 }
2381
2382 return 1;
2383 }
2384
2385 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2386
2387 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002388 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002389
2390 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2391 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2392 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2393
2394 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2395 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2396 mdev->p_uuid[UI_BITMAP] = 0UL;
2397
2398 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2399 *rule_nr = 35;
2400 } else {
2401 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2402 *rule_nr = 37;
2403 }
2404
2405 return -1;
2406 }
2407
2408 /* Common power [off|failure] */
2409 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2410 (mdev->p_uuid[UI_FLAGS] & 2);
2411 /* lowest bit is set when we were primary,
2412 * next bit (weight 2) is set when peer was primary */
2413 *rule_nr = 40;
2414
2415 switch (rct) {
2416 case 0: /* !self_pri && !peer_pri */ return 0;
2417 case 1: /* self_pri && !peer_pri */ return 1;
2418 case 2: /* !self_pri && peer_pri */ return -1;
2419 case 3: /* self_pri && peer_pri */
2420 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2421 return dc ? -1 : 1;
2422 }
2423 }
2424
2425 *rule_nr = 50;
2426 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2427 if (self == peer)
2428 return -1;
2429
2430 *rule_nr = 51;
2431 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2432 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002433 if (mdev->agreed_pro_version < 96 ?
2434 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2435 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2436 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002437 		/* The last P_SYNC_UUID did not get through. Undo the modifications
 2438		   the peer made to its UUIDs when it last started a resync as sync source. */
2439
2440 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002441 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002442
2443 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2444 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
Philipp Reisner4a23f262011-01-11 17:42:17 +01002445
2446 dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
2447 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2448
Philipp Reisnerb411b362009-09-25 16:07:19 -07002449 return -1;
2450 }
2451 }
2452
2453 *rule_nr = 60;
2454 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2455 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2456 peer = mdev->p_uuid[i] & ~((u64)1);
2457 if (self == peer)
2458 return -2;
2459 }
2460
2461 *rule_nr = 70;
2462 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2463 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2464 if (self == peer)
2465 return 1;
2466
2467 *rule_nr = 71;
2468 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2469 if (self == peer) {
Philipp Reisner4a23f262011-01-11 17:42:17 +01002470 if (mdev->agreed_pro_version < 96 ?
2471 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2472 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2473 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002474 		/* The last P_SYNC_UUID did not get through. Undo the modifications
 2475		   we made to our UUIDs when we last started a resync as sync source. */
2476
2477 if (mdev->agreed_pro_version < 91)
Philipp Reisner4a23f262011-01-11 17:42:17 +01002478 return -1091;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002479
2480 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2481 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2482
Philipp Reisner4a23f262011-01-11 17:42:17 +01002483 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002484 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2485 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2486
2487 return 1;
2488 }
2489 }
2490
2491
2492 *rule_nr = 80;
Philipp Reisnerd8c2a362009-11-18 15:52:51 +01002493 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002494 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2495 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2496 if (self == peer)
2497 return 2;
2498 }
2499
2500 *rule_nr = 90;
2501 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2502 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2503 if (self == peer && self != ((u64)0))
2504 return 100;
2505
2506 *rule_nr = 100;
2507 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2508 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2509 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2510 peer = mdev->p_uuid[j] & ~((u64)1);
2511 if (self == peer)
2512 return -100;
2513 }
2514 }
2515
2516 return -1000;
2517}
2518
2519/* drbd_sync_handshake() returns the new conn state on success, or
2520 CONN_MASK (-1) on failure.
2521 */
2522static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2523 enum drbd_disk_state peer_disk) __must_hold(local)
2524{
2525 int hg, rule_nr;
2526 enum drbd_conns rv = C_MASK;
2527 enum drbd_disk_state mydisk;
2528
2529 mydisk = mdev->state.disk;
2530 if (mydisk == D_NEGOTIATING)
2531 mydisk = mdev->new_state_tmp.disk;
2532
2533 dev_info(DEV, "drbd_sync_handshake:\n");
2534 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2535 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2536 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2537
2538 hg = drbd_uuid_compare(mdev, &rule_nr);
2539
2540 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2541
2542 if (hg == -1000) {
2543 dev_alert(DEV, "Unrelated data, aborting!\n");
2544 return C_MASK;
2545 }
Philipp Reisner4a23f262011-01-11 17:42:17 +01002546 if (hg < -1000) {
2547 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002548 return C_MASK;
2549 }
2550
2551 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2552 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2553 int f = (hg == -100) || abs(hg) == 2;
2554 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2555 if (f)
2556 hg = hg*2;
2557 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2558 hg > 0 ? "source" : "target");
2559 }
2560
Adam Gandelman3a11a482010-04-08 16:48:23 -07002561 if (abs(hg) == 100)
2562 drbd_khelper(mdev, "initial-split-brain");
2563
Philipp Reisnerb411b362009-09-25 16:07:19 -07002564 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2565 int pcount = (mdev->state.role == R_PRIMARY)
2566 + (peer_role == R_PRIMARY);
2567 int forced = (hg == -100);
2568
2569 switch (pcount) {
2570 case 0:
2571 hg = drbd_asb_recover_0p(mdev);
2572 break;
2573 case 1:
2574 hg = drbd_asb_recover_1p(mdev);
2575 break;
2576 case 2:
2577 hg = drbd_asb_recover_2p(mdev);
2578 break;
2579 }
2580 if (abs(hg) < 100) {
2581 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2582 "automatically solved. Sync from %s node\n",
2583 pcount, (hg < 0) ? "peer" : "this");
2584 if (forced) {
2585 dev_warn(DEV, "Doing a full sync, since"
2586 " UUIDs where ambiguous.\n");
2587 hg = hg*2;
2588 }
2589 }
2590 }
2591
2592 if (hg == -100) {
2593 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2594 hg = -1;
2595 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2596 hg = 1;
2597
2598 if (abs(hg) < 100)
2599 dev_warn(DEV, "Split-Brain detected, manually solved. "
2600 "Sync from %s node\n",
2601 (hg < 0) ? "peer" : "this");
2602 }
2603
2604 if (hg == -100) {
Lars Ellenberg580b9762010-02-26 23:15:23 +01002605 /* FIXME this log message is not correct if we end up here
2606 * after an attempted attach on a diskless node.
2607 * We just refuse to attach -- well, we drop the "connection"
2608 * to that disk, in a way... */
Adam Gandelman3a11a482010-04-08 16:48:23 -07002609 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07002610 drbd_khelper(mdev, "split-brain");
2611 return C_MASK;
2612 }
2613
2614 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2615 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2616 return C_MASK;
2617 }
2618
2619 if (hg < 0 && /* by intention we do not use mydisk here. */
2620 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2621 switch (mdev->net_conf->rr_conflict) {
2622 case ASB_CALL_HELPER:
2623 drbd_khelper(mdev, "pri-lost");
2624 /* fall through */
2625 case ASB_DISCONNECT:
2626 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2627 return C_MASK;
2628 case ASB_VIOLENTLY:
2629 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2630 "assumption\n");
2631 }
2632 }
2633
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002634 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2635 if (hg == 0)
2636 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2637 else
2638 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2639 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2640 abs(hg) >= 2 ? "full" : "bit-map based");
2641 return C_MASK;
2642 }
2643
Philipp Reisnerb411b362009-09-25 16:07:19 -07002644 if (abs(hg) >= 2) {
2645 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01002646 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2647 BM_LOCKED_SET_ALLOWED))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002648 return C_MASK;
2649 }
2650
2651 if (hg > 0) { /* become sync source. */
2652 rv = C_WF_BITMAP_S;
2653 } else if (hg < 0) { /* become sync target */
2654 rv = C_WF_BITMAP_T;
2655 } else {
2656 rv = C_CONNECTED;
2657 if (drbd_bm_total_weight(mdev)) {
2658 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2659 drbd_bm_total_weight(mdev));
2660 }
2661 }
2662
2663 return rv;
2664}
2665
2666/* returns 1 if invalid */
2667static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2668{
2669 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2670 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2671 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2672 return 0;
2673
2674 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2675 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2676 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2677 return 1;
2678
2679 /* everything else is valid if they are equal on both sides. */
2680 if (peer == self)
2681 return 0;
2682
 2683	/* everything else is invalid. */
2684 return 1;
2685}
2686
Philipp Reisner02918be2010-08-20 14:35:10 +02002687static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002688{
Philipp Reisner02918be2010-08-20 14:35:10 +02002689 struct p_protocol *p = &mdev->data.rbuf.protocol;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002690 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002691 int p_want_lose, p_two_primaries, cf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002692 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2693
Philipp Reisnerb411b362009-09-25 16:07:19 -07002694 p_proto = be32_to_cpu(p->protocol);
2695 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2696 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2697 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002698 p_two_primaries = be32_to_cpu(p->two_primaries);
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01002699 cf = be32_to_cpu(p->conn_flags);
2700 p_want_lose = cf & CF_WANT_LOSE;
2701
2702 clear_bit(CONN_DRY_RUN, &mdev->flags);
2703
2704 if (cf & CF_DRY_RUN)
2705 set_bit(CONN_DRY_RUN, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002706
2707 if (p_proto != mdev->net_conf->wire_protocol) {
2708 dev_err(DEV, "incompatible communication protocols\n");
2709 goto disconnect;
2710 }
2711
2712 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2713 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2714 goto disconnect;
2715 }
2716
2717 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2718 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2719 goto disconnect;
2720 }
2721
2722 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2723 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2724 goto disconnect;
2725 }
2726
2727 if (p_want_lose && mdev->net_conf->want_lose) {
2728 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2729 goto disconnect;
2730 }
2731
2732 if (p_two_primaries != mdev->net_conf->two_primaries) {
2733 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2734 goto disconnect;
2735 }
2736
2737 if (mdev->agreed_pro_version >= 87) {
2738 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2739
2740 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002741 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002742
2743 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2744 if (strcmp(p_integrity_alg, my_alg)) {
2745 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2746 goto disconnect;
2747 }
2748 dev_info(DEV, "data-integrity-alg: %s\n",
2749 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2750 }
2751
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002752 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002753
2754disconnect:
2755 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002756 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002757}
2758
2759/* helper function
2760 * input: alg name, feature name
2761 * return: NULL (alg name was "")
2762 * ERR_PTR(error) if something goes wrong
2763 * or the crypto hash ptr, if it worked out ok. */
2764struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2765 const char *alg, const char *name)
2766{
2767 struct crypto_hash *tfm;
2768
2769 if (!alg[0])
2770 return NULL;
2771
2772 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2773 if (IS_ERR(tfm)) {
2774 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2775 alg, name, PTR_ERR(tfm));
2776 return tfm;
2777 }
2778 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2779 crypto_free_hash(tfm);
2780 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2781 return ERR_PTR(-EINVAL);
2782 }
2783 return tfm;
2784}
2785
Philipp Reisner02918be2010-08-20 14:35:10 +02002786static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002787{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002788 int ok = true;
Philipp Reisner02918be2010-08-20 14:35:10 +02002789 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002790 unsigned int header_size, data_size, exp_max_sz;
2791 struct crypto_hash *verify_tfm = NULL;
2792 struct crypto_hash *csums_tfm = NULL;
2793 const int apv = mdev->agreed_pro_version;
Philipp Reisner778f2712010-07-06 11:14:00 +02002794 int *rs_plan_s = NULL;
2795 int fifo_size = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002796
2797 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2798 : apv == 88 ? sizeof(struct p_rs_param)
2799 + SHARED_SECRET_MAX
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002800 : apv <= 94 ? sizeof(struct p_rs_param_89)
2801 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002802
Philipp Reisner02918be2010-08-20 14:35:10 +02002803 if (packet_size > exp_max_sz) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002804 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02002805 packet_size, exp_max_sz);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002806 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002807 }
2808
2809 if (apv <= 88) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002810 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2811 data_size = packet_size - header_size;
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002812 } else if (apv <= 94) {
Philipp Reisner02918be2010-08-20 14:35:10 +02002813 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2814 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002815 D_ASSERT(data_size == 0);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002816 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02002817 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2818 data_size = packet_size - header_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002819 D_ASSERT(data_size == 0);
2820 }
2821
2822 /* initialize verify_alg and csums_alg */
2823 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2824
Philipp Reisner02918be2010-08-20 14:35:10 +02002825 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002826 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002827
2828 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2829
2830 if (apv >= 88) {
2831 if (apv == 88) {
2832 if (data_size > SHARED_SECRET_MAX) {
2833 dev_err(DEV, "verify-alg too long, "
2834 "peer wants %u, accepting only %u byte\n",
2835 data_size, SHARED_SECRET_MAX);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002836 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002837 }
2838
2839 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002840 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002841
2842 /* we expect NUL terminated string */
2843 /* but just in case someone tries to be evil */
2844 D_ASSERT(p->verify_alg[data_size-1] == 0);
2845 p->verify_alg[data_size-1] = 0;
2846
2847 } else /* apv >= 89 */ {
2848 /* we still expect NUL terminated strings */
2849 /* but just in case someone tries to be evil */
2850 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2851 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2852 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2853 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2854 }
2855
2856 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2857 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2858 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2859 mdev->sync_conf.verify_alg, p->verify_alg);
2860 goto disconnect;
2861 }
2862 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2863 p->verify_alg, "verify-alg");
2864 if (IS_ERR(verify_tfm)) {
2865 verify_tfm = NULL;
2866 goto disconnect;
2867 }
2868 }
2869
2870 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2871 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2872 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2873 mdev->sync_conf.csums_alg, p->csums_alg);
2874 goto disconnect;
2875 }
2876 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2877 p->csums_alg, "csums-alg");
2878 if (IS_ERR(csums_tfm)) {
2879 csums_tfm = NULL;
2880 goto disconnect;
2881 }
2882 }
2883
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002884 if (apv > 94) {
2885 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2886 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2887 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2888 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2889 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
Philipp Reisner778f2712010-07-06 11:14:00 +02002890
2891 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2892 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2893 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2894 if (!rs_plan_s) {
2895 dev_err(DEV, "kmalloc of fifo_buffer failed");
2896 goto disconnect;
2897 }
2898 }
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02002899 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002900
2901 spin_lock(&mdev->peer_seq_lock);
2902 /* lock against drbd_nl_syncer_conf() */
2903 if (verify_tfm) {
2904 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2905 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2906 crypto_free_hash(mdev->verify_tfm);
2907 mdev->verify_tfm = verify_tfm;
2908 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2909 }
2910 if (csums_tfm) {
2911 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2912 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2913 crypto_free_hash(mdev->csums_tfm);
2914 mdev->csums_tfm = csums_tfm;
2915 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2916 }
Philipp Reisner778f2712010-07-06 11:14:00 +02002917 if (fifo_size != mdev->rs_plan_s.size) {
2918 kfree(mdev->rs_plan_s.values);
2919 mdev->rs_plan_s.values = rs_plan_s;
2920 mdev->rs_plan_s.size = fifo_size;
2921 mdev->rs_planed = 0;
2922 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002923 spin_unlock(&mdev->peer_seq_lock);
2924 }
2925
2926 return ok;
2927disconnect:
2928 /* just for completeness: actually not needed,
2929 * as this is not reached if csums_tfm was ok. */
2930 crypto_free_hash(csums_tfm);
2931 /* but free the verify_tfm again, if csums_tfm did not work out */
2932 crypto_free_hash(verify_tfm);
2933 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002934 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002935}
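/*
 * The apv-dependent branches above both end by forcing a NUL terminator onto
 * the algorithm names taken from the wire, so the later strcmp()/strcpy()
 * calls can never run past the end of the buffer even against a misbehaving
 * peer. A minimal standalone sketch of that defensive copy (userspace, not
 * kernel code; NAME_MAX_LEN is an assumption standing in for
 * SHARED_SECRET_MAX):
 */

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 64		/* stands in for SHARED_SECRET_MAX (assumption) */

/* Copy an algorithm name received from the network into a local buffer and
 * force NUL termination, no matter what the peer actually sent. */
static void sanitize_alg_name(char *dst, const char *wire, size_t wire_len)
{
        size_t n;

        if (wire_len == 0) {
                dst[0] = '\0';
                return;
        }
        n = wire_len < NAME_MAX_LEN ? wire_len : NAME_MAX_LEN;
        memcpy(dst, wire, n);
        dst[n - 1] = '\0';	/* same idea as p->verify_alg[data_size-1] = 0 */
}

int main(void)
{
        char peer_name[NAME_MAX_LEN];
        const char wire[] = "crc32c";	/* sizeof() includes the trailing NUL */

        sanitize_alg_name(peer_name, wire, sizeof(wire));
        printf("peer verify-alg: \"%s\"\n", peer_name);

        if (strcmp("md5", peer_name) != 0)
                printf("algorithm differs, a new transform would be allocated\n");
        return 0;
}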
2936
Philipp Reisnerb411b362009-09-25 16:07:19 -07002937/* warn if the arguments differ by more than 12.5% */
2938static void warn_if_differ_considerably(struct drbd_conf *mdev,
2939 const char *s, sector_t a, sector_t b)
2940{
2941 sector_t d;
2942 if (a == 0 || b == 0)
2943 return;
2944 d = (a > b) ? (a - b) : (b - a);
2945 if (d > (a>>3) || d > (b>>3))
2946 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2947 (unsigned long long)a, (unsigned long long)b);
2948}
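/*
 * The shifts above encode the 12.5% threshold: a >> 3 is a/8. A standalone
 * restatement of the same arithmetic, with sector_t assumed to be a 64-bit
 * sector count:
 */

#include <stdio.h>

typedef unsigned long long sector_t;	/* assumption for the sketch */

/* Returns 1 when a and b differ by more than 12.5% of either value. */
static int differ_considerably(sector_t a, sector_t b)
{
        sector_t d;

        if (a == 0 || b == 0)
                return 0;
        d = (a > b) ? (a - b) : (b - a);
        return d > (a >> 3) || d > (b >> 3);
}

int main(void)
{
        printf("%d\n", differ_considerably(1000, 1100));	/* 0: 10% apart */
        printf("%d\n", differ_considerably(1000, 1200));	/* 1: 20% apart */
        return 0;
}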
2949
Philipp Reisner02918be2010-08-20 14:35:10 +02002950static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002951{
Philipp Reisner02918be2010-08-20 14:35:10 +02002952 struct p_sizes *p = &mdev->data.rbuf.sizes;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002953 enum determine_dev_size dd = unchanged;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002954 sector_t p_size, p_usize, my_usize;
2955 int ldsc = 0; /* local disk size changed */
Philipp Reisnere89b5912010-03-24 17:11:33 +01002956 enum dds_flags ddsf;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002957
Philipp Reisnerb411b362009-09-25 16:07:19 -07002958 p_size = be64_to_cpu(p->d_size);
2959 p_usize = be64_to_cpu(p->u_size);
2960
2961 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2962 dev_err(DEV, "some backing storage is needed\n");
2963 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002964 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002965 }
2966
2967 /* just store the peer's disk size for now.
2968 * we still need to figure out whether we accept that. */
2969 mdev->p_size = p_size;
2970
Philipp Reisnerb411b362009-09-25 16:07:19 -07002971 if (get_ldev(mdev)) {
2972 warn_if_differ_considerably(mdev, "lower level device sizes",
2973 p_size, drbd_get_max_capacity(mdev->ldev));
2974 warn_if_differ_considerably(mdev, "user requested size",
2975 p_usize, mdev->ldev->dc.disk_size);
2976
2977 /* if this is the first connect, or an otherwise expected
2978 * param exchange, choose the minimum */
2979 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2980 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2981 p_usize);
2982
2983 my_usize = mdev->ldev->dc.disk_size;
2984
2985 if (mdev->ldev->dc.disk_size != p_usize) {
2986 mdev->ldev->dc.disk_size = p_usize;
2987 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2988 (unsigned long)mdev->ldev->dc.disk_size);
2989 }
2990
2991 /* Never shrink a device with usable data during connect.
2992 But allow online shrinking if we are connected. */
Philipp Reisnera393db62009-12-22 13:35:52 +01002993 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
Philipp Reisnerb411b362009-09-25 16:07:19 -07002994 drbd_get_capacity(mdev->this_bdev) &&
2995 mdev->state.disk >= D_OUTDATED &&
2996 mdev->state.conn < C_CONNECTED) {
2997 dev_err(DEV, "The peer's disk size is too small!\n");
2998 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2999 mdev->ldev->dc.disk_size = my_usize;
3000 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003001 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003002 }
3003 put_ldev(mdev);
3004 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003005
Philipp Reisnere89b5912010-03-24 17:11:33 +01003006 ddsf = be16_to_cpu(p->dds_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003007 if (get_ldev(mdev)) {
Bart Van Assche24c48302011-05-21 18:32:29 +02003008 dd = drbd_determine_dev_size(mdev, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003009 put_ldev(mdev);
3010 if (dd == dev_size_error)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003011 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003012 drbd_md_sync(mdev);
3013 } else {
3014 /* I am diskless, need to accept the peer's size. */
3015 drbd_set_my_capacity(mdev, p_size);
3016 }
3017
Philipp Reisner99432fc2011-05-20 16:39:13 +02003018 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3019 drbd_reconsider_max_bio_size(mdev);
3020
Philipp Reisnerb411b362009-09-25 16:07:19 -07003021 if (get_ldev(mdev)) {
3022 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3023 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3024 ldsc = 1;
3025 }
3026
Philipp Reisnerb411b362009-09-25 16:07:19 -07003027 put_ldev(mdev);
3028 }
3029
3030 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3031 if (be64_to_cpu(p->c_size) !=
3032 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3033 /* we have different sizes, probably peer
3034 * needs to know my new size... */
Philipp Reisnere89b5912010-03-24 17:11:33 +01003035 drbd_send_sizes(mdev, 0, ddsf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003036 }
3037 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3038 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3039 if (mdev->state.pdsk >= D_INCONSISTENT &&
Philipp Reisnere89b5912010-03-24 17:11:33 +01003040 mdev->state.disk >= D_INCONSISTENT) {
3041 if (ddsf & DDSF_NO_RESYNC)
3042 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3043 else
3044 resync_after_online_grow(mdev);
3045 } else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003046 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3047 }
3048 }
3049
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003050 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003051}
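/*
 * During the first parameter exchange the code above picks the effective
 * user-requested size with min_not_zero(): zero means "no explicit limit",
 * so only non-zero values take part in the minimum. A small userspace sketch
 * of that rule (min_not_zero_sect() is a local stand-in, not the kernel
 * macro):
 */

#include <stdio.h>

typedef unsigned long long sector_t;	/* assumption for the sketch */

static sector_t min_not_zero_sect(sector_t a, sector_t b)
{
        if (a == 0)
                return b;
        if (b == 0)
                return a;
        return a < b ? a : b;
}

int main(void)
{
        printf("%llu\n", min_not_zero_sect(0, 409600));		/* 409600 */
        printf("%llu\n", min_not_zero_sect(819200, 409600));	/* 409600 */
        printf("%llu\n", min_not_zero_sect(819200, 0));		/* 819200 */
        return 0;
}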
3052
Philipp Reisner02918be2010-08-20 14:35:10 +02003053static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003054{
Philipp Reisner02918be2010-08-20 14:35:10 +02003055 struct p_uuids *p = &mdev->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003056 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003057 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003058
Philipp Reisnerb411b362009-09-25 16:07:19 -07003059 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3060
3061 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3062 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3063
3064 kfree(mdev->p_uuid);
3065 mdev->p_uuid = p_uuid;
3066
3067 if (mdev->state.conn < C_CONNECTED &&
3068 mdev->state.disk < D_INCONSISTENT &&
3069 mdev->state.role == R_PRIMARY &&
3070 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3071 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3072 (unsigned long long)mdev->ed_uuid);
3073 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003074 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003075 }
3076
3077 if (get_ldev(mdev)) {
3078 int skip_initial_sync =
3079 mdev->state.conn == C_CONNECTED &&
3080 mdev->agreed_pro_version >= 90 &&
3081 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3082 (p_uuid[UI_FLAGS] & 8);
3083 if (skip_initial_sync) {
3084 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3085 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003086 "clear_n_write from receive_uuids",
3087 BM_LOCKED_TEST_ALLOWED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003088 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3089 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3090 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3091 CS_VERBOSE, NULL);
3092 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003093 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003094 }
3095 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003096 } else if (mdev->state.disk < D_INCONSISTENT &&
3097 mdev->state.role == R_PRIMARY) {
3098 /* I am a diskless primary, the peer just created a new current UUID
3099 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003100 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003101 }
3102
3103 	/* Before we test for the disk state, we should wait until a possibly
3104 	   ongoing cluster wide state change is finished. That is important if
3105 we are primary and are detaching from our disk. We need to see the
3106 new disk state... */
3107 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3108 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003109 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3110
3111 if (updated_uuids)
3112 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003113
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003114 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003115}
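/*
 * The UUID comparison above masks off bit 0 (mdev->ed_uuid & ~((u64)1)),
 * because DRBD uses the lowest bit of the current UUID as a flag bit rather
 * than as part of the data-generation identity. A standalone sketch of that
 * masked comparison:
 */

#include <stdio.h>
#include <stdint.h>

/* Two current-data UUIDs belong to the same generation if they are equal
 * once the flag bit is ignored. */
static int same_data_generation(uint64_t my_ed_uuid, uint64_t peer_current)
{
        return (my_ed_uuid & ~UINT64_C(1)) == (peer_current & ~UINT64_C(1));
}

int main(void)
{
        uint64_t mine = 0x1122334455667789ULL;	/* flag bit set */
        uint64_t peer = 0x1122334455667788ULL;	/* flag bit clear */

        printf("same generation: %d\n", same_data_generation(mine, peer));		/* 1 */
        printf("different:       %d\n", same_data_generation(mine, peer ^ 0x10));	/* 0 */
        return 0;
}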
3116
3117/**
3118 * convert_state() - Converts the peer's view of the cluster state to our point of view
3119 * @ps: The state as seen by the peer.
3120 */
3121static union drbd_state convert_state(union drbd_state ps)
3122{
3123 union drbd_state ms;
3124
3125 static enum drbd_conns c_tab[] = {
3126 [C_CONNECTED] = C_CONNECTED,
3127
3128 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3129 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3130 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3131 [C_VERIFY_S] = C_VERIFY_T,
3132 [C_MASK] = C_MASK,
3133 };
3134
3135 ms.i = ps.i;
3136
3137 ms.conn = c_tab[ps.conn];
3138 ms.peer = ps.role;
3139 ms.role = ps.peer;
3140 ms.pdsk = ps.disk;
3141 ms.disk = ps.pdsk;
3142 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3143
3144 return ms;
3145}
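/*
 * convert_state() mirrors the packet's point of view: the peer's "role" is
 * our "peer", its "disk" is our "pdsk", and so on. A toy version with only
 * the mirrored fields (struct toy_state is an assumption; union drbd_state
 * also carries connection and suspend bits, converted via c_tab above):
 */

#include <stdio.h>

struct toy_state {
        int role;	/* my role         */
        int peer;	/* peer's role     */
        int disk;	/* my disk state   */
        int pdsk;	/* peer disk state */
};

/* Translate the peer's view of the cluster into our own view. */
static struct toy_state convert_view(struct toy_state ps)
{
        struct toy_state ms;

        ms.role = ps.peer;
        ms.peer = ps.role;
        ms.disk = ps.pdsk;
        ms.pdsk = ps.disk;
        return ms;
}

int main(void)
{
        struct toy_state peer_view = { .role = 1, .peer = 2, .disk = 8, .pdsk = 4 };
        struct toy_state my_view = convert_view(peer_view);

        printf("my role=%d peer=%d disk=%d pdsk=%d\n",
               my_view.role, my_view.peer, my_view.disk, my_view.pdsk);
        return 0;
}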
3146
Philipp Reisner02918be2010-08-20 14:35:10 +02003147static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003148{
Philipp Reisner02918be2010-08-20 14:35:10 +02003149 struct p_req_state *p = &mdev->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003150 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003151 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003152
Philipp Reisnerb411b362009-09-25 16:07:19 -07003153 mask.i = be32_to_cpu(p->mask);
3154 val.i = be32_to_cpu(p->val);
3155
3156 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3157 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3158 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003159 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003160 }
3161
3162 mask = convert_state(mask);
3163 val = convert_state(val);
3164
3165 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3166
3167 drbd_send_sr_reply(mdev, rv);
3168 drbd_md_sync(mdev);
3169
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003170 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003171}
3172
Philipp Reisner02918be2010-08-20 14:35:10 +02003173static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003174{
Philipp Reisner02918be2010-08-20 14:35:10 +02003175 struct p_state *p = &mdev->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003176 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003177 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003178 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003179 int rv;
3180
Philipp Reisnerb411b362009-09-25 16:07:19 -07003181 peer_state.i = be32_to_cpu(p->state);
3182
3183 real_peer_disk = peer_state.disk;
3184 if (peer_state.disk == D_NEGOTIATING) {
3185 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3186 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3187 }
3188
3189 spin_lock_irq(&mdev->req_lock);
3190 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003191 os = ns = mdev->state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003192 spin_unlock_irq(&mdev->req_lock);
3193
Lars Ellenberg545752d2011-12-05 14:39:25 +01003194 /* If some other part of the code (asender thread, timeout)
3195 * already decided to close the connection again,
3196 * we must not "re-establish" it here. */
3197 if (os.conn <= C_TEAR_DOWN)
3198 return false;
3199
Lars Ellenberg40424e42011-09-26 15:24:56 +02003200 /* If this is the "end of sync" confirmation, usually the peer disk
3201 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3202 * set) resync started in PausedSyncT, or if the timing of pause-/
3203 * unpause-sync events has been "just right", the peer disk may
3204 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3205 */
3206 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3207 real_peer_disk == D_UP_TO_DATE &&
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003208 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3209 /* If we are (becoming) SyncSource, but peer is still in sync
3210 * preparation, ignore its uptodate-ness to avoid flapping, it
3211 * will change to inconsistent once the peer reaches active
3212 * syncing states.
3213 * It may have changed syncer-paused flags, however, so we
3214 * cannot ignore this completely. */
3215 if (peer_state.conn > C_CONNECTED &&
3216 peer_state.conn < C_SYNC_SOURCE)
3217 real_peer_disk = D_INCONSISTENT;
3218
3219 /* if peer_state changes to connected at the same time,
3220 * it explicitly notifies us that it finished resync.
3221 * Maybe we should finish it up, too? */
3222 else if (os.conn >= C_SYNC_SOURCE &&
3223 peer_state.conn == C_CONNECTED) {
3224 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3225 drbd_resync_finished(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003226 return true;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003227 }
3228 }
3229
3230 /* peer says his disk is inconsistent, while we think it is uptodate,
3231 * and this happens while the peer still thinks we have a sync going on,
3232 * but we think we are already done with the sync.
3233 * We ignore this to avoid flapping pdsk.
3234 * This should not happen, if the peer is a recent version of drbd. */
3235 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3236 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3237 real_peer_disk = D_UP_TO_DATE;
3238
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003239 if (ns.conn == C_WF_REPORT_PARAMS)
3240 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003241
Philipp Reisner67531712010-10-27 12:21:30 +02003242 if (peer_state.conn == C_AHEAD)
3243 ns.conn = C_BEHIND;
3244
Philipp Reisnerb411b362009-09-25 16:07:19 -07003245 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3246 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3247 int cr; /* consider resync */
3248
3249 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003250 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003251 /* if we had an established connection
3252 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003253 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003254 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003255 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003256 /* if we have both been inconsistent, and the peer has been
3257 * forced to be UpToDate with --overwrite-data */
3258 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3259 /* if we had been plain connected, and the admin requested to
3260 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003261 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003262 (peer_state.conn >= C_STARTING_SYNC_S &&
3263 peer_state.conn <= C_WF_BITMAP_T));
3264
3265 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003266 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003267
3268 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003269 if (ns.conn == C_MASK) {
3270 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003271 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003272 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003273 } else if (peer_state.disk == D_NEGOTIATING) {
3274 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3275 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003276 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003277 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003278 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003279 return false;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003280 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003281 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003282 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003283 }
3284 }
3285 }
3286
3287 spin_lock_irq(&mdev->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003288 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003289 goto retry;
3290 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003291 ns.peer = peer_state.role;
3292 ns.pdsk = real_peer_disk;
3293 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003294 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003295 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003296 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3297 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003298 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3299 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3300 	   for temporary network outages! */
3301 spin_unlock_irq(&mdev->req_lock);
3302 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3303 tl_clear(mdev);
3304 drbd_uuid_new_current(mdev);
3305 clear_bit(NEW_CUR_UUID, &mdev->flags);
3306 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003307 return false;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003308 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003309 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003310 ns = mdev->state;
3311 spin_unlock_irq(&mdev->req_lock);
3312
3313 if (rv < SS_SUCCESS) {
3314 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003315 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003316 }
3317
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003318 if (os.conn > C_WF_REPORT_PARAMS) {
3319 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003320 peer_state.disk != D_NEGOTIATING ) {
3321 /* we want resync, peer has not yet decided to sync... */
3322 /* Nowadays only used when forcing a node into primary role and
3323 setting its disk to UpToDate with that */
3324 drbd_send_uuids(mdev);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02003325 drbd_send_current_state(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003326 }
3327 }
3328
3329 mdev->net_conf->want_lose = 0;
3330
3331 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3332
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003333 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003334}
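/*
 * receive_state() samples mdev->state under req_lock, drops the lock while
 * the expensive decisions (e.g. drbd_sync_handshake()) run, then re-takes the
 * lock and jumps back to "retry:" if the state changed in the meantime. A
 * loose userspace analogue of that snapshot-and-retry idea, using a C11
 * atomic in place of the spinlock-protected state word (an assumption; the
 * kernel code uses the lock and a goto, not a compare-and-swap):
 */

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int state_word;	/* stands in for mdev->state.i */

/* Purely illustrative "expensive" decision made outside the lock. */
static unsigned int decide_new_state(unsigned int os)
{
        return os | 0x100;	/* e.g. mark "connected" */
}

int main(void)
{
        unsigned int os, ns;

        atomic_store(&state_word, 0x010);
        do {
                os = atomic_load(&state_word);	/* snapshot, like "os = ns = mdev->state" */
                ns = decide_new_state(os);	/* handshake etc. happens here */
                /* commit only if nothing changed underneath us; otherwise the
                 * compare-exchange fails and we loop, like "goto retry" above */
        } while (!atomic_compare_exchange_weak(&state_word, &os, ns));

        printf("committed 0x%x\n", atomic_load(&state_word));
        return 0;
}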
3335
Philipp Reisner02918be2010-08-20 14:35:10 +02003336static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003337{
Philipp Reisner02918be2010-08-20 14:35:10 +02003338 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003339
3340 wait_event(mdev->misc_wait,
3341 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003342 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003343 mdev->state.conn < C_CONNECTED ||
3344 mdev->state.disk < D_NEGOTIATING);
3345
3346 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3347
Philipp Reisnerb411b362009-09-25 16:07:19 -07003348 /* Here the _drbd_uuid_ functions are right, current should
3349 _not_ be rotated into the history */
3350 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3351 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3352 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3353
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003354 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003355 drbd_start_resync(mdev, C_SYNC_TARGET);
3356
3357 put_ldev(mdev);
3358 } else
3359 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3360
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003361 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003362}
3363
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003364/**
3365 * receive_bitmap_plain
3366 *
3367 * Return 0 when done, 1 when another iteration is needed, and a negative error
3368 * code upon failure.
3369 */
3370static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003371receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3372 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003373{
3374 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3375 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003376 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003377
Philipp Reisner02918be2010-08-20 14:35:10 +02003378 if (want != data_size) {
3379 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003380 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003381 }
3382 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003383 return 0;
3384 err = drbd_recv(mdev, buffer, want);
3385 if (err != want) {
3386 if (err >= 0)
3387 err = -EIO;
3388 return err;
3389 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003390
3391 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3392
3393 c->word_offset += num_words;
3394 c->bit_offset = c->word_offset * BITS_PER_LONG;
3395 if (c->bit_offset > c->bm_bits)
3396 c->bit_offset = c->bm_bits;
3397
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003398 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003399}
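/*
 * receive_bitmap_plain() advances the transfer context one packet at a time:
 * at most BM_PACKET_WORDS longs per packet, and the derived bit offset is
 * clamped to bm_bits because the last long is usually only partially used.
 * A standalone sketch of that bookkeeping (PACKET_WORDS and the 64-bit long
 * size are assumptions for the sketch):
 */

#include <stdio.h>

#define BITS_PER_LONG_SKETCH 64
#define PACKET_WORDS 512	/* stands in for BM_PACKET_WORDS */

struct xfer_ctx {
        unsigned long bm_bits;
        unsigned long bm_words;
        unsigned long word_offset;
        unsigned long bit_offset;
};

/* Advance by one plain-bitmap packet's worth of words, clamping the derived
 * bit offset to the real number of bits in the bitmap. */
static unsigned advance_plain(struct xfer_ctx *c)
{
        unsigned long left = c->bm_words - c->word_offset;
        unsigned num_words = left < PACKET_WORDS ? (unsigned)left : PACKET_WORDS;

        c->word_offset += num_words;
        c->bit_offset = c->word_offset * BITS_PER_LONG_SKETCH;
        if (c->bit_offset > c->bm_bits)
                c->bit_offset = c->bm_bits;
        return num_words;
}

int main(void)
{
        struct xfer_ctx c = { .bm_bits = 70000, .bm_words = 1094 };

        while (c.word_offset < c.bm_words)
                printf("received %u words, bit_offset now %lu\n",
                       advance_plain(&c), c.bit_offset);
        return 0;
}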
3400
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003401/**
3402 * recv_bm_rle_bits
3403 *
3404 * Return 0 when done, 1 when another iteration is needed, and a negative error
3405 * code upon failure.
3406 */
3407static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003408recv_bm_rle_bits(struct drbd_conf *mdev,
3409 struct p_compressed_bm *p,
3410 struct bm_xfer_ctx *c)
3411{
3412 struct bitstream bs;
3413 u64 look_ahead;
3414 u64 rl;
3415 u64 tmp;
3416 unsigned long s = c->bit_offset;
3417 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003418 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003419 int toggle = DCBP_get_start(p);
3420 int have;
3421 int bits;
3422
3423 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3424
3425 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3426 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003427 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003428
3429 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3430 bits = vli_decode_bits(&rl, look_ahead);
3431 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003432 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003433
3434 if (toggle) {
3435 e = s + rl -1;
3436 if (e >= c->bm_bits) {
3437 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003438 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003439 }
3440 _drbd_bm_set_bits(mdev, s, e);
3441 }
3442
3443 if (have < bits) {
3444 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3445 have, bits, look_ahead,
3446 (unsigned int)(bs.cur.b - p->code),
3447 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003448 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003449 }
3450 look_ahead >>= bits;
3451 have -= bits;
3452
3453 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3454 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003455 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003456 look_ahead |= tmp << have;
3457 have += bits;
3458 }
3459
3460 c->bit_offset = s;
3461 bm_xfer_ctx_bit_to_word_offset(c);
3462
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003463 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003464}
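/*
 * recv_bm_rle_bits() decodes alternating runs of clear and set bits; the
 * "toggle" flag, seeded from the packet header, says which kind the current
 * run describes, and only set-runs touch the bitmap. A standalone sketch of
 * that run-length decoding with the run lengths given as plain integers (the
 * real code additionally VLI-decodes them from a bit stream):
 */

#include <stdio.h>

/* "start_set" plays the role of DCBP_get_start(): whether the first run
 * describes set bits or clear bits. Returns 0 on success, -1 on bad input. */
static int decode_runs(const unsigned long *rl, int nr_runs, int start_set,
                       unsigned char *bits, unsigned long nbits)
{
        unsigned long s = 0;
        int toggle = start_set;
        int i;

        for (i = 0; i < nr_runs; i++, toggle = !toggle) {
                unsigned long e = s + rl[i] - 1;

                if (rl[i] == 0 || e >= nbits)
                        return -1;	/* malformed input / bitmap overflow */
                if (toggle) {
                        unsigned long b;
                        for (b = s; b <= e; b++)
                                bits[b] = 1;	/* like _drbd_bm_set_bits(s, e) */
                }
                s += rl[i];
        }
        return 0;
}

int main(void)
{
        unsigned char bits[16] = { 0 };
        unsigned long runs[] = { 3, 2, 5, 4 };	/* 3 clear, 2 set, 5 clear, 4 set */
        unsigned long i;

        if (decode_runs(runs, 4, 0, bits, 16) == 0) {
                for (i = 0; i < 16; i++)
                        putchar(bits[i] ? '1' : '0');
                putchar('\n');			/* prints 0001100000111100 */
        }
        return 0;
}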
3465
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003466/**
3467 * decode_bitmap_c
3468 *
3469 * Return 0 when done, 1 when another iteration is needed, and a negative error
3470 * code upon failure.
3471 */
3472static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003473decode_bitmap_c(struct drbd_conf *mdev,
3474 struct p_compressed_bm *p,
3475 struct bm_xfer_ctx *c)
3476{
3477 if (DCBP_get_code(p) == RLE_VLI_Bits)
3478 return recv_bm_rle_bits(mdev, p, c);
3479
3480 /* other variants had been implemented for evaluation,
3481 * but have been dropped as this one turned out to be "best"
3482 * during all our tests. */
3483
3484 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3485 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003486 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003487}
3488
3489void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3490 const char *direction, struct bm_xfer_ctx *c)
3491{
3492 /* what would it take to transfer it "plaintext" */
Philipp Reisner0b70a132010-08-20 13:36:10 +02003493 unsigned plain = sizeof(struct p_header80) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003494 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3495 + c->bm_words * sizeof(long);
3496 unsigned total = c->bytes[0] + c->bytes[1];
3497 unsigned r;
3498
3499 /* total can not be zero. but just in case: */
3500 if (total == 0)
3501 return;
3502
3503 /* don't report if not compressed */
3504 if (total >= plain)
3505 return;
3506
3507 /* total < plain. check for overflow, still */
3508 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3509 : (1000 * total / plain);
3510
3511 if (r > 1000)
3512 r = 1000;
3513
3514 r = 1000 - r;
3515 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3516 "total %u; compression: %u.%u%%\n",
3517 direction,
3518 c->bytes[1], c->packets[1],
3519 c->bytes[0], c->packets[0],
3520 total, r/10, r % 10);
3521}
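/*
 * The ratio computation above avoids a 32-bit overflow: when 1000 * total
 * would not fit in an unsigned int, it divides plain by 1000 instead. A
 * standalone sketch of the same guard:
 */

#include <stdio.h>
#include <limits.h>

/* Savings in tenths of a percent; 0 means "not compressed at all". */
static unsigned compression_per_mille(unsigned total, unsigned plain)
{
        unsigned r;

        if (total >= plain)
                return 0;
        r = (total > UINT_MAX / 1000) ? (total / (plain / 1000))
                                      : (1000 * total / plain);
        if (r > 1000)
                r = 1000;
        return 1000 - r;
}

int main(void)
{
        unsigned r = compression_per_mille(1234, 65536);

        printf("compression: %u.%u%%\n", r / 10, r % 10);	/* 98.2% */
        return 0;
}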
3522
3523/* Since we are processing the bitfield from lower addresses to higher,
3524 	 it does not matter whether we process it in 32 bit chunks or 64 bit
3525 chunks as long as it is little endian. (Understand it as byte stream,
3526 	 beginning with the lowest byte...) If we used big endian
3527 we would need to process it from the highest address to the lowest,
3528 in order to be agnostic to the 32 vs 64 bits issue.
3529
3530 returns 0 on failure, 1 if we successfully received it. */
Philipp Reisner02918be2010-08-20 14:35:10 +02003531static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003532{
3533 struct bm_xfer_ctx c;
3534 void *buffer;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003535 int err;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003536 int ok = false;
Philipp Reisner02918be2010-08-20 14:35:10 +02003537 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003538
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003539 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3540 /* you are supposed to send additional out-of-sync information
3541 * if you actually set bits during this phase */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003542
3543 /* maybe we should use some per thread scratch page,
3544 * and allocate that during initial device creation? */
3545 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3546 if (!buffer) {
3547 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3548 goto out;
3549 }
3550
3551 c = (struct bm_xfer_ctx) {
3552 .bm_bits = drbd_bm_bits(mdev),
3553 .bm_words = drbd_bm_words(mdev),
3554 };
3555
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003556 for(;;) {
Philipp Reisner02918be2010-08-20 14:35:10 +02003557 if (cmd == P_BITMAP) {
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003558 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
Philipp Reisner02918be2010-08-20 14:35:10 +02003559 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003560 /* MAYBE: sanity check that we speak proto >= 90,
3561 * and the feature is enabled! */
3562 struct p_compressed_bm *p;
3563
Philipp Reisner02918be2010-08-20 14:35:10 +02003564 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003565 dev_err(DEV, "ReportCBitmap packet too large\n");
3566 goto out;
3567 }
3568 /* use the page buff */
3569 p = buffer;
3570 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003571 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003572 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003573 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3574 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003575 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003576 }
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003577 err = decode_bitmap_c(mdev, p, &c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003578 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003579 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003580 goto out;
3581 }
3582
Philipp Reisner02918be2010-08-20 14:35:10 +02003583 c.packets[cmd == P_BITMAP]++;
3584 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003585
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003586 if (err <= 0) {
3587 if (err < 0)
3588 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003589 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003590 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003591 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003592 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003593 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003594
3595 INFO_bm_xfer_stats(mdev, "receive", &c);
3596
3597 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003598 enum drbd_state_rv rv;
3599
Philipp Reisnerb411b362009-09-25 16:07:19 -07003600 ok = !drbd_send_bitmap(mdev);
3601 if (!ok)
3602 goto out;
3603 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003604 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3605 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003606 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3607 /* admin may have requested C_DISCONNECTING,
3608 * other threads may have noticed network errors */
3609 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3610 drbd_conn_str(mdev->state.conn));
3611 }
3612
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003613 ok = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003614 out:
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003615 drbd_bm_unlock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003616 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3617 drbd_start_resync(mdev, C_SYNC_SOURCE);
3618 free_page((unsigned long) buffer);
3619 return ok;
3620}
3621
Philipp Reisner02918be2010-08-20 14:35:10 +02003622static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003623{
3624 /* TODO zero copy sink :) */
3625 static char sink[128];
3626 int size, want, r;
3627
Philipp Reisner02918be2010-08-20 14:35:10 +02003628 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3629 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003630
Philipp Reisner02918be2010-08-20 14:35:10 +02003631 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003632 while (size > 0) {
3633 want = min_t(int, size, sizeof(sink));
3634 r = drbd_recv(mdev, sink, want);
3635 ERR_IF(r <= 0) break;
3636 size -= r;
3637 }
3638 return size == 0;
3639}
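/*
 * receive_skip() drains an unknown payload through a small scratch buffer
 * instead of allocating data_size bytes. A userspace sketch of the same loop,
 * reading from a file descriptor (read() stands in for drbd_recv()):
 */

#include <stdio.h>
#include <unistd.h>

/* Discard "size" bytes from fd; returns 0 on success, -1 on error/EOF. */
static int drain_fd(int fd, size_t size)
{
        static char sink[128];

        while (size > 0) {
                size_t want = size < sizeof(sink) ? size : sizeof(sink);
                ssize_t r = read(fd, sink, want);

                if (r <= 0)
                        return -1;
                size -= (size_t)r;
        }
        return 0;
}

int main(void)
{
        /* e.g. skip 4096 bytes of stdin; harmless if stdin is shorter */
        if (drain_fd(STDIN_FILENO, 4096) < 0)
                fprintf(stderr, "short read while skipping payload\n");
        return 0;
}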
3640
Philipp Reisner02918be2010-08-20 14:35:10 +02003641static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003642{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003643 /* Make sure we've acked all the TCP data associated
3644 * with the data requests being unplugged */
3645 drbd_tcp_quickack(mdev->data.socket);
3646
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003647 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003648}
3649
Philipp Reisner73a01a12010-10-27 14:33:00 +02003650static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3651{
3652 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3653
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003654 switch (mdev->state.conn) {
3655 case C_WF_SYNC_UUID:
3656 case C_WF_BITMAP_T:
3657 case C_BEHIND:
3658 break;
3659 default:
3660 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3661 drbd_conn_str(mdev->state.conn));
3662 }
3663
Philipp Reisner73a01a12010-10-27 14:33:00 +02003664 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3665
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003666 return true;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003667}
3668
Philipp Reisner02918be2010-08-20 14:35:10 +02003669typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003670
Philipp Reisner02918be2010-08-20 14:35:10 +02003671struct data_cmd {
3672 int expect_payload;
3673 size_t pkt_size;
3674 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003675};
3676
Philipp Reisner02918be2010-08-20 14:35:10 +02003677static struct data_cmd drbd_cmd_handler[] = {
3678 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3679 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3680 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3681 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3682 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3683 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3684 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3685 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3686 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3687 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3688 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3689 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3690 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3691 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3692 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3693 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3694 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3695 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3696 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3697 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3698 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003699 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003700 /* anything missing from this table is in
3701 * the asender_tbl, see get_asender_cmd */
3702 [P_MAX_CMD] = { 0, 0, NULL },
3703};
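/*
 * The table above drives the dispatch loop in drbdd() below: each command
 * carries the size of its fixed part, whether extra payload is allowed, and
 * the handler to call. A self-contained sketch of the same pattern with a
 * toy command set (the names and sizes are made up for the sketch):
 */

#include <stdio.h>
#include <stddef.h>

enum toy_cmd { CMD_PING, CMD_DATA, CMD_MAX };

typedef int (*handler_f)(unsigned payload_len);

struct cmd_entry {
        int expect_payload;	/* may the packet carry extra bytes? */
        size_t pkt_size;	/* fixed part that must always be present */
        handler_f function;
};

static int handle_ping(unsigned len) { printf("ping, %u extra\n", len); return 1; }
static int handle_data(unsigned len) { printf("data, %u extra\n", len); return 1; }

static const struct cmd_entry table[] = {
        [CMD_PING] = { 0, 8,  handle_ping },
        [CMD_DATA] = { 1, 24, handle_data },
        [CMD_MAX]  = { 0, 0,  NULL },
};

/* Same shape as the drbdd() loop: validate the command, reject unexpected
 * payload, then call through the table. */
static int dispatch(unsigned cmd, unsigned packet_size)
{
        if (cmd >= CMD_MAX || !table[cmd].function)
                return 0;	/* unknown packet type */
        if (packet_size < table[cmd].pkt_size)
                return 0;	/* truncated packet (extra guard for the sketch) */
        if (packet_size > table[cmd].pkt_size && !table[cmd].expect_payload)
                return 0;	/* no payload expected */
        return table[cmd].function(packet_size - (unsigned)table[cmd].pkt_size);
}

int main(void)
{
        dispatch(CMD_DATA, 24 + 4096);
        if (!dispatch(CMD_PING, 8 + 1))
                printf("rejected: ping must not carry payload\n");
        return 0;
}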
3704
3705/* All handler functions that expect a sub-header get that sub-header in
3706 mdev->data.rbuf.header.head.payload.
3707
3708 Usually in mdev->data.rbuf.header.head the callback can find the usual
3709 p_header, but they may not rely on that. Since there is also p_header95 !
3710 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003711
3712static void drbdd(struct drbd_conf *mdev)
3713{
Philipp Reisner02918be2010-08-20 14:35:10 +02003714 union p_header *header = &mdev->data.rbuf.header;
3715 unsigned int packet_size;
3716 enum drbd_packets cmd;
3717 size_t shs; /* sub header size */
3718 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003719
3720 while (get_t_state(&mdev->receiver) == Running) {
3721 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003722 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3723 goto err_out;
3724
3725 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3726 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3727 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003728 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003729
Philipp Reisner02918be2010-08-20 14:35:10 +02003730 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02003731 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3732 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3733 goto err_out;
3734 }
3735
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003736 if (shs) {
3737 rv = drbd_recv(mdev, &header->h80.payload, shs);
3738 if (unlikely(rv != shs)) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01003739 if (!signal_pending(current))
3740 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003741 goto err_out;
3742 }
3743 }
3744
Philipp Reisner02918be2010-08-20 14:35:10 +02003745 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3746
3747 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003748 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003749 cmdname(cmd), packet_size);
3750 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003751 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003752 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003753
Philipp Reisner02918be2010-08-20 14:35:10 +02003754 if (0) {
3755 err_out:
3756 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003757 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003758 /* If we leave here, we probably want to update at least the
3759 * "Connected" indicator on stable storage. Do so explicitly here. */
3760 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003761}
3762
3763void drbd_flush_workqueue(struct drbd_conf *mdev)
3764{
3765 struct drbd_wq_barrier barr;
3766
3767 barr.w.cb = w_prev_work_done;
3768 init_completion(&barr.done);
3769 drbd_queue_work(&mdev->data.work, &barr.w);
3770 wait_for_completion(&barr.done);
3771}
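/*
 * drbd_flush_workqueue() flushes by queueing a barrier work item and waiting
 * for its completion: once the worker has processed the barrier, everything
 * queued before it has been processed as well. A userspace sketch of that
 * handshake using pthreads (the single worker thread stands in for the drbd
 * worker, and the "queue" is reduced to the barrier item itself):
 */

#include <pthread.h>
#include <stdio.h>

struct wq_barrier {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static void *worker(void *arg)
{
        struct wq_barrier *barr = arg;

        /* ... earlier queued work would run here ... */
        pthread_mutex_lock(&barr->lock);
        barr->done = 1;			/* like w_prev_work_done() */
        pthread_cond_signal(&barr->cond);
        pthread_mutex_unlock(&barr->lock);
        return NULL;
}

int main(void)
{
        struct wq_barrier barr = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };
        pthread_t t;

        pthread_create(&t, NULL, worker, &barr);
        pthread_mutex_lock(&barr.lock);	/* wait_for_completion(&barr.done) */
        while (!barr.done)
                pthread_cond_wait(&barr.cond, &barr.lock);
        pthread_mutex_unlock(&barr.lock);
        pthread_join(t, NULL);
        printf("work queue flushed\n");
        return 0;
}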
3772
Philipp Reisnerf70b35112010-06-24 14:34:40 +02003773void drbd_free_tl_hash(struct drbd_conf *mdev)
3774{
3775 struct hlist_head *h;
3776
3777 spin_lock_irq(&mdev->req_lock);
3778
3779 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3780 spin_unlock_irq(&mdev->req_lock);
3781 return;
3782 }
3783 /* paranoia code */
3784 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3785 if (h->first)
3786 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3787 (int)(h - mdev->ee_hash), h->first);
3788 kfree(mdev->ee_hash);
3789 mdev->ee_hash = NULL;
3790 mdev->ee_hash_s = 0;
3791
3792 /* paranoia code */
3793 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3794 if (h->first)
3795 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3796 (int)(h - mdev->tl_hash), h->first);
3797 kfree(mdev->tl_hash);
3798 mdev->tl_hash = NULL;
3799 mdev->tl_hash_s = 0;
3800 spin_unlock_irq(&mdev->req_lock);
3801}
3802
Philipp Reisnerb411b362009-09-25 16:07:19 -07003803static void drbd_disconnect(struct drbd_conf *mdev)
3804{
3805 enum drbd_fencing_p fp;
3806 union drbd_state os, ns;
3807 int rv = SS_UNKNOWN_ERROR;
3808 unsigned int i;
3809
3810 if (mdev->state.conn == C_STANDALONE)
3811 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003812
Lars Ellenberg545752d2011-12-05 14:39:25 +01003813 /* We are about to start the cleanup after connection loss.
3814 * Make sure drbd_make_request knows about that.
3815 * Usually we should be in some network failure state already,
3816 * but just in case we are not, we fix it up here.
3817 */
3818 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
3819
Philipp Reisnerb411b362009-09-25 16:07:19 -07003820 /* asender does not clean up anything. it must not interfere, either */
3821 drbd_thread_stop(&mdev->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003822 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003823
Philipp Reisner85719572010-07-21 10:20:17 +02003824 /* wait for current activity to cease. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003825 spin_lock_irq(&mdev->req_lock);
3826 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3827 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3828 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3829 spin_unlock_irq(&mdev->req_lock);
3830
3831 /* We do not have data structures that would allow us to
3832 * get the rs_pending_cnt down to 0 again.
3833 * * On C_SYNC_TARGET we do not have any data structures describing
3834 * the pending RSDataRequest's we have sent.
3835 * * On C_SYNC_SOURCE there is no data structure that tracks
3836 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3837 * And no, it is not the sum of the reference counts in the
3838 * resync_LRU. The resync_LRU tracks the whole operation including
3839 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3840 * on the fly. */
3841 drbd_rs_cancel_all(mdev);
3842 mdev->rs_total = 0;
3843 mdev->rs_failed = 0;
3844 atomic_set(&mdev->rs_pending_cnt, 0);
3845 wake_up(&mdev->misc_wait);
3846
3847 /* make sure syncer is stopped and w_resume_next_sg queued */
3848 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003849 resync_timer_fn((unsigned long)mdev);
3850
Philipp Reisnerb411b362009-09-25 16:07:19 -07003851 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3852 * w_make_resync_request etc. which may still be on the worker queue
3853 * to be "canceled" */
3854 drbd_flush_workqueue(mdev);
3855
3856 /* This also does reclaim_net_ee(). If we do this too early, we might
3857 * miss some resync ee and pages.*/
3858 drbd_process_done_ee(mdev);
3859
3860 kfree(mdev->p_uuid);
3861 mdev->p_uuid = NULL;
3862
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003863 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003864 tl_clear(mdev);
3865
Philipp Reisnerb411b362009-09-25 16:07:19 -07003866 dev_info(DEV, "Connection closed\n");
3867
3868 drbd_md_sync(mdev);
3869
3870 fp = FP_DONT_CARE;
3871 if (get_ldev(mdev)) {
3872 fp = mdev->ldev->dc.fencing;
3873 put_ldev(mdev);
3874 }
3875
Philipp Reisner87f7be42010-06-11 13:56:33 +02003876 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3877 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003878
3879 spin_lock_irq(&mdev->req_lock);
3880 os = mdev->state;
3881 if (os.conn >= C_UNCONNECTED) {
3882 /* Do not restart in case we are C_DISCONNECTING */
3883 ns = os;
3884 ns.conn = C_UNCONNECTED;
3885 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3886 }
3887 spin_unlock_irq(&mdev->req_lock);
3888
3889 if (os.conn == C_DISCONNECTING) {
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003890 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003891
Philipp Reisnerb411b362009-09-25 16:07:19 -07003892 crypto_free_hash(mdev->cram_hmac_tfm);
3893 mdev->cram_hmac_tfm = NULL;
3894
3895 kfree(mdev->net_conf);
3896 mdev->net_conf = NULL;
3897 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3898 }
3899
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003900 /* serialize with bitmap writeout triggered by the state change,
3901 * if any. */
3902 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3903
Philipp Reisnerb411b362009-09-25 16:07:19 -07003904 /* tcp_close and release of sendpage pages can be deferred. I don't
3905 * want to use SO_LINGER, because apparently it can be deferred for
3906 * more than 20 seconds (longest time I checked).
3907 *
3908 	 * Actually we don't care exactly when the network stack does its
3909 * put_page(), but release our reference on these pages right here.
3910 */
3911 i = drbd_release_ee(mdev, &mdev->net_ee);
3912 if (i)
3913 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003914 i = atomic_read(&mdev->pp_in_use_by_net);
3915 if (i)
3916 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003917 i = atomic_read(&mdev->pp_in_use);
3918 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003919 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003920
3921 D_ASSERT(list_empty(&mdev->read_ee));
3922 D_ASSERT(list_empty(&mdev->active_ee));
3923 D_ASSERT(list_empty(&mdev->sync_ee));
3924 D_ASSERT(list_empty(&mdev->done_ee));
3925
3926 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3927 atomic_set(&mdev->current_epoch->epoch_size, 0);
3928 D_ASSERT(list_empty(&mdev->current_epoch->list));
3929}
3930
3931/*
3932 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3933 * we can agree on is stored in agreed_pro_version.
3934 *
3935 * feature flags and the reserved array should be enough room for future
3936 * enhancements of the handshake protocol, and possible plugins...
3937 *
3938 * for now, they are expected to be zero, but ignored.
3939 */
3940static int drbd_send_handshake(struct drbd_conf *mdev)
3941{
3942 /* ASSERT current == mdev->receiver ... */
3943 struct p_handshake *p = &mdev->data.sbuf.handshake;
3944 int ok;
3945
3946 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3947 dev_err(DEV, "interrupted during initial handshake\n");
3948 return 0; /* interrupted. not ok. */
3949 }
3950
3951 if (mdev->data.socket == NULL) {
3952 mutex_unlock(&mdev->data.mutex);
3953 return 0;
3954 }
3955
3956 memset(p, 0, sizeof(*p));
3957 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3958 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3959 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02003960 (struct p_header80 *)p, sizeof(*p), 0 );
Philipp Reisnerb411b362009-09-25 16:07:19 -07003961 mutex_unlock(&mdev->data.mutex);
3962 return ok;
3963}
3964
3965/*
3966 * return values:
3967 * 1 yes, we have a valid connection
3968 * 0 oops, did not work out, please try again
3969 * -1 peer talks different language,
3970 * no point in trying again, please go standalone.
3971 */
3972static int drbd_do_handshake(struct drbd_conf *mdev)
3973{
3974 /* ASSERT current == mdev->receiver ... */
3975 struct p_handshake *p = &mdev->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02003976 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3977 unsigned int length;
3978 enum drbd_packets cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003979 int rv;
3980
3981 rv = drbd_send_handshake(mdev);
3982 if (!rv)
3983 return 0;
3984
Philipp Reisner02918be2010-08-20 14:35:10 +02003985 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003986 if (!rv)
3987 return 0;
3988
Philipp Reisner02918be2010-08-20 14:35:10 +02003989 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003990 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003991 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003992 return -1;
3993 }
3994
Philipp Reisner02918be2010-08-20 14:35:10 +02003995 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003996 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003997 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003998 return -1;
3999 }
4000
4001 rv = drbd_recv(mdev, &p->head.payload, expect);
4002
4003 if (rv != expect) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004004 if (!signal_pending(current))
4005 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004006 return 0;
4007 }
4008
Philipp Reisnerb411b362009-09-25 16:07:19 -07004009 p->protocol_min = be32_to_cpu(p->protocol_min);
4010 p->protocol_max = be32_to_cpu(p->protocol_max);
4011 if (p->protocol_max == 0)
4012 p->protocol_max = p->protocol_min;
4013
4014 if (PRO_VERSION_MAX < p->protocol_min ||
4015 PRO_VERSION_MIN > p->protocol_max)
4016 goto incompat;
4017
4018 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4019
4020 dev_info(DEV, "Handshake successful: "
4021 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
4022
4023 return 1;
4024
4025 incompat:
4026 dev_err(DEV, "incompatible DRBD dialects: "
4027 "I support %d-%d, peer supports %d-%d\n",
4028 PRO_VERSION_MIN, PRO_VERSION_MAX,
4029 p->protocol_min, p->protocol_max);
4030 return -1;
4031}
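/*
 * Version negotiation above boils down to interval arithmetic: the two
 * [min, max] ranges must overlap, and the agreed version is the smaller of
 * the two maxima; a peer_max of 0 means the peer only speaks exactly
 * peer_min. A standalone sketch (MY_VERSION_MIN/MAX are placeholder values,
 * not the real PRO_VERSION_MIN/MAX):
 */

#include <stdio.h>

#define MY_VERSION_MIN 86	/* assumption for the sketch */
#define MY_VERSION_MAX 96	/* assumption for the sketch */

/* Returns the agreed protocol version, or -1 when the ranges do not overlap
 * (the "incompat" case above). */
static int negotiate_version(int peer_min, int peer_max)
{
        if (peer_max == 0)
                peer_max = peer_min;
        if (MY_VERSION_MAX < peer_min || MY_VERSION_MIN > peer_max)
                return -1;
        return MY_VERSION_MAX < peer_max ? MY_VERSION_MAX : peer_max;
}

int main(void)
{
        printf("%d\n", negotiate_version(86, 100));	/* 96: capped by my maximum */
        printf("%d\n", negotiate_version(91, 0));	/* 91: peer speaks exactly 91 */
        printf("%d\n", negotiate_version(97, 120));	/* -1: no overlap */
        return 0;
}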
4032
4033#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4034static int drbd_do_auth(struct drbd_conf *mdev)
4035{
4036 	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4037 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004038 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004039}
4040#else
4041#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01004042
4043/* Return value:
4044 1 - auth succeeded,
4045 0 - failed, try again (network error),
4046 -1 - auth failed, don't try again.
4047*/
4048
Philipp Reisnerb411b362009-09-25 16:07:19 -07004049static int drbd_do_auth(struct drbd_conf *mdev)
4050{
4051 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4052 struct scatterlist sg;
4053 char *response = NULL;
4054 char *right_response = NULL;
4055 char *peers_ch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004056 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4057 unsigned int resp_size;
4058 struct hash_desc desc;
Philipp Reisner02918be2010-08-20 14:35:10 +02004059 enum drbd_packets cmd;
4060 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004061 int rv;
4062
4063 desc.tfm = mdev->cram_hmac_tfm;
4064 desc.flags = 0;
4065
4066 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4067 (u8 *)mdev->net_conf->shared_secret, key_len);
4068 if (rv) {
4069 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004070 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004071 goto fail;
4072 }
4073
4074 get_random_bytes(my_challenge, CHALLENGE_LEN);
4075
4076 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4077 if (!rv)
4078 goto fail;
4079
Philipp Reisner02918be2010-08-20 14:35:10 +02004080 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004081 if (!rv)
4082 goto fail;
4083
Philipp Reisner02918be2010-08-20 14:35:10 +02004084 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004085 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004086 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004087 rv = 0;
4088 goto fail;
4089 }
4090
Philipp Reisner02918be2010-08-20 14:35:10 +02004091 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004092 dev_err(DEV, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004093 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004094 goto fail;
4095 }
4096
Philipp Reisner02918be2010-08-20 14:35:10 +02004097 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004098 if (peers_ch == NULL) {
4099 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004100 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004101 goto fail;
4102 }
4103
Philipp Reisner02918be2010-08-20 14:35:10 +02004104 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004105
Philipp Reisner02918be2010-08-20 14:35:10 +02004106 if (rv != length) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004107 if (!signal_pending(current))
4108 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004109 rv = 0;
4110 goto fail;
4111 }
4112
4113 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4114 response = kmalloc(resp_size, GFP_NOIO);
4115 if (response == NULL) {
4116 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004117 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004118 goto fail;
4119 }
4120
4121 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004122 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004123
4124 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4125 if (rv) {
4126 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004127 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004128 goto fail;
4129 }
4130
4131 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4132 if (!rv)
4133 goto fail;
4134
Philipp Reisner02918be2010-08-20 14:35:10 +02004135 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004136 if (!rv)
4137 goto fail;
4138
Philipp Reisner02918be2010-08-20 14:35:10 +02004139 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004140 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004141 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004142 rv = 0;
4143 goto fail;
4144 }
4145
Philipp Reisner02918be2010-08-20 14:35:10 +02004146 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004147 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4148 rv = 0;
4149 goto fail;
4150 }
4151
4152 rv = drbd_recv(mdev, response , resp_size);
4153
4154 if (rv != resp_size) {
Lars Ellenberg0ddc5542011-01-21 12:35:15 +01004155 if (!signal_pending(current))
4156 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004157 rv = 0;
4158 goto fail;
4159 }
4160
4161 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004162 if (right_response == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004163 dev_err(DEV, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004164 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004165 goto fail;
4166 }
4167
4168 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4169
4170 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4171 if (rv) {
4172 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004173 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004174 goto fail;
4175 }
4176
4177 rv = !memcmp(response, right_response, resp_size);
4178
4179 if (rv)
4180 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4181 resp_size, mdev->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004182 else
4183 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004184
4185 fail:
4186 kfree(peers_ch);
4187 kfree(response);
4188 kfree(right_response);
4189
4190 return rv;
4191}
4192#endif
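/*
 * Illustrative only, not part of the driver: the same challenge/response
 * computation that drbd_do_auth() performs above, sketched in user space
 * with OpenSSL.  Assumptions (not taken from this file): the HMAC is keyed
 * with the shared secret from the net config, and SHA-1 stands in for the
 * configured cram-hmac-alg; all names below are made up for the sketch.
 */
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/crypto.h>

/* response = HMAC(secret, peer's challenge) -- what goes back as P_AUTH_RESPONSE */
static unsigned int auth_response(const char *secret,
				  const unsigned char *peers_ch, size_t ch_len,
				  unsigned char resp[EVP_MAX_MD_SIZE])
{
	unsigned int resp_len = 0;

	HMAC(EVP_sha1(), secret, (int)strlen(secret),
	     peers_ch, ch_len, resp, &resp_len);
	return resp_len;
}

/* verify the peer's response against the challenge we sent earlier */
static int auth_verify(const char *secret,
		       const unsigned char *my_ch, size_t ch_len,
		       const unsigned char *peers_resp, unsigned int resp_len)
{
	unsigned char expect[EVP_MAX_MD_SIZE];
	unsigned int expect_len = auth_response(secret, my_ch, ch_len, expect);

	/* constant-time compare; the kernel code above uses plain memcmp() */
	return resp_len == expect_len &&
	       CRYPTO_memcmp(expect, peers_resp, resp_len) == 0;
}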
4193
4194int drbdd_init(struct drbd_thread *thi)
4195{
4196 struct drbd_conf *mdev = thi->mdev;
4197 unsigned int minor = mdev_to_minor(mdev);
4198 int h;
4199
4200 sprintf(current->comm, "drbd%d_receiver", minor);
4201
4202 dev_info(DEV, "receiver (re)started\n");
4203
4204 do {
4205 h = drbd_connect(mdev);
4206 if (h == 0) {
4207 drbd_disconnect(mdev);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004208 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004209 }
4210 if (h == -1) {
4211 dev_warn(DEV, "Discarding network configuration.\n");
4212 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4213 }
4214 } while (h == 0);
4215
4216 if (h > 0) {
4217 if (get_net_conf(mdev)) {
4218 drbdd(mdev);
4219 put_net_conf(mdev);
4220 }
4221 }
4222
4223 drbd_disconnect(mdev);
4224
4225 dev_info(DEV, "receiver terminated\n");
4226 return 0;
4227}
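/*
 * Illustrative only: the retry policy of drbdd_init() above, pulled out in
 * isolation as a user-space sketch.  The callback names and the enum are
 * made up; in the driver the three cases are drbd_connect() returning
 * >0 (run drbdd()), 0 (tear down, wait ~1s, retry) and -1 (drop the
 * network configuration).
 */
#include <unistd.h>

enum connect_result { CONN_FATAL = -1, CONN_RETRY = 0, CONN_OK = 1 };

static void receiver_loop(enum connect_result (*try_connect)(void),
			  void (*run)(void), void (*teardown)(void))
{
	enum connect_result h;

	do {
		h = try_connect();
		if (h == CONN_RETRY) {	/* transient failure: back off ~1s */
			teardown();
			sleep(1);
		}
		/* CONN_FATAL: give up on this configuration entirely */
	} while (h == CONN_RETRY);

	if (h == CONN_OK)
		run();
	teardown();
}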
4228
4229/* ********* acknowledge sender ******** */
4230
Philipp Reisner0b70a132010-08-20 13:36:10 +02004231static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004232{
4233 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4234
4235 int retcode = be32_to_cpu(p->retcode);
4236
4237 if (retcode >= SS_SUCCESS) {
4238 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4239 } else {
4240 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4241 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4242 drbd_set_st_err_str(retcode), retcode);
4243 }
4244 wake_up(&mdev->state_wait);
4245
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004246 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004247}
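/*
 * Illustrative only: the counterpart that got_RqSReply() wakes up.  The
 * side that sent the state-change request sleeps on mdev->state_wait until
 * one of the two flag bits set above appears; a simplified sketch of that
 * pattern (not the real drbd_request_state() code, and the return
 * convention here is made up):
 */
static int example_wait_for_peer_reply(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait,
		   test_bit(CL_ST_CHG_SUCCESS, &mdev->flags) ||
		   test_bit(CL_ST_CHG_FAIL, &mdev->flags));

	return test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
}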
4248
Philipp Reisner0b70a132010-08-20 13:36:10 +02004249static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004250{
4251 return drbd_send_ping_ack(mdev);
4252
4253}
4254
Philipp Reisner0b70a132010-08-20 13:36:10 +02004255static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004256{
4257 /* restore idle timeout */
4258 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
Philipp Reisner309d1602010-03-02 15:03:44 +01004259 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4260 wake_up(&mdev->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004261
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004262 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004263}
4264
Philipp Reisner0b70a132010-08-20 13:36:10 +02004265static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004266{
4267 struct p_block_ack *p = (struct p_block_ack *)h;
4268 sector_t sector = be64_to_cpu(p->sector);
4269 int blksize = be32_to_cpu(p->blksize);
4270
4271 D_ASSERT(mdev->agreed_pro_version >= 89);
4272
4273 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4274
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004275 if (get_ldev(mdev)) {
4276 drbd_rs_complete_io(mdev, sector);
4277 drbd_set_in_sync(mdev, sector, blksize);
4278 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4279 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4280 put_ldev(mdev);
4281 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004282 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004283 atomic_add(blksize >> 9, &mdev->rs_sect_in); /* blksize is bytes; rs_sect_in counts 512-byte sectors */
Philipp Reisnerb411b362009-09-25 16:07:19 -07004284
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004285 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004286}
4287
4288/* when we receive the ACK for a write request,
4289 * verify that we actually know about it */
4290static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4291 u64 id, sector_t sector)
4292{
4293 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4294 struct hlist_node *n;
4295 struct drbd_request *req;
4296
Bart Van Assche24c48302011-05-21 18:32:29 +02004297 hlist_for_each_entry(req, n, slot, collision) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004298 if ((unsigned long)req == (unsigned long)id) {
4299 if (req->sector != sector) {
4300 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4301 "wrong sector (%llus versus %llus)\n", req,
4302 (unsigned long long)req->sector,
4303 (unsigned long long)sector);
4304 break;
4305 }
4306 return req;
4307 }
4308 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004309 return NULL;
4310}
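/*
 * Illustrative only: the block_id round trip that makes the lookup above
 * work.  When the write was sent, the request pointer itself was used as
 * the opaque wire cookie (roughly as sketched below; struct p_data and
 * drbd_send_dblock() live elsewhere in the driver, so check the real
 * sender for details), which is why the ACK can be matched by comparing
 * the echoed id against the request address, with the sector as a sanity
 * check.
 */
static void example_fill_block_id(struct p_data *p, struct drbd_request *req)
{
	p->sector   = cpu_to_be64(req->sector);
	p->block_id = (unsigned long)req;	/* echoed back verbatim in the ACK */
}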
4311
4312typedef struct drbd_request *(req_validator_fn)
4313 (struct drbd_conf *mdev, u64 id, sector_t sector);
4314
4315static int validate_req_change_req_state(struct drbd_conf *mdev,
4316 u64 id, sector_t sector, req_validator_fn validator,
4317 const char *func, enum drbd_req_event what)
4318{
4319 struct drbd_request *req;
4320 struct bio_and_error m;
4321
4322 spin_lock_irq(&mdev->req_lock);
4323 req = validator(mdev, id, sector);
4324 if (unlikely(!req)) {
4325 spin_unlock_irq(&mdev->req_lock);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004326
4327 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
4328 (void *)(unsigned long)id, (unsigned long long)sector);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004329 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004330 }
4331 __req_mod(req, what, &m);
4332 spin_unlock_irq(&mdev->req_lock);
4333
4334 if (m.bio)
4335 complete_master_bio(mdev, &m);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004336 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004337}
4338
Philipp Reisner0b70a132010-08-20 13:36:10 +02004339static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004340{
4341 struct p_block_ack *p = (struct p_block_ack *)h;
4342 sector_t sector = be64_to_cpu(p->sector);
4343 int blksize = be32_to_cpu(p->blksize);
4344 enum drbd_req_event what;
4345
4346 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4347
4348 if (is_syncer_block_id(p->block_id)) {
4349 drbd_set_in_sync(mdev, sector, blksize);
4350 dec_rs_pending(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004351 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004352 }
4353 switch (be16_to_cpu(h->command)) {
4354 case P_RS_WRITE_ACK:
4355 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4356 what = write_acked_by_peer_and_sis;
4357 break;
4358 case P_WRITE_ACK:
4359 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4360 what = write_acked_by_peer;
4361 break;
4362 case P_RECV_ACK:
4363 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4364 what = recv_acked_by_peer;
4365 break;
4366 case P_DISCARD_ACK:
4367 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4368 what = conflict_discarded_by_peer;
4369 break;
4370 default:
4371 D_ASSERT(0);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004372 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004373 }
4374
4375 return validate_req_change_req_state(mdev, p->block_id, sector,
4376 _ack_id_to_req, __func__ , what);
4377}
4378
Philipp Reisner0b70a132010-08-20 13:36:10 +02004379static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004380{
4381 struct p_block_ack *p = (struct p_block_ack *)h;
4382 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004383 int size = be32_to_cpu(p->blksize);
4384 struct drbd_request *req;
4385 struct bio_and_error m;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004386
4387 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4388
4389 if (is_syncer_block_id(p->block_id)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004390 dec_rs_pending(mdev);
4391 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004392 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004393 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01004394
4395 spin_lock_irq(&mdev->req_lock);
4396 req = _ack_id_to_req(mdev, p->block_id, sector);
4397 if (!req) {
4398 spin_unlock_irq(&mdev->req_lock);
4399 if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
4400 mdev->net_conf->wire_protocol == DRBD_PROT_B) {
4401 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4402 The master bio might already be completed, therefore the
4403 request is no longer in the collision hash.
4404 => Do not try to validate block_id as request. */
4405 /* In Protocol B we might already have got a P_RECV_ACK
4406 but then get a P_NEG_ACK afterwards. */
4407 drbd_set_out_of_sync(mdev, sector, size);
4408 return true;
4409 } else {
4410 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
4411 (void *)(unsigned long)p->block_id, (unsigned long long)sector);
4412 return false;
4413 }
4414 }
4415 __req_mod(req, neg_acked, &m);
4416 spin_unlock_irq(&mdev->req_lock);
4417
4418 if (m.bio)
4419 complete_master_bio(mdev, &m);
4420 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004421}
4422
Philipp Reisner0b70a132010-08-20 13:36:10 +02004423static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004424{
4425 struct p_block_ack *p = (struct p_block_ack *)h;
4426 sector_t sector = be64_to_cpu(p->sector);
4427
4428 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4429 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4430 (unsigned long long)sector, be32_to_cpu(p->blksize));
4431
4432 return validate_req_change_req_state(mdev, p->block_id, sector,
4433 _ar_id_to_req, __func__ , neg_acked);
4434}
4435
Philipp Reisner0b70a132010-08-20 13:36:10 +02004436static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004437{
4438 sector_t sector;
4439 int size;
4440 struct p_block_ack *p = (struct p_block_ack *)h;
4441
4442 sector = be64_to_cpu(p->sector);
4443 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004444
4445 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4446
4447 dec_rs_pending(mdev);
4448
4449 if (get_ldev_if_state(mdev, D_FAILED)) {
4450 drbd_rs_complete_io(mdev, sector);
Philipp Reisnerd612d302010-12-27 10:53:28 +01004451 switch (be16_to_cpu(h->command)) {
4452 case P_NEG_RS_DREPLY:
4453 drbd_rs_failed_io(mdev, sector, size); /* fall through */
4454 case P_RS_CANCEL:
4455 break;
4456 default:
4457 D_ASSERT(0);
4458 put_ldev(mdev);
4459 return false;
4460 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004461 put_ldev(mdev);
4462 }
4463
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004464 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004465}
4466
Philipp Reisner0b70a132010-08-20 13:36:10 +02004467static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004468{
4469 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4470
4471 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4472
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004473 if (mdev->state.conn == C_AHEAD &&
4474 atomic_read(&mdev->ap_in_flight) == 0 &&
Philipp Reisnere89868a2011-11-09 21:04:03 +01004475 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
Philipp Reisner370a43e2011-01-14 16:03:11 +01004476 mdev->start_resync_timer.expires = jiffies + HZ;
4477 add_timer(&mdev->start_resync_timer);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004478 }
4479
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004480 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004481}
4482
Philipp Reisner0b70a132010-08-20 13:36:10 +02004483static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004484{
4485 struct p_block_ack *p = (struct p_block_ack *)h;
4486 struct drbd_work *w;
4487 sector_t sector;
4488 int size;
4489
4490 sector = be64_to_cpu(p->sector);
4491 size = be32_to_cpu(p->blksize);
4492
4493 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4494
4495 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4496 drbd_ov_oos_found(mdev, sector, size);
4497 else
4498 ov_oos_print(mdev);
4499
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004500 if (!get_ldev(mdev))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004501 return true;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004502
Philipp Reisnerb411b362009-09-25 16:07:19 -07004503 drbd_rs_complete_io(mdev, sector);
4504 dec_rs_pending(mdev);
4505
Lars Ellenbergea5442a2010-11-05 09:48:01 +01004506 --mdev->ov_left;
4507
4508 /* let's advance progress step marks only for every other megabyte */
4509 if ((mdev->ov_left & 0x200) == 0x200)
4510 drbd_advance_rs_marks(mdev, mdev->ov_left);
4511
4512 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004513 w = kmalloc(sizeof(*w), GFP_NOIO);
4514 if (w) {
4515 w->cb = w_ov_finished;
4516 drbd_queue_work_front(&mdev->data.work, w);
4517 } else {
4518 dev_err(DEV, "kmalloc(w) failed.");
4519 ov_oos_print(mdev);
4520 drbd_resync_finished(mdev);
4521 }
4522 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004523 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004524 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004525}
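/*
 * Illustrative only: the shape of the one-shot work item queued above when
 * the last verify block comes in.  The callback signature (mdev, work,
 * cancel) is the one drbd work callbacks use; the body below is a sketch
 * of what w_ov_finished() is expected to do (free the kmalloc()ed item,
 * print the result, finish the resync), not a copy of the real function
 * in drbd_worker.c.
 */
static int example_ov_finished(struct drbd_conf *mdev,
			       struct drbd_work *w, int cancel)
{
	kfree(w);		/* one-shot item allocated in got_OVResult() */
	ov_oos_print(mdev);
	drbd_resync_finished(mdev);
	return 1;
}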
4526
Philipp Reisner02918be2010-08-20 14:35:10 +02004527static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004528{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004529 return true;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004530}
4531
Philipp Reisnerb411b362009-09-25 16:07:19 -07004532struct asender_cmd {
4533 size_t pkt_size;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004534 int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004535};
4536
4537static struct asender_cmd *get_asender_cmd(int cmd)
4538{
4539 static struct asender_cmd asender_tbl[] = {
4540 /* anything missing from this table is in
4541 * the drbd_cmd_handler (drbd_default_handler) table,
4542 * see the beginning of drbdd() */
Philipp Reisner0b70a132010-08-20 13:36:10 +02004543 [P_PING] = { sizeof(struct p_header80), got_Ping },
4544 [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07004545 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4546 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4547 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4548 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4549 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4550 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4551 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4552 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4553 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4554 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4555 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
Philipp Reisner02918be2010-08-20 14:35:10 +02004556 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
Philipp Reisnerd612d302010-12-27 10:53:28 +01004557 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
Philipp Reisnerb411b362009-09-25 16:07:19 -07004558 [P_MAX_CMD] = { 0, NULL },
4559 };
4560 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4561 return NULL;
4562 return &asender_tbl[cmd];
4563}
4564
4565int drbd_asender(struct drbd_thread *thi)
4566{
4567 struct drbd_conf *mdev = thi->mdev;
Philipp Reisner02918be2010-08-20 14:35:10 +02004568 struct p_header80 *h = &mdev->meta.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004569 struct asender_cmd *cmd = NULL;
4570
4571 int rv, len;
4572 void *buf = h;
4573 int received = 0;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004574 int expect = sizeof(struct p_header80);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004575 int empty;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004576 int ping_timeout_active = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004577
4578 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4579
4580 current->policy = SCHED_RR; /* Make this a realtime task! */
4581 current->rt_priority = 2; /* more important than all other tasks */
4582
4583 while (get_t_state(thi) == Running) {
4584 drbd_thread_current_set_cpu(mdev);
4585 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4586 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4587 mdev->meta.socket->sk->sk_rcvtimeo =
4588 mdev->net_conf->ping_timeo*HZ/10;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004589 ping_timeout_active = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004590 }
4591
4592 /* conditionally cork;
4593 * it may hurt latency if we cork without much to send */
4594 if (!mdev->net_conf->no_cork &&
4595 3 < atomic_read(&mdev->unacked_cnt))
4596 drbd_tcp_cork(mdev->meta.socket);
4597 while (1) {
4598 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4599 flush_signals(current);
Lars Ellenberg0f8488e2010-10-13 18:19:23 +02004600 if (!drbd_process_done_ee(mdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004601 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004602 /* to avoid race with newly queued ACKs */
4603 set_bit(SIGNAL_ASENDER, &mdev->flags);
4604 spin_lock_irq(&mdev->req_lock);
4605 empty = list_empty(&mdev->done_ee);
4606 spin_unlock_irq(&mdev->req_lock);
4607 /* new ack may have been queued right here,
4608 * but then there is also a signal pending,
4609 * and we start over... */
4610 if (empty)
4611 break;
4612 }
4613 /* but unconditionally uncork unless disabled */
4614 if (!mdev->net_conf->no_cork)
4615 drbd_tcp_uncork(mdev->meta.socket);
4616
4617 /* short circuit, recv_msg would return EINTR anyways. */
4618 if (signal_pending(current))
4619 continue;
4620
4621 rv = drbd_recv_short(mdev, mdev->meta.socket,
4622 buf, expect-received, 0);
4623 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4624
4625 flush_signals(current);
4626
4627 /* Note:
4628 * -EINTR (on meta) we got a signal
4629 * -EAGAIN (on meta) rcvtimeo expired
4630 * -ECONNRESET other side closed the connection
4631 * -ERESTARTSYS (on data) we got a signal
4632 * rv < 0 other than above: unexpected error!
4633 * rv == expected: full header or command
4634 * rv < expected: "woken" by signal during receive
4635 * rv == 0 : "connection shut down by peer"
4636 */
4637 if (likely(rv > 0)) {
4638 received += rv;
4639 buf += rv;
4640 } else if (rv == 0) {
4641 dev_err(DEV, "meta connection shut down by peer.\n");
4642 goto reconnect;
4643 } else if (rv == -EAGAIN) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004644 /* If the data socket received something meanwhile,
4645 * that is good enough: peer is still alive. */
4646 if (time_after(mdev->last_received,
4647 jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
4648 continue;
Lars Ellenbergf36af182011-03-09 22:44:55 +01004649 if (ping_timeout_active) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004650 dev_err(DEV, "PingAck did not arrive in time.\n");
4651 goto reconnect;
4652 }
4653 set_bit(SEND_PING, &mdev->flags);
4654 continue;
4655 } else if (rv == -EINTR) {
4656 continue;
4657 } else {
4658 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4659 goto reconnect;
4660 }
4661
4662 if (received == expect && cmd == NULL) {
4663 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
Lars Ellenberg004352f2010-10-05 20:13:58 +02004664 dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
4665 be32_to_cpu(h->magic),
4666 be16_to_cpu(h->command),
4667 be16_to_cpu(h->length));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004668 goto reconnect;
4669 }
4670 cmd = get_asender_cmd(be16_to_cpu(h->command));
4671 len = be16_to_cpu(h->length);
4672 if (unlikely(cmd == NULL)) {
Lars Ellenberg004352f2010-10-05 20:13:58 +02004673 dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
4674 be32_to_cpu(h->magic),
4675 be16_to_cpu(h->command),
4676 be16_to_cpu(h->length));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004677 goto disconnect;
4678 }
4679 expect = cmd->pkt_size;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004680 ERR_IF(len != expect-sizeof(struct p_header80))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004681 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004682 }
4683 if (received == expect) {
Lars Ellenbergcb6518c2011-06-20 14:44:45 +02004684 mdev->last_received = jiffies;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004685 D_ASSERT(cmd != NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004686 if (!cmd->process(mdev, h))
4687 goto reconnect;
4688
Lars Ellenbergf36af182011-03-09 22:44:55 +01004689 /* the idle_timeout (ping-int)
4690 * has been restored in got_PingAck() */
4691 if (cmd == get_asender_cmd(P_PING_ACK))
4692 ping_timeout_active = 0;
4693
Philipp Reisnerb411b362009-09-25 16:07:19 -07004694 buf = h;
4695 received = 0;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004696 expect = sizeof(struct p_header80);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004697 cmd = NULL;
4698 }
4699 }
4700
4701 if (0) {
4702reconnect:
4703 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
Lars Ellenberg856c50c2010-10-14 13:37:40 +02004704 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004705 }
4706 if (0) {
4707disconnect:
4708 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Lars Ellenberg856c50c2010-10-14 13:37:40 +02004709 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004710 }
4711 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4712
4713 D_ASSERT(mdev->state.conn < C_CONNECTED);
4714 dev_info(DEV, "asender terminated\n");
4715
4716 return 0;
4717}
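/*
 * Illustrative only: the on-the-wire header that the receive loop above
 * accumulates into `h' before dispatching via get_asender_cmd().  The field
 * layout is inferred from the be32/be16 conversions used above (the real
 * struct p_header80 lives in drbd_int.h); this user-space sketch only shows
 * the byte-order handling and is not driver code.
 */
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/ntohs() */

struct wire_header80 {		/* hypothetical stand-in for p_header80 */
	uint32_t magic;		/* big endian protocol magic */
	uint16_t command;	/* big endian P_* command code */
	uint16_t length;	/* big endian payload size, header excluded */
} __attribute__((packed));

static int parse_header80(const struct wire_header80 *h, uint32_t magic,
			  uint16_t *cmd, uint16_t *len)
{
	if (ntohl(h->magic) != magic)
		return -1;	/* the "magic?? on meta ..." case above */
	*cmd = ntohs(h->command);
	*len = ntohs(h->length);
	return 0;
}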